From f6887b4d8afe4f7e0865ee59b1dfc3994d5abff3 Mon Sep 17 00:00:00 2001 From: Zanis <22601571+ZanisO@users.noreply.github.com> Date: Tue, 24 Dec 2024 22:45:19 +0000 Subject: [PATCH 01/24] Added support for CSI secret provider Signed-off-by: Zanis <22601571+ZanisO@users.noreply.github.com> --- go.mod | 1 + go.sum | 2 ++ internal/pkg/cmd/reloader.go | 5 +++++ internal/pkg/constants/constants.go | 2 ++ internal/pkg/controller/controller.go | 11 ++++++++++- internal/pkg/handler/update.go | 4 ++++ internal/pkg/handler/upgrade.go | 9 +++++++++ internal/pkg/options/flags.go | 4 ++++ internal/pkg/util/config.go | 13 ++++++++++++- internal/pkg/util/util.go | 11 +++++++++++ pkg/kube/client.go | 18 ++++++++++++++++++ pkg/kube/resourcemapper.go | 8 +++++--- 12 files changed, 83 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index a5e04f8..fcc49ae 100644 --- a/go.mod +++ b/go.mod @@ -69,6 +69,7 @@ require ( k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/secrets-store-csi-driver v1.4.7 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index 995b99c..5de1fc0 100644 --- a/go.sum +++ b/go.sum @@ -403,6 +403,8 @@ k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/secrets-store-csi-driver v1.4.7 h1:AyuwmPTW2GoPD2RjyVD3OrH1J9cdPZx+0h2qJvzbGXs= +sigs.k8s.io/secrets-store-csi-driver v1.4.7/go.mod h1:0/wMVOv8qLx7YNVMGU+Sh7S4D6TH6GhyEpouo28OTUU= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= diff --git a/internal/pkg/cmd/reloader.go b/internal/pkg/cmd/reloader.go index f0aac83..fb7c012 100644 --- a/internal/pkg/cmd/reloader.go +++ b/internal/pkg/cmd/reloader.go @@ -54,6 +54,7 @@ func NewReloaderCommand() *cobra.Command { cmd.PersistentFlags().StringVar(&options.ReloadOnDelete, "reload-on-delete", "false", "Add support to watch delete events") cmd.PersistentFlags().BoolVar(&options.EnableHA, "enable-ha", false, "Adds support for running multiple replicas via leadership election") cmd.PersistentFlags().BoolVar(&options.SyncAfterRestart, "sync-after-restart", false, "Sync add events after reloader restarts") + cmd.PersistentFlags().BoolVar(&options.EnableCSIIntegration, "enable-csi-integration", false, "Watch SecretProviderClassPodStatus for changes") return cmd } @@ -176,6 +177,10 @@ func startReloader(cmd *cobra.Command, args []string) { var controllers []*controller.Controller for k := range kube.ResourceMap { + if k == "secretproviderclasspodstatuses" && !options.EnableCSIIntegration { + continue + } + if ignoredResourcesList.Contains(k) || (len(namespaceLabelSelector) == 0 && k == "namespaces") { continue } diff --git a/internal/pkg/constants/constants.go b/internal/pkg/constants/constants.go index 18d1cc7..6ad3bd5 100644 --- a/internal/pkg/constants/constants.go +++ 
b/internal/pkg/constants/constants.go @@ -8,6 +8,8 @@ const ( ConfigmapEnvVarPostfix = "CONFIGMAP" // SecretEnvVarPostfix is a postfix for secret envVar SecretEnvVarPostfix = "SECRET" + // SecretEnvVarSecretProviderClassPodStatus is a postfix for secretproviderclasspodstatus envVar + SecretProviderClassEnvVarPostfix = "SECRETPROVIDERCLASS" // EnvVarPrefix is a Prefix for environment variable EnvVarPrefix = "STAKATER_" diff --git a/internal/pkg/controller/controller.go b/internal/pkg/controller/controller.go index 7dc7664..bf8ea4b 100644 --- a/internal/pkg/controller/controller.go +++ b/internal/pkg/controller/controller.go @@ -79,7 +79,16 @@ func NewController( } } - listWatcher := cache.NewFilteredListWatchFromClient(client.CoreV1().RESTClient(), resource, namespace, optionsModifier) + getterRESTClient := client.CoreV1().RESTClient() + if resource == "secretproviderclasspodstatuses" { + csiClient, err := kube.GetCSIClient() + if err != nil { + logrus.Fatal(err) + } + getterRESTClient = csiClient.SecretsstoreV1().RESTClient() + } + + listWatcher := cache.NewFilteredListWatchFromClient(getterRESTClient, resource, namespace, optionsModifier) _, informer := cache.NewInformerWithOptions(cache.InformerOptions{ ListerWatcher: listWatcher, diff --git a/internal/pkg/handler/update.go b/internal/pkg/handler/update.go index 0575e19..6a0baac 100644 --- a/internal/pkg/handler/update.go +++ b/internal/pkg/handler/update.go @@ -7,6 +7,7 @@ import ( "github.com/stakater/Reloader/internal/pkg/util" v1 "k8s.io/api/core/v1" "k8s.io/client-go/tools/record" + csiv1 "sigs.k8s.io/secrets-store-csi-driver/apis/v1" ) // ResourceUpdatedHandler contains updated objects @@ -45,6 +46,9 @@ func (r ResourceUpdatedHandler) GetConfig() (util.Config, string) { } else if _, ok := r.Resource.(*v1.Secret); ok { oldSHAData = util.GetSHAfromSecret(r.OldResource.(*v1.Secret).Data) config = util.GetSecretConfig(r.Resource.(*v1.Secret)) + } else if _, ok := r.Resource.(*csiv1.SecretProviderClassPodStatus); ok { + oldSHAData = util.GetSHAfromSecretProviderClassPodStatus(r.OldResource.(*csiv1.SecretProviderClassPodStatus).Status) + config = util.GetSecretProviderClassPodStatusConfig(r.Resource.(*csiv1.SecretProviderClassPodStatus)) } else { logrus.Warnf("Invalid resource: Resource should be 'Secret' or 'Configmap' but found, %v", r.Resource) } diff --git a/internal/pkg/handler/upgrade.go b/internal/pkg/handler/upgrade.go index 8365fb5..3d04a2c 100644 --- a/internal/pkg/handler/upgrade.go +++ b/internal/pkg/handler/upgrade.go @@ -219,6 +219,7 @@ func PerformAction(clients kube.Clients, config util.Config, upgradeFuncs callba typedAutoAnnotationEnabledValue, foundTypedAuto := annotations[config.TypedAutoAnnotation] excludeConfigmapAnnotationValue, foundExcludeConfigmap := annotations[options.ConfigmapExcludeReloaderAnnotation] excludeSecretAnnotationValue, foundExcludeSecret := annotations[options.SecretExcludeReloaderAnnotation] + excludeSecretProviderClassProviderAnnotationValue, foundExcludeSecretProviderClass := annotations[options.SecretProviderClassExcludeReloaderAnnotation] if !found && !foundAuto && !foundTypedAuto && !foundSearchAnn { annotations = upgradeFuncs.PodAnnotationsFunc(i) @@ -239,6 +240,10 @@ func PerformAction(clients kube.Clients, config util.Config, upgradeFuncs callba if foundExcludeSecret { isResourceExcluded = checkIfResourceIsExcluded(config.ResourceName, excludeSecretAnnotationValue) } + case constants.SecretProviderClassEnvVarPostfix: + if foundExcludeSecretProviderClass { + isResourceExcluded = 
checkIfResourceIsExcluded(config.ResourceName, excludeSecretProviderClassProviderAnnotationValue) + } } if isResourceExcluded { @@ -355,6 +360,10 @@ func getVolumeMountName(volumes []v1.Volume, mountType string, volumeName string } } } + } else if mountType == constants.SecretProviderClassEnvVarPostfix { + if volumes[i].CSI != nil && volumes[i].CSI.VolumeAttributes["secretProviderClass"] == volumeName { + return volumes[i].Name + } } } diff --git a/internal/pkg/options/flags.go b/internal/pkg/options/flags.go index 081acc3..8a5e9b4 100644 --- a/internal/pkg/options/flags.go +++ b/internal/pkg/options/flags.go @@ -30,6 +30,8 @@ var ( ConfigmapExcludeReloaderAnnotation = "configmaps.exclude.reloader.stakater.com/reload" // SecretExcludeReloaderAnnotation is a comma separated list of secrets that excludes detecting changes on secrets SecretExcludeReloaderAnnotation = "secrets.exclude.reloader.stakater.com/reload" + // SecretProviderClassExcludeReloaderAnnotation is a comma separated list of secret provider classes that excludes detecting changes on secret provider class + SecretProviderClassExcludeReloaderAnnotation = "secretproviderclass.exclude.reloader.stakater.com/reload" // AutoSearchAnnotation is an annotation to detect changes in // configmaps or triggers with the SearchMatchAnnotation AutoSearchAnnotation = "reloader.stakater.com/search" @@ -55,6 +57,8 @@ var ( EnableHA = false // Url to send a request to instead of triggering a reload WebhookUrl = "" + // EnableCsiIntegration Adds support to watch SecretProviderClassPodStatus and restart deployment based on it + EnableCSIIntegration = false ) func ToArgoRolloutStrategy(s string) ArgoRolloutStrategy { diff --git a/internal/pkg/util/config.go b/internal/pkg/util/config.go index 184eb68..460fc24 100644 --- a/internal/pkg/util/config.go +++ b/internal/pkg/util/config.go @@ -4,9 +4,10 @@ import ( "github.com/stakater/Reloader/internal/pkg/constants" "github.com/stakater/Reloader/internal/pkg/options" v1 "k8s.io/api/core/v1" + csiv1 "sigs.k8s.io/secrets-store-csi-driver/apis/v1" ) -//Config contains rolling upgrade configuration parameters +// Config contains rolling upgrade configuration parameters type Config struct { Namespace string ResourceName string @@ -42,3 +43,13 @@ func GetSecretConfig(secret *v1.Secret) Config { Type: constants.SecretEnvVarPostfix, } } + +func GetSecretProviderClassPodStatusConfig(podStatus *csiv1.SecretProviderClassPodStatus) Config { + return Config{ + Namespace: podStatus.Namespace, + ResourceName: podStatus.Status.SecretProviderClassName, + ResourceAnnotations: podStatus.Annotations, + SHAValue: GetSHAfromSecretProviderClassPodStatus(podStatus.Status), + Type: constants.SecretProviderClassEnvVarPostfix, + } +} diff --git a/internal/pkg/util/util.go b/internal/pkg/util/util.go index 1a2696d..f23094b 100644 --- a/internal/pkg/util/util.go +++ b/internal/pkg/util/util.go @@ -8,6 +8,7 @@ import ( "github.com/stakater/Reloader/internal/pkg/crypto" v1 "k8s.io/api/core/v1" + csiv1 "sigs.k8s.io/secrets-store-csi-driver/apis/v1" ) // ConvertToEnvVarName converts the given text into a usable env var @@ -52,6 +53,16 @@ func GetSHAfromSecret(data map[string][]byte) string { return crypto.GenerateSHA(strings.Join(values, ";")) } +func GetSHAfromSecretProviderClassPodStatus(data csiv1.SecretProviderClassPodStatusStatus) string { + values := []string{} + for _, v := range data.Objects { + values = append(values, v.ID+"="+v.Version) + } + values = append(values, "SecretProviderClassName="+data.SecretProviderClassName) + 
sort.Strings(values) + return crypto.GenerateSHA(strings.Join(values, ";")) +} + type List []string type Map map[string]string diff --git a/pkg/kube/client.go b/pkg/kube/client.go index 4230063..140087d 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -11,6 +11,7 @@ import ( "github.com/sirupsen/logrus" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" + csi "sigs.k8s.io/secrets-store-csi-driver/pkg/client/clientset/versioned" ) // Clients struct exposes interfaces for kubernetes as well as openshift if available @@ -18,6 +19,7 @@ type Clients struct { KubernetesClient kubernetes.Interface OpenshiftAppsClient appsclient.Interface ArgoRolloutClient argorollout.Interface + CSIClient csi.Interface } var ( @@ -48,10 +50,18 @@ func GetClients() Clients { logrus.Warnf("Unable to create ArgoRollout client error = %v", err) } + var csiClient *csi.Clientset + + csiClient, err = GetCSIClient() + if err != nil { + logrus.Warnf("Unable to create CSI client error = %v", err) + } + return Clients{ KubernetesClient: client, OpenshiftAppsClient: appsClient, ArgoRolloutClient: rolloutClient, + CSIClient: csiClient, } } @@ -63,6 +73,14 @@ func GetArgoRolloutClient() (*argorollout.Clientset, error) { return argorollout.NewForConfig(config) } +func GetCSIClient() (*csi.Clientset, error) { + config, err := getConfig() + if err != nil { + return nil, err + } + return csi.NewForConfig(config) +} + func isOpenshift() bool { client, err := GetKubernetesClient() if err != nil { diff --git a/pkg/kube/resourcemapper.go b/pkg/kube/resourcemapper.go index fb42e61..595c35e 100644 --- a/pkg/kube/resourcemapper.go +++ b/pkg/kube/resourcemapper.go @@ -3,11 +3,13 @@ package kube import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" + csiv1 "sigs.k8s.io/secrets-store-csi-driver/apis/v1" ) // ResourceMap are resources from where changes are going to be detected var ResourceMap = map[string]runtime.Object{ - "configMaps": &v1.ConfigMap{}, - "secrets": &v1.Secret{}, - "namespaces": &v1.Namespace{}, + "configMaps": &v1.ConfigMap{}, + "secrets": &v1.Secret{}, + "namespaces": &v1.Namespace{}, + "secretproviderclasspodstatuses": &csiv1.SecretProviderClassPodStatus{}, } From 6d1d017aa430f04cae4e8324c3b5a29d04ac90ff Mon Sep 17 00:00:00 2001 From: Zanis <22601571+ZanisO@users.noreply.github.com> Date: Thu, 2 Jan 2025 23:38:17 +0000 Subject: [PATCH 02/24] Don't reload existing config --- internal/pkg/handler/upgrade.go | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/internal/pkg/handler/upgrade.go b/internal/pkg/handler/upgrade.go index 3d04a2c..865a0a6 100644 --- a/internal/pkg/handler/upgrade.go +++ b/internal/pkg/handler/upgrade.go @@ -479,6 +479,10 @@ func updatePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item runti return constants.NotUpdated } + if config.Type == constants.SecretProviderClassEnvVarPostfix && secretProviderClassAnnotationReloaded(pa, config) { + return constants.NotUpdated + } + for k, v := range annotations { pa[k] = v } @@ -493,6 +497,11 @@ func getReloaderAnnotationKey() string { ) } +func secretProviderClassAnnotationReloaded(oldAnnotations map[string]string, newConfig util.Config) bool { + annotaion := oldAnnotations[getReloaderAnnotationKey()] + return strings.Contains(annotaion, newConfig.ResourceName) && strings.Contains(annotaion, newConfig.SHAValue) +} + func createReloadedAnnotations(target *util.ReloadSource) (map[string]string, error) { if target == nil { return nil, errors.New("target is required") @@ -527,6 +536,10 @@ func 
updateContainerEnvVars(upgradeFuncs callbacks.RollingUpgradeFuncs, item run return constants.NoContainerFound } + if config.Type == constants.SecretProviderClassEnvVarPostfix && secretProviderClassEnvReloaded(upgradeFuncs.ContainersFunc(item), envVar, config.SHAValue) { + return constants.NotUpdated + } + //update if env var exists result = updateEnvVar(upgradeFuncs.ContainersFunc(item), envVar, config.SHAValue) @@ -557,3 +570,15 @@ func updateEnvVar(containers []v1.Container, envVar string, shaData string) cons } return constants.NoEnvVarFound } + +func secretProviderClassEnvReloaded(containers []v1.Container, envVar string, shaData string) bool { + for i := range containers { + envs := containers[i].Env + for j := range envs { + if envs[j].Name == envVar { + return envs[j].Value == shaData + } + } + } + return false +} From 3c39406ca981ec683c7de330c845f1da5b348e41 Mon Sep 17 00:00:00 2001 From: Zanis <22601571+ZanisO@users.noreply.github.com> Date: Thu, 9 Jan 2025 23:54:10 +0000 Subject: [PATCH 03/24] Added capability to use OnChangeAnnotation and TypeAutoAnnotation --- internal/pkg/cmd/reloader.go | 2 ++ internal/pkg/handler/upgrade.go | 17 +++++++++++++++++ internal/pkg/options/flags.go | 7 ++++++- internal/pkg/util/config.go | 5 ++++- pkg/kube/client.go | 10 +++++----- 5 files changed, 34 insertions(+), 7 deletions(-) diff --git a/internal/pkg/cmd/reloader.go b/internal/pkg/cmd/reloader.go index fb7c012..a1e2482 100644 --- a/internal/pkg/cmd/reloader.go +++ b/internal/pkg/cmd/reloader.go @@ -36,9 +36,11 @@ func NewReloaderCommand() *cobra.Command { cmd.PersistentFlags().BoolVar(&options.AutoReloadAll, "auto-reload-all", false, "Auto reload all resources") cmd.PersistentFlags().StringVar(&options.ConfigmapUpdateOnChangeAnnotation, "configmap-annotation", "configmap.reloader.stakater.com/reload", "annotation to detect changes in configmaps, specified by name") cmd.PersistentFlags().StringVar(&options.SecretUpdateOnChangeAnnotation, "secret-annotation", "secret.reloader.stakater.com/reload", "annotation to detect changes in secrets, specified by name") + cmd.PersistentFlags().StringVar(&options.SecretProviderClassUpdateOnChangeAnnotation, "spc-annotation", "secretproviderclass.reloader.stakater.com/reload", "annotation to detect changes in secretproviderclasses, specified by name") cmd.PersistentFlags().StringVar(&options.ReloaderAutoAnnotation, "auto-annotation", "reloader.stakater.com/auto", "annotation to detect changes in secrets/configmaps") cmd.PersistentFlags().StringVar(&options.ConfigmapReloaderAutoAnnotation, "configmap-auto-annotation", "configmap.reloader.stakater.com/auto", "annotation to detect changes in configmaps") cmd.PersistentFlags().StringVar(&options.SecretReloaderAutoAnnotation, "secret-auto-annotation", "secret.reloader.stakater.com/auto", "annotation to detect changes in secrets") + cmd.PersistentFlags().StringVar(&options.SecretProviderClassReloaderAutoAnnotation, "spc-auto-annotation", "secretproviderclass.reloader.stakater.com/auto", "annotation to detect changes in secretproviderclasses") cmd.PersistentFlags().StringVar(&options.AutoSearchAnnotation, "auto-search-annotation", "reloader.stakater.com/search", "annotation to detect changes in configmaps or secrets tagged with special match annotation") cmd.PersistentFlags().StringVar(&options.SearchMatchAnnotation, "search-match-annotation", "reloader.stakater.com/match", "annotation to mark secrets or configmaps to match the search") cmd.PersistentFlags().StringVar(&options.LogFormat, "log-format", "", "Log 
format to use (empty string for text, or JSON)") diff --git a/internal/pkg/handler/upgrade.go b/internal/pkg/handler/upgrade.go index 865a0a6..4542455 100644 --- a/internal/pkg/handler/upgrade.go +++ b/internal/pkg/handler/upgrade.go @@ -2,6 +2,7 @@ package handler import ( "bytes" + "context" "encoding/json" "errors" "fmt" @@ -23,6 +24,7 @@ import ( "github.com/stakater/Reloader/pkg/kube" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/record" ) @@ -210,6 +212,10 @@ func rollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callb func PerformAction(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder, strategy invokeStrategy) error { items := upgradeFuncs.ItemsFunc(clients, config.Namespace) + if config.Type == constants.SecretProviderClassEnvVarPostfix { + populateAnnotationsFromSecretProviderClass(clients, &config) + } + for _, i := range items { // find correct annotation and update the resource annotations := upgradeFuncs.AnnotationsFunc(i) @@ -582,3 +588,14 @@ func secretProviderClassEnvReloaded(containers []v1.Container, envVar string, sh } return false } + +func populateAnnotationsFromSecretProviderClass(clients kube.Clients, config *util.Config) { + obj, err := clients.CSIClient.SecretsstoreV1().SecretProviderClasses(config.Namespace).Get(context.TODO(), config.ResourceName, metav1.GetOptions{}) + annotations := make(map[string]string) + if err != nil { + logrus.Infof("Couldn't find secretproviderclass '%s' in '%s' namespace for typed annotation", config.ResourceName, config.Namespace) + } else if obj.Annotations != nil { + annotations = obj.Annotations + } + config.ResourceAnnotations = annotations +} diff --git a/internal/pkg/options/flags.go b/internal/pkg/options/flags.go index 8a5e9b4..8267bed 100644 --- a/internal/pkg/options/flags.go +++ b/internal/pkg/options/flags.go @@ -20,18 +20,23 @@ var ( // SecretUpdateOnChangeAnnotation is an annotation to detect changes in // secrets specified by name SecretUpdateOnChangeAnnotation = "secret.reloader.stakater.com/reload" + // SecretProviderClassUpdateOnChangeAnnotation is an annotation to detect changes in + // secretproviderclasses specified by name + SecretProviderClassUpdateOnChangeAnnotation = "secretproviderclass.reloader.stakater.com/reload" // ReloaderAutoAnnotation is an annotation to detect changes in secrets/configmaps ReloaderAutoAnnotation = "reloader.stakater.com/auto" // ConfigmapReloaderAutoAnnotation is an annotation to detect changes in configmaps ConfigmapReloaderAutoAnnotation = "configmap.reloader.stakater.com/auto" // SecretReloaderAutoAnnotation is an annotation to detect changes in secrets SecretReloaderAutoAnnotation = "secret.reloader.stakater.com/auto" + // SecretProviderClassReloaderAutoAnnotation is an annotation to detect changes in secretproviderclasses + SecretProviderClassReloaderAutoAnnotation = "secretproviderclass.reloader.stakater.com/auto" // ConfigmapReloaderAutoAnnotation is a comma separated list of configmaps that excludes detecting changes on cms ConfigmapExcludeReloaderAnnotation = "configmaps.exclude.reloader.stakater.com/reload" // SecretExcludeReloaderAnnotation is a comma separated list of secrets that excludes detecting changes on secrets SecretExcludeReloaderAnnotation = "secrets.exclude.reloader.stakater.com/reload" // 
SecretProviderClassExcludeReloaderAnnotation is a comma separated list of secret provider classes that excludes detecting changes on secret provider class - SecretProviderClassExcludeReloaderAnnotation = "secretproviderclass.exclude.reloader.stakater.com/reload" + SecretProviderClassExcludeReloaderAnnotation = "secretproviderclasses.exclude.reloader.stakater.com/reload" // AutoSearchAnnotation is an annotation to detect changes in // configmaps or triggers with the SearchMatchAnnotation AutoSearchAnnotation = "reloader.stakater.com/search" diff --git a/internal/pkg/util/config.go b/internal/pkg/util/config.go index 460fc24..6d6ff21 100644 --- a/internal/pkg/util/config.go +++ b/internal/pkg/util/config.go @@ -45,10 +45,13 @@ func GetSecretConfig(secret *v1.Secret) Config { } func GetSecretProviderClassPodStatusConfig(podStatus *csiv1.SecretProviderClassPodStatus) Config { + // As csi injects SecretProviderClass, we will create config for it instead of SecretProviderClassPodStatus + // ResourceAnnotations will be retrieved during PerformAction call return Config{ Namespace: podStatus.Namespace, ResourceName: podStatus.Status.SecretProviderClassName, - ResourceAnnotations: podStatus.Annotations, + Annotation: options.SecretProviderClassUpdateOnChangeAnnotation, + TypedAutoAnnotation: options.SecretProviderClassReloaderAutoAnnotation, SHAValue: GetSHAfromSecretProviderClassPodStatus(podStatus.Status), Type: constants.SecretProviderClassEnvVarPostfix, } diff --git a/pkg/kube/client.go b/pkg/kube/client.go index 140087d..af67319 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -11,7 +11,7 @@ import ( "github.com/sirupsen/logrus" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" - csi "sigs.k8s.io/secrets-store-csi-driver/pkg/client/clientset/versioned" + csiclient "sigs.k8s.io/secrets-store-csi-driver/pkg/client/clientset/versioned" ) // Clients struct exposes interfaces for kubernetes as well as openshift if available @@ -19,7 +19,7 @@ type Clients struct { KubernetesClient kubernetes.Interface OpenshiftAppsClient appsclient.Interface ArgoRolloutClient argorollout.Interface - CSIClient csi.Interface + CSIClient csiclient.Interface } var ( @@ -50,7 +50,7 @@ func GetClients() Clients { logrus.Warnf("Unable to create ArgoRollout client error = %v", err) } - var csiClient *csi.Clientset + var csiClient *csiclient.Clientset csiClient, err = GetCSIClient() if err != nil { @@ -73,12 +73,12 @@ func GetArgoRolloutClient() (*argorollout.Clientset, error) { return argorollout.NewForConfig(config) } -func GetCSIClient() (*csi.Clientset, error) { +func GetCSIClient() (*csiclient.Clientset, error) { config, err := getConfig() if err != nil { return nil, err } - return csi.NewForConfig(config) + return csiclient.NewForConfig(config) } func isOpenshift() bool { From 75f9a23de30db042080531da3041486702dcd432 Mon Sep 17 00:00:00 2001 From: Zanis <22601571+ZanisO@users.noreply.github.com> Date: Tue, 14 Jan 2025 23:51:15 +0000 Subject: [PATCH 04/24] Added tests --- internal/pkg/handler/upgrade_test.go | 963 +++++++++++++++++++++++++-- internal/pkg/testutil/kube.go | 143 +++- 2 files changed, 1035 insertions(+), 71 deletions(-) diff --git a/internal/pkg/handler/upgrade_test.go b/internal/pkg/handler/upgrade_test.go index 2b71740..35acddb 100644 --- a/internal/pkg/handler/upgrade_test.go +++ b/internal/pkg/handler/upgrade_test.go @@ -21,57 +21,73 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" testclient "k8s.io/client-go/kubernetes/fake" + csitestclient 
"sigs.k8s.io/secrets-store-csi-driver/pkg/client/clientset/versioned/fake" ) var ( - clients = kube.Clients{KubernetesClient: testclient.NewSimpleClientset()} + clients = kube.Clients{ + KubernetesClient: testclient.NewSimpleClientset(), + CSIClient: csitestclient.NewSimpleClientset(), + } - arsNamespace = "test-handler-" + testutil.RandSeq(5) - arsConfigmapName = "testconfigmap-handler-" + testutil.RandSeq(5) - arsSecretName = "testsecret-handler-" + testutil.RandSeq(5) - arsProjectedConfigMapName = "testprojectedconfigmap-handler-" + testutil.RandSeq(5) - arsProjectedSecretName = "testprojectedsecret-handler-" + testutil.RandSeq(5) - arsConfigmapWithInitContainer = "testconfigmapInitContainerhandler-" + testutil.RandSeq(5) - arsSecretWithInitContainer = "testsecretWithInitContainer-handler-" + testutil.RandSeq(5) - arsProjectedConfigMapWithInitContainer = "testProjectedConfigMapWithInitContainer-handler" + testutil.RandSeq(5) - arsProjectedSecretWithInitContainer = "testProjectedSecretWithInitContainer-handler" + testutil.RandSeq(5) - arsConfigmapWithInitEnv = "configmapWithInitEnv-" + testutil.RandSeq(5) - arsSecretWithInitEnv = "secretWithInitEnv-handler-" + testutil.RandSeq(5) - arsConfigmapWithEnvName = "testconfigmapWithEnv-handler-" + testutil.RandSeq(5) - arsConfigmapWithEnvFromName = "testconfigmapWithEnvFrom-handler-" + testutil.RandSeq(5) - arsSecretWithEnvName = "testsecretWithEnv-handler-" + testutil.RandSeq(5) - arsSecretWithEnvFromName = "testsecretWithEnvFrom-handler-" + testutil.RandSeq(5) - arsConfigmapWithPodAnnotations = "testconfigmapPodAnnotations-handler-" + testutil.RandSeq(5) - arsConfigmapWithBothAnnotations = "testconfigmapBothAnnotations-handler-" + testutil.RandSeq(5) - arsConfigmapAnnotated = "testconfigmapAnnotated-handler-" + testutil.RandSeq(5) - arsConfigMapWithNonAnnotatedDeployment = "testconfigmapNonAnnotatedDeployment-handler-" + testutil.RandSeq(5) - arsSecretWithSecretAutoAnnotation = "testsecretwithsecretautoannotationdeployment-handler-" + testutil.RandSeq(5) - arsConfigmapWithConfigMapAutoAnnotation = "testconfigmapwithconfigmapautoannotationdeployment-handler-" + testutil.RandSeq(5) - arsSecretWithExcludeSecretAnnotation = "testsecretwithsecretexcludeannotationdeployment-handler-" + testutil.RandSeq(5) - arsConfigmapWithExcludeConfigMapAnnotation = "testconfigmapwithconfigmapexcludeannotationdeployment-handler-" + testutil.RandSeq(5) + arsNamespace = "test-handler-" + testutil.RandSeq(5) + arsConfigmapName = "testconfigmap-handler-" + testutil.RandSeq(5) + arsSecretName = "testsecret-handler-" + testutil.RandSeq(5) + arsSecretProviderClassName = "testsecretproviderclass-handler-" + testutil.RandSeq(5) + arsProjectedConfigMapName = "testprojectedconfigmap-handler-" + testutil.RandSeq(5) + arsProjectedSecretName = "testprojectedsecret-handler-" + testutil.RandSeq(5) + arsConfigmapWithInitContainer = "testconfigmapInitContainerhandler-" + testutil.RandSeq(5) + arsSecretWithInitContainer = "testsecretWithInitContainer-handler-" + testutil.RandSeq(5) + arsSecretProviderClassWithInitContainer = "testsecretproviderclassWithInitContainer-handler-" + testutil.RandSeq(5) + arsProjectedConfigMapWithInitContainer = "testProjectedConfigMapWithInitContainer-handler" + testutil.RandSeq(5) + arsProjectedSecretWithInitContainer = "testProjectedSecretWithInitContainer-handler" + testutil.RandSeq(5) + arsConfigmapWithInitEnv = "configmapWithInitEnv-" + testutil.RandSeq(5) + arsSecretWithInitEnv = "secretWithInitEnv-handler-" + testutil.RandSeq(5) + 
arsConfigmapWithEnvName = "testconfigmapWithEnv-handler-" + testutil.RandSeq(5) + arsConfigmapWithEnvFromName = "testconfigmapWithEnvFrom-handler-" + testutil.RandSeq(5) + arsSecretWithEnvName = "testsecretWithEnv-handler-" + testutil.RandSeq(5) + arsSecretWithEnvFromName = "testsecretWithEnvFrom-handler-" + testutil.RandSeq(5) + arsConfigmapWithPodAnnotations = "testconfigmapPodAnnotations-handler-" + testutil.RandSeq(5) + arsConfigmapWithBothAnnotations = "testconfigmapBothAnnotations-handler-" + testutil.RandSeq(5) + arsConfigmapAnnotated = "testconfigmapAnnotated-handler-" + testutil.RandSeq(5) + arsConfigMapWithNonAnnotatedDeployment = "testconfigmapNonAnnotatedDeployment-handler-" + testutil.RandSeq(5) + arsSecretWithSecretAutoAnnotation = "testsecretwithsecretautoannotationdeployment-handler-" + testutil.RandSeq(5) + arsConfigmapWithConfigMapAutoAnnotation = "testconfigmapwithconfigmapautoannotationdeployment-handler-" + testutil.RandSeq(5) + arsSecretProviderClassWithSPCAutoAnnotation = "testsecretproviderclasswithspcautoannotationdeployment-handler-" + testutil.RandSeq(5) + arsSecretWithExcludeSecretAnnotation = "testsecretwithsecretexcludeannotationdeployment-handler-" + testutil.RandSeq(5) + arsConfigmapWithExcludeConfigMapAnnotation = "testconfigmapwithconfigmapexcludeannotationdeployment-handler-" + testutil.RandSeq(5) + arsSecretProviderClassWithExcludeSPCAnnotation = "testsecretproviderclasswithspcexcludeannotationdeployment-handler-" + testutil.RandSeq(5) + arsSecretProviderClassReloadedWithSameConfig = "testsecretproviderclassreloadedwithsameconfig-handler-" + testutil.RandSeq(5) + arsSecretProviderClassReloadedWithDifferentConfig = "testsecretproviderclassreloadedwithdifferentconfig-handler-" + testutil.RandSeq(5) - ersNamespace = "test-handler-" + testutil.RandSeq(5) - ersConfigmapName = "testconfigmap-handler-" + testutil.RandSeq(5) - ersSecretName = "testsecret-handler-" + testutil.RandSeq(5) - ersProjectedConfigMapName = "testprojectedconfigmap-handler-" + testutil.RandSeq(5) - ersProjectedSecretName = "testprojectedsecret-handler-" + testutil.RandSeq(5) - ersConfigmapWithInitContainer = "testconfigmapInitContainerhandler-" + testutil.RandSeq(5) - ersSecretWithInitContainer = "testsecretWithInitContainer-handler-" + testutil.RandSeq(5) - ersProjectedConfigMapWithInitContainer = "testProjectedConfigMapWithInitContainer-handler" + testutil.RandSeq(5) - ersProjectedSecretWithInitContainer = "testProjectedSecretWithInitContainer-handler" + testutil.RandSeq(5) - ersConfigmapWithInitEnv = "configmapWithInitEnv-" + testutil.RandSeq(5) - ersSecretWithInitEnv = "secretWithInitEnv-handler-" + testutil.RandSeq(5) - ersConfigmapWithEnvName = "testconfigmapWithEnv-handler-" + testutil.RandSeq(5) - ersConfigmapWithEnvFromName = "testconfigmapWithEnvFrom-handler-" + testutil.RandSeq(5) - ersSecretWithEnvName = "testsecretWithEnv-handler-" + testutil.RandSeq(5) - ersSecretWithEnvFromName = "testsecretWithEnvFrom-handler-" + testutil.RandSeq(5) - ersConfigmapWithPodAnnotations = "testconfigmapPodAnnotations-handler-" + testutil.RandSeq(5) - ersConfigmapWithBothAnnotations = "testconfigmapBothAnnotations-handler-" + testutil.RandSeq(5) - ersConfigmapAnnotated = "testconfigmapAnnotated-handler-" + testutil.RandSeq(5) - ersSecretWithSecretAutoAnnotation = "testsecretwithsecretautoannotationdeployment-handler-" + testutil.RandSeq(5) - ersConfigmapWithConfigMapAutoAnnotation = "testconfigmapwithconfigmapautoannotationdeployment-handler-" + testutil.RandSeq(5) - 
ersSecretWithSecretExcludeAnnotation = "testsecretwithsecretexcludeannotationdeployment-handler-" + testutil.RandSeq(5) - ersConfigmapWithConfigMapExcludeAnnotation = "testconfigmapwithconfigmapexcludeannotationdeployment-handler-" + testutil.RandSeq(5) + ersNamespace = "test-handler-" + testutil.RandSeq(5) + ersConfigmapName = "testconfigmap-handler-" + testutil.RandSeq(5) + ersSecretName = "testsecret-handler-" + testutil.RandSeq(5) + ersSecretProviderClassName = "testsecretproviderclass-handler-" + testutil.RandSeq(5) + ersProjectedConfigMapName = "testprojectedconfigmap-handler-" + testutil.RandSeq(5) + ersProjectedSecretName = "testprojectedsecret-handler-" + testutil.RandSeq(5) + ersConfigmapWithInitContainer = "testconfigmapInitContainerhandler-" + testutil.RandSeq(5) + ersSecretWithInitContainer = "testsecretWithInitContainer-handler-" + testutil.RandSeq(5) + ersSecretProviderClassWithInitContainer = "testsecretproviderclassWithInitContainer-handler-" + testutil.RandSeq(5) + ersProjectedConfigMapWithInitContainer = "testProjectedConfigMapWithInitContainer-handler" + testutil.RandSeq(5) + ersProjectedSecretWithInitContainer = "testProjectedSecretWithInitContainer-handler" + testutil.RandSeq(5) + ersConfigmapWithInitEnv = "configmapWithInitEnv-" + testutil.RandSeq(5) + ersSecretWithInitEnv = "secretWithInitEnv-handler-" + testutil.RandSeq(5) + ersConfigmapWithEnvName = "testconfigmapWithEnv-handler-" + testutil.RandSeq(5) + ersConfigmapWithEnvFromName = "testconfigmapWithEnvFrom-handler-" + testutil.RandSeq(5) + ersSecretWithEnvName = "testsecretWithEnv-handler-" + testutil.RandSeq(5) + ersSecretWithEnvFromName = "testsecretWithEnvFrom-handler-" + testutil.RandSeq(5) + ersConfigmapWithPodAnnotations = "testconfigmapPodAnnotations-handler-" + testutil.RandSeq(5) + ersConfigmapWithBothAnnotations = "testconfigmapBothAnnotations-handler-" + testutil.RandSeq(5) + ersConfigmapAnnotated = "testconfigmapAnnotated-handler-" + testutil.RandSeq(5) + ersSecretWithSecretAutoAnnotation = "testsecretwithsecretautoannotationdeployment-handler-" + testutil.RandSeq(5) + ersConfigmapWithConfigMapAutoAnnotation = "testconfigmapwithconfigmapautoannotationdeployment-handler-" + testutil.RandSeq(5) + ersSecretProviderClassWithSPCAutoAnnotation = "testsecretproviderclasswithspcautoannotationdeployment-handler-" + testutil.RandSeq(5) + ersSecretWithSecretExcludeAnnotation = "testsecretwithsecretexcludeannotationdeployment-handler-" + testutil.RandSeq(5) + ersConfigmapWithConfigMapExcludeAnnotation = "testconfigmapwithconfigmapexcludeannotationdeployment-handler-" + testutil.RandSeq(5) + ersSecretProviderClassWithExcludeSPCAnnotation = "testsecretproviderclasswithspcexcludeannotationdeployment-handler-" + testutil.RandSeq(5) + ersSecretProviderClassReloadedWithSameConfig = "testsecretproviderclassreloadedwithsameconfig-handler-" + testutil.RandSeq(5) + ersSecretProviderClassReloadedWithDifferentConfig = "testsecretproviderclassreloadedwithdifferentconfig-handler-" + testutil.RandSeq(5) ) func TestMain(m *testing.M) { @@ -110,6 +126,12 @@ func setupArs() { logrus.Errorf("Error in secret creation: %v", err) } + // Creating secretproviderclass + _, err = testutil.CreateSecretProviderClass(clients.CSIClient, arsNamespace, arsSecretProviderClassName, "testing") + if err != nil { + logrus.Errorf("Error in secretproviderclass creation: %v", err) + } + // Creating configmap will be used in projected volume _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsProjectedConfigMapName, 
"www.google.com") if err != nil { @@ -178,6 +200,12 @@ func setupArs() { logrus.Errorf("Error in secret creation: %v", err) } + // Creating secretproviderclass + _, err = testutil.CreateSecretProviderClass(clients.CSIClient, arsNamespace, arsSecretProviderClassWithInitContainer, "testing") + if err != nil { + logrus.Errorf("Error in secretproviderclass creation: %v", err) + } + _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithPodAnnotations, "www.google.com") if err != nil { logrus.Errorf("Error in configmap creation: %v", err) @@ -194,6 +222,12 @@ func setupArs() { logrus.Errorf("Error in secret creation: %v", err) } + // Creating secretproviderclass used with secretproviderclass auto annotation + _, err = testutil.CreateSecretProviderClass(clients.CSIClient, arsNamespace, arsSecretProviderClassWithSPCAutoAnnotation, "testing") + if err != nil { + logrus.Errorf("Error in secretproviderclass creation: %v", err) + } + // Creating configmap used with configmap auto annotation _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithConfigMapAutoAnnotation, "www.google.com") if err != nil { @@ -206,6 +240,24 @@ func setupArs() { logrus.Errorf("Error in secret creation: %v", err) } + // Creating secretproviderclass used with secret auto annotation + _, err = testutil.CreateSecretProviderClass(clients.CSIClient, arsNamespace, arsSecretProviderClassWithExcludeSPCAnnotation, "testing") + if err != nil { + logrus.Errorf("Error in secretproviderclass creation: %v", err) + } + + // Creating secretproviderclass to reload with same config + _, err = testutil.CreateSecretProviderClass(clients.CSIClient, arsNamespace, arsSecretProviderClassReloadedWithSameConfig, "testing") + if err != nil { + logrus.Errorf("Error in secretproviderclass creation: %v", err) + } + + // Creating secretproviderclass to reload with different config + _, err = testutil.CreateSecretProviderClass(clients.CSIClient, arsNamespace, arsSecretProviderClassReloadedWithDifferentConfig, "testing") + if err != nil { + logrus.Errorf("Error in secretproviderclass creation: %v", err) + } + // Creating configmap used with configmap auto annotation _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithExcludeConfigMapAnnotation, "www.google.com") if err != nil { @@ -254,6 +306,12 @@ func setupArs() { logrus.Errorf("Error in Deployment with secret creation: %v", err) } + // Creating Deployment with secretproviderclass mounted in init container + _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, arsSecretProviderClassWithInitContainer, arsNamespace, true) + if err != nil { + logrus.Errorf("Error in Deployment with secretproviderclass creation: %v", err) + } + // Creating Deployment with configmap mounted as Env in init container _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, arsConfigmapWithInitEnv, arsNamespace, false) if err != nil { @@ -272,6 +330,12 @@ func setupArs() { logrus.Errorf("Error in Deployment with secret creation: %v", err) } + // Creating Deployment with secretproviderclass + _, err = testutil.CreateDeployment(clients.KubernetesClient, arsSecretProviderClassName, arsNamespace, true) + if err != nil { + logrus.Errorf("Error in Deployment with secretproviderclass creation: %v", err) + } + // Creating Deployment with env var source as configmap _, err = testutil.CreateDeployment(clients.KubernetesClient, arsConfigmapWithEnvName, arsNamespace, false) if err != nil 
{ @@ -319,6 +383,12 @@ func setupArs() { logrus.Errorf("Error in Deployment with secret and with secret auto annotation: %v", err) } + // Creating Deployment with secretproviderclass and with secretproviderclass auto annotation + _, err = testutil.CreateDeploymentWithTypedAutoAnnotation(clients.KubernetesClient, arsSecretProviderClassWithSPCAutoAnnotation, arsNamespace, testutil.SecretProviderClassPodStatusResourceType) + if err != nil { + logrus.Errorf("Error in Deployment with secretproviderclass and with secretproviderclass auto annotation: %v", err) + } + // Creating Deployment with secret and with secret auto annotation _, err = testutil.CreateDeploymentWithTypedAutoAnnotation(clients.KubernetesClient, arsConfigmapWithConfigMapAutoAnnotation, arsNamespace, testutil.ConfigmapResourceType) if err != nil { @@ -326,11 +396,29 @@ func setupArs() { } // Creating Deployment with secret and exclude secret annotation - _, err = testutil.CreateDeploymentWithExcludeAnnotation(clients.KubernetesClient, arsSecretWithExcludeSecretAnnotation, arsNamespace, testutil.SecretResourceType) + _, err = testutil.CreateDeploymentWithExcludeAnnotation(clients.KubernetesClient, arsSecretWithExcludeSecretAnnotation, arsNamespace, testutil.ConfigmapResourceType) if err != nil { logrus.Errorf("Error in Deployment with secret and with secret exclude annotation: %v", err) } + // Creating Deployment with secretproviderclass and exclude secretproviderclass annotation + _, err = testutil.CreateDeploymentWithExcludeAnnotation(clients.KubernetesClient, arsSecretProviderClassWithExcludeSPCAnnotation, arsNamespace, testutil.SecretProviderClassPodStatusResourceType) + if err != nil { + logrus.Errorf("Error in Deployment with secretproviderclass and with secretproviderclass exclude annotation: %v", err) + } + + // Creating Deployment with secretproviderclass to reload with same config + _, err = testutil.CreateDeploymentWithTypedAutoAnnotation(clients.KubernetesClient, arsSecretProviderClassReloadedWithSameConfig, arsNamespace, testutil.SecretProviderClassPodStatusResourceType) + if err != nil { + logrus.Errorf("Error in Deployment with secretproviderclass to reload with same config: %v", err) + } + + // Creating Deployment with secretproviderclass to reload with different config + _, err = testutil.CreateDeploymentWithTypedAutoAnnotation(clients.KubernetesClient, arsSecretProviderClassReloadedWithDifferentConfig, arsNamespace, testutil.SecretProviderClassPodStatusResourceType) + if err != nil { + logrus.Errorf("Error in Deployment with secretproviderclass to reload with different config: %v", err) + } + // Creating Deployment with secret and exclude configmap annotation _, err = testutil.CreateDeploymentWithExcludeAnnotation(clients.KubernetesClient, arsConfigmapWithExcludeConfigMapAnnotation, arsNamespace, testutil.ConfigmapResourceType) if err != nil { @@ -349,6 +437,12 @@ func setupArs() { logrus.Errorf("Error in DaemonSet with secret creation: %v", err) } + // Creating DaemonSet with secretproviderclass + _, err = testutil.CreateDaemonSet(clients.KubernetesClient, arsSecretProviderClassName, arsNamespace, true) + if err != nil { + logrus.Errorf("Error in DaemonSet with secretproviderclass creation: %v", err) + } + // Creating DaemonSet with configmap in projected volume _, err = testutil.CreateDaemonSet(clients.KubernetesClient, arsProjectedConfigMapName, arsNamespace, true) if err != nil { @@ -385,6 +479,12 @@ func setupArs() { logrus.Errorf("Error in StatefulSet with secret creation: %v", err) } + // Creating 
StatefulSet with secretproviderclass + _, err = testutil.CreateStatefulSet(clients.KubernetesClient, arsSecretProviderClassName, arsNamespace, true) + if err != nil { + logrus.Errorf("Error in StatefulSet with secretproviderclass creation: %v", err) + } + // Creating StatefulSet with configmap in projected volume _, err = testutil.CreateStatefulSet(clients.KubernetesClient, arsProjectedConfigMapName, arsNamespace, true) if err != nil { @@ -436,6 +536,12 @@ func teardownArs() { logrus.Errorf("Error while deleting deployment with secret %v", deploymentError) } + // Deleting Deployment with secretproviderclass + deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsSecretProviderClassName) + if deploymentError != nil { + logrus.Errorf("Error while deleting deployment with secretproviderclass %v", deploymentError) + } + // Deleting Deployment with configmap in projected volume deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsProjectedConfigMapName) if deploymentError != nil { @@ -484,6 +590,12 @@ func teardownArs() { logrus.Errorf("Error while deleting deployment with secret mounted in init container %v", deploymentError) } + // Deleting Deployment with secretproviderclass mounted in init container + deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsSecretProviderClassWithInitContainer) + if deploymentError != nil { + logrus.Errorf("Error while deleting deployment with secretproviderclass mounted in init container %v", deploymentError) + } + // Deleting Deployment with configmap mounted as env in init container deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapWithInitEnv) if deploymentError != nil { @@ -532,6 +644,12 @@ func teardownArs() { logrus.Errorf("Error while deleting deployment with secret auto annotation %v", deploymentError) } + // Deleting Deployment with secretproviderclass and secretproviderclass auto annotation + deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsSecretProviderClassWithSPCAutoAnnotation) + if deploymentError != nil { + logrus.Errorf("Error while deleting deployment with secretproviderclass auto annotation %v", deploymentError) + } + // Deleting Deployment with configmap and configmap auto annotation deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapWithConfigMapAutoAnnotation) if deploymentError != nil { @@ -544,6 +662,24 @@ func teardownArs() { logrus.Errorf("Error while deleting deployment with secret auto annotation %v", deploymentError) } + // Deleting Deployment with secretproviderclass and exclude secretproviderclass annotation + deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsSecretProviderClassWithExcludeSPCAnnotation) + if deploymentError != nil { + logrus.Errorf("Error while deleting deployment with secretproviderclass auto annotation %v", deploymentError) + } + + // Deleting Deployment with secretproviderclass to reload with same config + deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsSecretProviderClassReloadedWithSameConfig) + if deploymentError != nil { + logrus.Errorf("Error while deleting deployment with secretproviderclass to reload with same config %v", deploymentError) + } + + // Deleting Deployment with secretproviderclass to reload with different config + deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, 
arsNamespace, arsSecretProviderClassReloadedWithDifferentConfig) + if deploymentError != nil { + logrus.Errorf("Error while deleting deployment with secretproviderclass to reload with different config %v", deploymentError) + } + // Deleting Deployment with configmap and exclude configmap annotation deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapWithExcludeConfigMapAnnotation) if deploymentError != nil { @@ -556,12 +692,18 @@ func teardownArs() { logrus.Errorf("Error while deleting daemonSet with configmap %v", daemonSetError) } - // Deleting Deployment with secret + // Deleting DeamonSet with secret daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, arsNamespace, arsSecretName) if daemonSetError != nil { logrus.Errorf("Error while deleting daemonSet with secret %v", daemonSetError) } + // Deleting DeamonSet with secretproviderclass + daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, arsNamespace, arsSecretProviderClassName) + if daemonSetError != nil { + logrus.Errorf("Error while deleting daemonSet with secretproviderclass %v", daemonSetError) + } + // Deleting DaemonSet with configmap in projected volume daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, arsNamespace, arsProjectedConfigMapName) if daemonSetError != nil { @@ -592,12 +734,18 @@ func teardownArs() { logrus.Errorf("Error while deleting statefulSet with configmap %v", statefulSetError) } - // Deleting Deployment with secret + // Deleting StatefulSet with secret statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, arsNamespace, arsSecretName) if statefulSetError != nil { logrus.Errorf("Error while deleting statefulSet with secret %v", statefulSetError) } + // Deleting StatefulSet with secretproviderclass + statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, arsNamespace, arsSecretProviderClassName) + if statefulSetError != nil { + logrus.Errorf("Error while deleting statefulSet with secretproviderclass %v", statefulSetError) + } + // Deleting StatefulSet with configmap in projected volume statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, arsNamespace, arsProjectedConfigMapName) if statefulSetError != nil { @@ -634,6 +782,12 @@ func teardownArs() { logrus.Errorf("Error while deleting the secret %v", err) } + // Deleting Secretproviderclass + err = testutil.DeleteSecretProviderClass(clients.CSIClient, arsNamespace, arsSecretProviderClassName) + if err != nil { + logrus.Errorf("Error while deleting the secretproviderclass %v", err) + } + // Deleting configmap used in projected volume err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsProjectedConfigMapName) if err != nil { @@ -682,6 +836,12 @@ func teardownArs() { logrus.Errorf("Error while deleting the secret used in init container %v", err) } + // Deleting Secretproviderclass used in init container + err = testutil.DeleteSecretProviderClass(clients.CSIClient, arsNamespace, arsSecretProviderClassWithInitContainer) + if err != nil { + logrus.Errorf("Error while deleting the secretproviderclass used in init container %v", err) + } + // Deleting Configmap used as env var source err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithEnvFromName) if err != nil { @@ -717,6 +877,12 @@ func teardownArs() { logrus.Errorf("Error while deleting the secret used with secret auto annotations: %v", err) } + // Deleting SecretProviderClass used with secretproviderclass auto 
annotation + err = testutil.DeleteSecretProviderClass(clients.CSIClient, arsNamespace, arsSecretProviderClassWithSPCAutoAnnotation) + if err != nil { + logrus.Errorf("Error while deleting the secretproviderclass used with secretproviderclass auto annotations: %v", err) + } + // Deleting ConfigMap used with configmap auto annotation err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithConfigMapAutoAnnotation) if err != nil { @@ -729,6 +895,24 @@ func teardownArs() { logrus.Errorf("Error while deleting the secret used with secret auto annotations: %v", err) } + // Deleting Secretproviderclass used with exclude secretproviderclass annotation + err = testutil.DeleteSecretProviderClass(clients.CSIClient, arsNamespace, arsSecretProviderClassWithExcludeSPCAnnotation) + if err != nil { + logrus.Errorf("Error while deleting the secretproviderclass used with secretproviderclass auto annotations: %v", err) + } + + // Deleting SecretProviderClass used with secretproviderclass to reload with same config + err = testutil.DeleteSecretProviderClass(clients.CSIClient, arsNamespace, arsSecretProviderClassReloadedWithSameConfig) + if err != nil { + logrus.Errorf("Error while deleting the secretproviderclass used with secretproviderclass to reload with same config: %v", err) + } + + // Deleting SecretProviderClass used with secretproviderclass to reload with different config + err = testutil.DeleteSecretProviderClass(clients.CSIClient, arsNamespace, arsSecretProviderClassReloadedWithDifferentConfig) + if err != nil { + logrus.Errorf("Error while deleting the secretproviderclass used with secretproviderclass to reload with different config: %v", err) + } + // Deleting ConfigMap used with exclude configmap annotation err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithExcludeConfigMapAnnotation) if err != nil { @@ -754,6 +938,12 @@ func setupErs() { logrus.Errorf("Error in secret creation: %v", err) } + // Creating secretproviderclass + _, err = testutil.CreateSecretProviderClass(clients.CSIClient, ersNamespace, ersSecretProviderClassName, "testing") + if err != nil { + logrus.Errorf("Error in secretproviderclass creation: %v", err) + } + // Creating configmap will be used in projected volume _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersProjectedConfigMapName, "www.google.com") if err != nil { @@ -822,6 +1012,12 @@ func setupErs() { logrus.Errorf("Error in secret creation: %v", err) } + // Creating secretproviderclass + _, err = testutil.CreateSecretProviderClass(clients.CSIClient, ersNamespace, ersSecretProviderClassWithInitContainer, "testing") + if err != nil { + logrus.Errorf("Error in secretproviderclass creation: %v", err) + } + _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithPodAnnotations, "www.google.com") if err != nil { logrus.Errorf("Error in configmap creation: %v", err) @@ -839,6 +1035,12 @@ func setupErs() { logrus.Errorf("Error in configmap creation: %v", err) } + // Creating secretproviderclass used with secretproviderclass auto annotation + _, err = testutil.CreateSecretProviderClass(clients.CSIClient, ersNamespace, ersSecretProviderClassWithSPCAutoAnnotation, "testing") + if err != nil { + logrus.Errorf("Error in secretproviderclass creation: %v", err) + } + // Creating secret used with secret exclude annotation _, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersSecretWithSecretExcludeAnnotation, data) if err != nil { @@ -851,6 
+1053,24 @@ func setupErs() { logrus.Errorf("Error in configmap creation: %v", err) } + // Creating secretproviderclass used with secret exclude annotation + _, err = testutil.CreateSecretProviderClass(clients.CSIClient, ersNamespace, ersSecretProviderClassWithExcludeSPCAnnotation, "testing") + if err != nil { + logrus.Errorf("Error in secretproviderclass creation: %v", err) + } + + // Creating secretproviderclass to reload with same config + _, err = testutil.CreateSecretProviderClass(clients.CSIClient, ersNamespace, ersSecretProviderClassReloadedWithSameConfig, "testing") + if err != nil { + logrus.Errorf("Error in secretproviderclass creation: %v", err) + } + + // Creating secretproviderclass to reload with different config + _, err = testutil.CreateSecretProviderClass(clients.CSIClient, ersNamespace, ersSecretProviderClassReloadedWithDifferentConfig, "testing") + if err != nil { + logrus.Errorf("Error in secretproviderclass creation: %v", err) + } + // Creating Deployment with configmap _, err = testutil.CreateDeployment(clients.KubernetesClient, ersConfigmapName, ersNamespace, true) if err != nil { @@ -893,6 +1113,12 @@ func setupErs() { logrus.Errorf("Error in Deployment with secret creation: %v", err) } + // Creating Deployment with secretproviderclass mounted in init container + _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, ersSecretProviderClassWithInitContainer, ersNamespace, true) + if err != nil { + logrus.Errorf("Error in Deployment with secretproviderclass creation: %v", err) + } + // Creating Deployment with configmap mounted as Env in init container _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, ersConfigmapWithInitEnv, ersNamespace, false) if err != nil { @@ -911,6 +1137,12 @@ func setupErs() { logrus.Errorf("Error in Deployment with secret creation: %v", err) } + // Creating Deployment with secretproviderclass + _, err = testutil.CreateDeployment(clients.KubernetesClient, ersSecretProviderClassName, ersNamespace, true) + if err != nil { + logrus.Errorf("Error in Deployment with secretproviderclass creation: %v", err) + } + // Creating Deployment with env var source as configmap _, err = testutil.CreateDeployment(clients.KubernetesClient, ersConfigmapWithEnvName, ersNamespace, false) if err != nil { @@ -958,6 +1190,12 @@ func setupErs() { logrus.Errorf("Error in Deployment with configmap and with configmap auto annotation: %v", err) } + // Creating Deployment with secretproviderclass and with secretproviderclass auto annotation + _, err = testutil.CreateDeploymentWithTypedAutoAnnotation(clients.KubernetesClient, ersSecretProviderClassWithSPCAutoAnnotation, ersNamespace, testutil.SecretProviderClassPodStatusResourceType) + if err != nil { + logrus.Errorf("Error in Deployment with secretproviderclass and with secretproviderclass auto annotation: %v", err) + } + // Creating Deployment with secret and with secret exclude annotation _, err = testutil.CreateDeploymentWithExcludeAnnotation(clients.KubernetesClient, ersSecretWithSecretExcludeAnnotation, ersNamespace, testutil.SecretResourceType) if err != nil { @@ -970,6 +1208,12 @@ func setupErs() { logrus.Errorf("Error in Deployment with configmap and with configmap exclude annotation: %v", err) } + // Creating Deployment with secretproviderclass and with secretproviderclass exclude annotation + _, err = testutil.CreateDeploymentWithExcludeAnnotation(clients.KubernetesClient, ersSecretProviderClassWithExcludeSPCAnnotation, ersNamespace, 
testutil.SecretProviderClassPodStatusResourceType) + if err != nil { + logrus.Errorf("Error in Deployment with secretproviderclass and with secretproviderclass exclude annotation: %v", err) + } + // Creating DaemonSet with configmap _, err = testutil.CreateDaemonSet(clients.KubernetesClient, ersConfigmapName, ersNamespace, true) if err != nil { @@ -982,6 +1226,12 @@ func setupErs() { logrus.Errorf("Error in DaemonSet with secret creation: %v", err) } + // Creating DaemonSet with secretproviderclass + _, err = testutil.CreateDaemonSet(clients.KubernetesClient, ersSecretProviderClassName, ersNamespace, true) + if err != nil { + logrus.Errorf("Error in DaemonSet with secretproviderclass creation: %v", err) + } + // Creating DaemonSet with configmap in projected volume _, err = testutil.CreateDaemonSet(clients.KubernetesClient, ersProjectedConfigMapName, ersNamespace, true) if err != nil { @@ -1018,6 +1268,12 @@ func setupErs() { logrus.Errorf("Error in StatefulSet with secret creation: %v", err) } + // Creating StatefulSet with secretproviderclass + _, err = testutil.CreateStatefulSet(clients.KubernetesClient, ersSecretProviderClassName, ersNamespace, true) + if err != nil { + logrus.Errorf("Error in StatefulSet with secretproviderclass creation: %v", err) + } + // Creating StatefulSet with configmap in projected volume _, err = testutil.CreateStatefulSet(clients.KubernetesClient, ersProjectedConfigMapName, ersNamespace, true) if err != nil { @@ -1053,6 +1309,18 @@ func setupErs() { if err != nil { logrus.Errorf("Error in Deployment with both annotations: %v", err) } + + // Creating Deployment with secretproviderclass to reload with same config + _, err = testutil.CreateDeploymentWithTypedAutoAnnotation(clients.KubernetesClient, ersSecretProviderClassReloadedWithSameConfig, ersNamespace, testutil.SecretProviderClassPodStatusResourceType) + if err != nil { + logrus.Errorf("Error in Deployment with secretproviderclass to reload with same config: %v", err) + } + + // Creating Deployment with secretproviderclass to reload with different config + _, err = testutil.CreateDeploymentWithTypedAutoAnnotation(clients.KubernetesClient, ersSecretProviderClassReloadedWithDifferentConfig, ersNamespace, testutil.SecretProviderClassPodStatusResourceType) + if err != nil { + logrus.Errorf("Error in Deployment with secretproviderclass to reload with different config: %v", err) + } } func teardownErs() { @@ -1068,6 +1336,12 @@ func teardownErs() { logrus.Errorf("Error while deleting deployment with secret %v", deploymentError) } + // Deleting Deployment with secretproviderclass + deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersSecretProviderClassName) + if deploymentError != nil { + logrus.Errorf("Error while deleting deployment with secretprovider class %v", deploymentError) + } + // Deleting Deployment with configmap in projected volume deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersProjectedConfigMapName) if deploymentError != nil { @@ -1116,6 +1390,12 @@ func teardownErs() { logrus.Errorf("Error while deleting deployment with secret mounted in init container %v", deploymentError) } + // Deleting Deployment with secretproviderclass mounted in init container + deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersSecretProviderClassWithInitContainer) + if deploymentError != nil { + logrus.Errorf("Error while deleting deployment with secretproviderclass mounted in init container %v", deploymentError) 
+ } + // Deleting Deployment with configmap mounted as env in init container deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapWithInitEnv) if deploymentError != nil { @@ -1170,6 +1450,12 @@ func teardownErs() { logrus.Errorf("Error while deleting deployment with configmap auto annotation %v", deploymentError) } + // Deleting Deployment with secretproviderclass and secretproviderclass auto annotation + deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersSecretProviderClassWithSPCAutoAnnotation) + if deploymentError != nil { + logrus.Errorf("Error while deleting deployment with secretproviderclass auto annotation %v", deploymentError) + } + // Deleting Deployment with secret and secret exclude annotation deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersSecretWithSecretExcludeAnnotation) if deploymentError != nil { @@ -1182,18 +1468,42 @@ func teardownErs() { logrus.Errorf("Error while deleting deployment with configmap exclude annotation %v", deploymentError) } + // Deleting Deployment with secretproviderclass and secretproviderclass exclude annotation + deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersSecretProviderClassWithExcludeSPCAnnotation) + if deploymentError != nil { + logrus.Errorf("Error while deleting deployment with secretproviderclass exclude annotation %v", deploymentError) + } + + // Deleting Deployment with secretproviderclass to reload with same config + deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersSecretProviderClassReloadedWithSameConfig) + if deploymentError != nil { + logrus.Errorf("Error while deleting deployment with secretproviderclass to reload with same config %v", deploymentError) + } + + // Deleting Deployment with secretproviderclass to reload with different config + deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersSecretProviderClassReloadedWithDifferentConfig) + if deploymentError != nil { + logrus.Errorf("Error while deleting deployment with secretproviderclass to reload with different config %v", deploymentError) + } + // Deleting DaemonSet with configmap daemonSetError := testutil.DeleteDaemonSet(clients.KubernetesClient, ersNamespace, ersConfigmapName) if daemonSetError != nil { logrus.Errorf("Error while deleting daemonSet with configmap %v", daemonSetError) } - // Deleting Deployment with secret + // Deleting DaemonSet with secret daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, ersNamespace, ersSecretName) if daemonSetError != nil { logrus.Errorf("Error while deleting daemonSet with secret %v", daemonSetError) } + // Deleting DaemonSet with secretproviderclass + daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, ersNamespace, ersSecretProviderClassName) + if daemonSetError != nil { + logrus.Errorf("Error while deleting daemonSet with secretproviderclass %v", daemonSetError) + } + // Deleting DaemonSet with configmap in projected volume daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, ersNamespace, ersProjectedConfigMapName) if daemonSetError != nil { @@ -1224,12 +1534,18 @@ func teardownErs() { logrus.Errorf("Error while deleting statefulSet with configmap %v", statefulSetError) } - // Deleting Deployment with secret + // Deleting StatefulSet with secret statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, ersNamespace, ersSecretName) if 
statefulSetError != nil { logrus.Errorf("Error while deleting statefulSet with secret %v", statefulSetError) } + // Deleting StatefulSet with secretproviderclass + statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, ersNamespace, ersSecretProviderClassName) + if statefulSetError != nil { + logrus.Errorf("Error while deleting statefulSet with secretproviderclass %v", statefulSetError) + } + // Deleting StatefulSet with configmap in projected volume statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, ersNamespace, ersProjectedConfigMapName) if statefulSetError != nil { @@ -1266,6 +1582,12 @@ func teardownErs() { logrus.Errorf("Error while deleting the secret %v", err) } + // Deleting SecretProviderClass + err = testutil.DeleteSecretProviderClass(clients.CSIClient, ersNamespace, ersSecretProviderClassName) + if err != nil { + logrus.Errorf("Error while deleting the secretproviderclass %v", err) + } + // Deleting configmap used in projected volume err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersProjectedConfigMapName) if err != nil { @@ -1314,6 +1636,12 @@ func teardownErs() { logrus.Errorf("Error while deleting the secret used in init container %v", err) } + // Deleting SecretProviderClass used in init container + err = testutil.DeleteSecretProviderClass(clients.CSIClient, ersNamespace, ersSecretProviderClassWithInitContainer) + if err != nil { + logrus.Errorf("Error while deleting the secretproviderclass used in init container %v", err) + } + // Deleting Configmap used as env var source err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithEnvFromName) if err != nil { @@ -1355,6 +1683,12 @@ func teardownErs() { logrus.Errorf("Error while deleting the configmap used with configmap auto annotation: %v", err) } + // Deleting SecretProviderClass used with secretproviderclass auto annotation + err = testutil.DeleteSecretProviderClass(clients.CSIClient, ersNamespace, ersSecretProviderClassWithSPCAutoAnnotation) + if err != nil { + logrus.Errorf("Error while deleting the secretproviderclass used with secretproviderclass auto annotation: %v", err) + } + // Deleting Secret used with secret exclude annotation err = testutil.DeleteSecret(clients.KubernetesClient, ersNamespace, ersSecretWithSecretExcludeAnnotation) if err != nil { @@ -1367,6 +1701,24 @@ func teardownErs() { logrus.Errorf("Error while deleting the configmap used with configmap exclude annotation: %v", err) } + // Deleting SecretProviderClass used with secretproviderclass exclude annotation + err = testutil.DeleteSecretProviderClass(clients.CSIClient, ersNamespace, ersSecretProviderClassWithExcludeSPCAnnotation) + if err != nil { + logrus.Errorf("Error while deleting the secretproviderclass used with secretproviderclass exclude annotation: %v", err) + } + + // Deleting SecretProviderClass used with secretproviderclass to reload with same config + err = testutil.DeleteSecretProviderClass(clients.CSIClient, ersNamespace, ersSecretProviderClassReloadedWithSameConfig) + if err != nil { + logrus.Errorf("Error while deleting the secretproviderclass used with secretproviderclass to reload with same config: %v", err) + } + + // Deleting SecretProviderClass used with secretproviderclass to reload with different config + err = testutil.DeleteSecretProviderClass(clients.CSIClient, ersNamespace, ersSecretProviderClassReloadedWithDifferentConfig) + if err != nil { + logrus.Errorf("Error while deleting the secretproviderclass used with secretproviderclass 
to reload with different config: %v", err) + } + // Deleting namespace testutil.DeleteNamespace(ersNamespace, clients.KubernetesClient) @@ -1838,6 +2190,38 @@ func TestRollingUpgradeForDeploymentWithSecretUsingArs(t *testing.T) { testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) } +func TestRollingUpgradeForDeploymentWithSecretProviderClassUsingArs(t *testing.T) { + options.ReloadStrategy = constants.AnnotationsReloadStrategy + envVarPostfix := constants.SecretProviderClassEnvVarPostfix + + shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, arsNamespace, arsSecretProviderClassName, "testing1") + config := getConfigWithAnnotations(envVarPostfix, arsSecretProviderClassName, shaData, options.SecretProviderClassUpdateOnChangeAnnotation, options.SecretProviderClassReloaderAutoAnnotation) + deploymentFuncs := GetDeploymentRollingUpgradeFuncs() + collectors := getCollectors() + + err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) + time.Sleep(5 * time.Second) + if err != nil { + t.Errorf("Rolling upgrade failed for Deployment with SecretProviderClass") + } + + logrus.Infof("Verifying deployment update") + updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) + if !updated { + t.Errorf("Deployment was not updated") + } + + if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { + t.Errorf("Counter was not increased") + } + + if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { + t.Errorf("Counter by namespace was not increased") + } + + testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) +} + func TestRollingUpgradeForDeploymentWithSecretInProjectedVolumeUsingArs(t *testing.T) { options.ReloadStrategy = constants.AnnotationsReloadStrategy envVarPostfix := constants.SecretEnvVarPostfix @@ -1902,6 +2286,38 @@ func TestRollingUpgradeForDeploymentWithSecretinInitContainerUsingArs(t *testing testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) } +func TestRollingUpgradeForDeploymentWithSecretproviderclassInInitContainerUsingArs(t *testing.T) { + options.ReloadStrategy = constants.AnnotationsReloadStrategy + envVarPostfix := constants.SecretProviderClassEnvVarPostfix + + shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, arsNamespace, arsSecretProviderClassWithInitContainer, "testing1") + config := getConfigWithAnnotations(envVarPostfix, arsSecretProviderClassWithInitContainer, shaData, options.SecretProviderClassUpdateOnChangeAnnotation, options.SecretProviderClassReloaderAutoAnnotation) + deploymentFuncs := GetDeploymentRollingUpgradeFuncs() + collectors := getCollectors() + + err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) + time.Sleep(5 * time.Second) + if err != nil { + t.Errorf("Rolling upgrade failed for Deployment with SecretProviderClass") + } + + logrus.Infof("Verifying deployment update") + updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) + if !updated { + t.Errorf("Deployment was not updated") + } + + if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { + t.Errorf("Counter was not increased") + } + + if 
promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { + t.Errorf("Counter by namespace was not increased") + } + + testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) +} + func TestRollingUpgradeForDeploymentWithSecretInProjectedVolumeinInitContainerUsingArs(t *testing.T) { options.ReloadStrategy = constants.AnnotationsReloadStrategy envVarPostfix := constants.SecretEnvVarPostfix @@ -2050,6 +2466,100 @@ func TestRollingUpgradeForDeploymentWithSecretExcludeAnnotationUsingArs(t *testi } } +func TestRollingUpgradeForDeploymentWithSecretproviderclassExcludeAnnotationUsingArs(t *testing.T) { + options.ReloadStrategy = constants.AnnotationsReloadStrategy + envVarPostfix := constants.SecretProviderClassEnvVarPostfix + + shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, arsNamespace, arsSecretProviderClassWithExcludeSPCAnnotation, "testing1") + config := getConfigWithAnnotations(envVarPostfix, arsSecretProviderClassWithExcludeSPCAnnotation, shaData, "", options.SecretProviderClassReloaderAutoAnnotation) + deploymentFuncs := GetDeploymentRollingUpgradeFuncs() + collectors := getCollectors() + + err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) + if err != nil { + t.Errorf("Rolling upgrade failed for Deployment with SecretProviderClass") + } + + logrus.Infof("Verifying deployment did not update") + updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) + if updated { + t.Errorf("Deployment which had to be exluded was updated") + } +} + +func TestRollingUpgradeForDeploymentWithSecretProviderClassReloadedWithSameConfigUsingArs(t *testing.T) { + options.ReloadStrategy = constants.AnnotationsReloadStrategy + envVarPostfix := constants.SecretProviderClassEnvVarPostfix + + shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, arsNamespace, arsSecretProviderClassReloadedWithSameConfig, "testing1") + config := getConfigWithAnnotations(envVarPostfix, arsSecretProviderClassReloadedWithSameConfig, shaData, "", options.SecretProviderClassReloaderAutoAnnotation) + deploymentFuncs := GetDeploymentRollingUpgradeFuncs() + collectors := getCollectors() + + err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) + time.Sleep(5 * time.Second) + if err != nil { + t.Errorf("Rolling upgrade failed for Deployment with same config") + } + + logrus.Infof("Verifying deployment did update") + updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) + if !updated { + t.Errorf("Deployment was not updated") + } + + logrus.Infof("Performing reload using same config") + err = PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) + time.Sleep(5 * time.Second) + if err != nil { + t.Errorf("Second rolling upgrade failed for Deployment with same config") + } + + logrus.Infof("Verifying second reload did not reload") + if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 && + promtestutil.ToFloat64(collectors.Reloaded.With(labelFailed)) != 0 { + t.Errorf("Second reload with same config updated Deployment") + } +} + +func TestRollingUpgradeForDeploymentWithSecretProviderClassReloadedWithDifferentConfigUsingArs(t *testing.T) { + options.ReloadStrategy = constants.AnnotationsReloadStrategy + envVarPostfix := 
constants.SecretProviderClassEnvVarPostfix + + shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, arsNamespace, arsSecretProviderClassReloadedWithDifferentConfig, "testing1") + config := getConfigWithAnnotations(envVarPostfix, arsSecretProviderClassReloadedWithDifferentConfig, shaData, "", options.SecretProviderClassReloaderAutoAnnotation) + deploymentFuncs := GetDeploymentRollingUpgradeFuncs() + collectors := getCollectors() + + err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) + time.Sleep(5 * time.Second) + if err != nil { + t.Errorf("Rolling upgrade failed for Deployment with different config") + } + + logrus.Infof("Verifying deployment did update") + updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) + if !updated { + t.Errorf("Deployment was not updated") + } + + logrus.Infof("Applying different config") + shaData = testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, arsNamespace, arsSecretProviderClassReloadedWithDifferentConfig, "testing2") + config.SHAValue = shaData + + err = PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) + time.Sleep(5 * time.Second) + if err != nil { + t.Errorf("Second rolling upgrade failed for Deployment with different config") + } + + logrus.Infof("Verifying deployment did update") + if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 2 && + promtestutil.ToFloat64(collectors.Reloaded.With(labelFailed)) != 0 { + t.Errorf("Second reload with different config did not update Deployment") + } +} + func TestRollingUpgradeForDeploymentWithSecretAutoAnnotationUsingArs(t *testing.T) { options.ReloadStrategy = constants.AnnotationsReloadStrategy envVarPostfix := constants.SecretEnvVarPostfix @@ -2082,6 +2592,38 @@ func TestRollingUpgradeForDeploymentWithSecretAutoAnnotationUsingArs(t *testing. 
testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) } +func TestRollingUpgradeForDeploymentWithSecretProviderClassAutoAnnotationUsingArs(t *testing.T) { + options.ReloadStrategy = constants.AnnotationsReloadStrategy + envVarPostfix := constants.SecretProviderClassEnvVarPostfix + + shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, arsNamespace, arsSecretProviderClassWithSPCAutoAnnotation, "testing1") + config := getConfigWithAnnotations(envVarPostfix, arsSecretProviderClassWithSPCAutoAnnotation, shaData, "", options.SecretProviderClassReloaderAutoAnnotation) + deploymentFuncs := GetDeploymentRollingUpgradeFuncs() + collectors := getCollectors() + + err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) + time.Sleep(5 * time.Second) + if err != nil { + t.Errorf("Rolling upgrade failed for Deployment with SecretProviderClass") + } + + logrus.Infof("Verifying deployment update") + updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) + if !updated { + t.Errorf("Deployment was not updated") + } + + if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { + t.Errorf("Counter was not increased") + } + + if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { + t.Errorf("Counter by namespace was not increased") + } + + testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) +} + func TestRollingUpgradeForDeploymentWithExcludeConfigMapAnnotationUsingArs(t *testing.T) { options.ReloadStrategy = constants.AnnotationsReloadStrategy envVarPostfix := constants.ConfigmapEnvVarPostfix @@ -2262,6 +2804,38 @@ func TestRollingUpgradeForDaemonSetWithSecretUsingArs(t *testing.T) { testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) } +func TestRollingUpgradeForDaemonSetWithSecretProviderClassUsingArs(t *testing.T) { + options.ReloadStrategy = constants.AnnotationsReloadStrategy + envVarPostfix := constants.SecretProviderClassEnvVarPostfix + + shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, arsNamespace, arsSecretProviderClassName, "testing1") + config := getConfigWithAnnotations(envVarPostfix, arsSecretProviderClassName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) + daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() + collectors := getCollectors() + + err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) + time.Sleep(5 * time.Second) + if err != nil { + t.Errorf("Rolling upgrade failed for DaemonSet with SecretProviderClass") + } + + logrus.Infof("Verifying daemonSet update") + updated := testutil.VerifyResourceAnnotationUpdate(clients, config, daemonSetFuncs) + if !updated { + t.Errorf("DaemonSet was not updated") + } + + if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { + t.Errorf("Counter was not increased") + } + + if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { + t.Errorf("Counter by namespace was not increased") + } + + testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) +} + func TestRollingUpgradeForDaemonSetWithSecretInProjectedVolumeUsingArs(t *testing.T) 
{ options.ReloadStrategy = constants.AnnotationsReloadStrategy envVarPostfix := constants.SecretEnvVarPostfix @@ -2390,6 +2964,38 @@ func TestRollingUpgradeForStatefulSetWithSecretUsingArs(t *testing.T) { testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) } +func TestRollingUpgradeForStatefulSetWithSecretProviderClassUsingArs(t *testing.T) { + options.ReloadStrategy = constants.AnnotationsReloadStrategy + envVarPostfix := constants.SecretProviderClassEnvVarPostfix + + shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, arsNamespace, arsSecretProviderClassName, "testing1") + config := getConfigWithAnnotations(envVarPostfix, arsSecretProviderClassName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) + statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs() + collectors := getCollectors() + + err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) + time.Sleep(5 * time.Second) + if err != nil { + t.Errorf("Rolling upgrade failed for StatefulSet with SecretProviderClass") + } + + logrus.Infof("Verifying statefulSet update") + updated := testutil.VerifyResourceAnnotationUpdate(clients, config, statefulSetFuncs) + if !updated { + t.Errorf("StatefulSet was not updated") + } + + if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { + t.Errorf("Counter was not increased") + } + + if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { + t.Errorf("Counter by namespace was not increased") + } + + testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) +} + func TestRollingUpgradeForStatefulSetWithSecretInProjectedVolumeUsingArs(t *testing.T) { options.ReloadStrategy = constants.AnnotationsReloadStrategy envVarPostfix := constants.SecretEnvVarPostfix @@ -2880,6 +3486,38 @@ func TestRollingUpgradeForDeploymentWithSecretUsingErs(t *testing.T) { testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) } +func TestRollingUpgradeForDeploymentWithSecretProviderClassUsingErs(t *testing.T) { + options.ReloadStrategy = constants.EnvVarsReloadStrategy + envVarPostfix := constants.SecretProviderClassEnvVarPostfix + + shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, ersNamespace, ersSecretProviderClassName, "testing1") + config := getConfigWithAnnotations(envVarPostfix, ersSecretProviderClassName, shaData, options.SecretProviderClassUpdateOnChangeAnnotation, options.SecretProviderClassReloaderAutoAnnotation) + deploymentFuncs := GetDeploymentRollingUpgradeFuncs() + collectors := getCollectors() + + err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) + time.Sleep(5 * time.Second) + if err != nil { + t.Errorf("Rolling upgrade failed for Deployment with SecretProviderClass") + } + + logrus.Infof("Verifying deployment update") + updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) + if !updated { + t.Errorf("Deployment was not updated") + } + + if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { + t.Errorf("Counter was not increased") + } + + if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { + t.Errorf("Counter by 
namespace was not increased") + } + + testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) +} + func TestRollingUpgradeForDeploymentWithSecretInProjectedVolumeUsingErs(t *testing.T) { options.ReloadStrategy = constants.EnvVarsReloadStrategy envVarPostfix := constants.SecretEnvVarPostfix @@ -2944,6 +3582,38 @@ func TestRollingUpgradeForDeploymentWithSecretinInitContainerUsingErs(t *testing testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) } +func TestRollingUpgradeForDeploymentWithSecretProviderClassinInitContainerUsingErs(t *testing.T) { + options.ReloadStrategy = constants.EnvVarsReloadStrategy + envVarPostfix := constants.SecretProviderClassEnvVarPostfix + + shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, ersNamespace, ersSecretProviderClassWithInitContainer, "testing1") + config := getConfigWithAnnotations(envVarPostfix, ersSecretProviderClassWithInitContainer, shaData, options.SecretProviderClassUpdateOnChangeAnnotation, options.SecretProviderClassReloaderAutoAnnotation) + deploymentFuncs := GetDeploymentRollingUpgradeFuncs() + collectors := getCollectors() + + err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) + time.Sleep(5 * time.Second) + if err != nil { + t.Errorf("Rolling upgrade failed for Deployment with SecretProviderClass") + } + + logrus.Infof("Verifying deployment update") + updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) + if !updated { + t.Errorf("Deployment was not updated") + } + + if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { + t.Errorf("Counter was not increased") + } + + if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { + t.Errorf("Counter by namespace was not increased") + } + + testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) +} + func TestRollingUpgradeForDeploymentWithSecretInProjectedVolumeinInitContainerUsingErs(t *testing.T) { options.ReloadStrategy = constants.EnvVarsReloadStrategy envVarPostfix := constants.SecretEnvVarPostfix @@ -3094,6 +3764,101 @@ func TestRollingUpgradeForDeploymentWithSecretExcludeAnnotationUsingErs(t *testi } } +func TestRollingUpgradeForDeploymentWithSecretProviderClassExcludeAnnotationUsingErs(t *testing.T) { + options.ReloadStrategy = constants.EnvVarsReloadStrategy + envVarPostfix := constants.SecretProviderClassEnvVarPostfix + + shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, ersNamespace, ersSecretProviderClassWithExcludeSPCAnnotation, "testing1") + config := getConfigWithAnnotations(envVarPostfix, ersSecretProviderClassWithExcludeSPCAnnotation, shaData, "", options.SecretProviderClassReloaderAutoAnnotation) + deploymentFuncs := GetDeploymentRollingUpgradeFuncs() + collectors := getCollectors() + + err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) + time.Sleep(5 * time.Second) + if err != nil { + t.Errorf("Rolling upgrade failed for Deployment with exclude Secret") + } + + logrus.Infof("Verifying deployment did not update") + updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) + if updated { + t.Errorf("Deployment that had to be excluded was updated") + } +} + +func 
TestRollingUpgradeForDeploymentWithSecretProviderClassReloadedWithSameConfigUsingErs(t *testing.T) { + options.ReloadStrategy = constants.EnvVarsReloadStrategy + envVarPostfix := constants.SecretProviderClassEnvVarPostfix + + shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, ersNamespace, ersSecretProviderClassReloadedWithSameConfig, "testing1") + config := getConfigWithAnnotations(envVarPostfix, ersSecretProviderClassReloadedWithSameConfig, shaData, "", options.SecretProviderClassReloaderAutoAnnotation) + deploymentFuncs := GetDeploymentRollingUpgradeFuncs() + collectors := getCollectors() + + err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) + time.Sleep(5 * time.Second) + if err != nil { + t.Errorf("Rolling upgrade failed for Deployment with same config") + } + + logrus.Infof("Verifying deployment did update") + updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) + if !updated { + t.Errorf("Deployment was not updated") + } + + logrus.Infof("Performing reload using same config") + err = PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) + time.Sleep(5 * time.Second) + if err != nil { + t.Errorf("Second rolling upgrade failed for Deployment with same config") + } + + logrus.Infof("Verifying second reload did not reload") + if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 && + promtestutil.ToFloat64(collectors.Reloaded.With(labelFailed)) != 0 { + t.Errorf("Second reload with same config updated Deployment") + } +} + +func TestRollingUpgradeForDeploymentWithSecretProviderClassReloadedWithDifferentConfigUsingErs(t *testing.T) { + options.ReloadStrategy = constants.EnvVarsReloadStrategy + envVarPostfix := constants.SecretProviderClassEnvVarPostfix + + shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, ersNamespace, ersSecretProviderClassReloadedWithDifferentConfig, "testing1") + config := getConfigWithAnnotations(envVarPostfix, ersSecretProviderClassReloadedWithDifferentConfig, shaData, "", options.SecretProviderClassReloaderAutoAnnotation) + deploymentFuncs := GetDeploymentRollingUpgradeFuncs() + collectors := getCollectors() + + err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) + time.Sleep(5 * time.Second) + if err != nil { + t.Errorf("Rolling upgrade failed for Deployment with different config") + } + + logrus.Infof("Verifying deployment did update") + updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) + if !updated { + t.Errorf("Deployment was not updated") + } + + logrus.Infof("Applying different config") + shaData = testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, ersNamespace, ersSecretProviderClassReloadedWithDifferentConfig, "testing2") + config.SHAValue = shaData + + err = PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) + time.Sleep(5 * time.Second) + if err != nil { + t.Errorf("Second rolling upgrade failed for Deployment with different config") + } + + logrus.Infof("Verifying deployment did update") + if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 2 && + promtestutil.ToFloat64(collectors.Reloaded.With(labelFailed)) != 0 { + t.Errorf("Second reload with different config did not update Deployment") + } +} + func 
TestRollingUpgradeForDeploymentWithSecretAutoAnnotationUsingErs(t *testing.T) { options.ReloadStrategy = constants.EnvVarsReloadStrategy envVarPostfix := constants.SecretEnvVarPostfix @@ -3126,6 +3891,38 @@ func TestRollingUpgradeForDeploymentWithSecretAutoAnnotationUsingErs(t *testing. testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) } +func TestRollingUpgradeForDeploymentWithSecretProviderClassAutoAnnotationUsingErs(t *testing.T) { + options.ReloadStrategy = constants.EnvVarsReloadStrategy + envVarPostfix := constants.SecretProviderClassEnvVarPostfix + + shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, ersNamespace, ersSecretProviderClassWithSPCAutoAnnotation, "testing1") + config := getConfigWithAnnotations(envVarPostfix, ersSecretProviderClassWithSPCAutoAnnotation, shaData, "", options.SecretProviderClassReloaderAutoAnnotation) + deploymentFuncs := GetDeploymentRollingUpgradeFuncs() + collectors := getCollectors() + + err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) + time.Sleep(5 * time.Second) + if err != nil { + t.Errorf("Rolling upgrade failed for Deployment with SecretProviderClass") + } + + logrus.Infof("Verifying deployment update") + updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) + if !updated { + t.Errorf("Deployment was not updated") + } + + if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { + t.Errorf("Counter was not increased") + } + + if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { + t.Errorf("Counter by namespace was not increased") + } + + testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) +} + func TestRollingUpgradeForDeploymentWithConfigMapExcludeAnnotationUsingErs(t *testing.T) { options.ReloadStrategy = constants.EnvVarsReloadStrategy envVarPostfix := constants.ConfigmapEnvVarPostfix @@ -3308,6 +4105,38 @@ func TestRollingUpgradeForDaemonSetWithSecretUsingErs(t *testing.T) { testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) } +func TestRollingUpgradeForDaemonSetWithSecretProviderClassUsingErs(t *testing.T) { + options.ReloadStrategy = constants.EnvVarsReloadStrategy + envVarPostfix := constants.SecretProviderClassEnvVarPostfix + + shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, ersNamespace, ersSecretProviderClassName, "testing1") + config := getConfigWithAnnotations(envVarPostfix, ersSecretProviderClassName, shaData, options.SecretProviderClassUpdateOnChangeAnnotation, options.SecretProviderClassReloaderAutoAnnotation) + daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() + collectors := getCollectors() + + err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) + time.Sleep(5 * time.Second) + if err != nil { + t.Errorf("Rolling upgrade failed for DaemonSet with SecretProviderClass") + } + + logrus.Infof("Verifying daemonSet update") + updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, daemonSetFuncs) + if !updated { + t.Errorf("DaemonSet was not updated") + } + + if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { + t.Errorf("Counter was not increased") + } + + if 
promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { + t.Errorf("Counter by namespace was not increased") + } + + testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) +} + func TestRollingUpgradeForDaemonSetWithSecretInProjectedVolumeUsingErs(t *testing.T) { options.ReloadStrategy = constants.EnvVarsReloadStrategy envVarPostfix := constants.SecretEnvVarPostfix @@ -3436,6 +4265,38 @@ func TestRollingUpgradeForStatefulSetWithSecretUsingErs(t *testing.T) { testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) } +func TestRollingUpgradeForStatefulSetWithSecretProviderClassUsingErs(t *testing.T) { + options.ReloadStrategy = constants.EnvVarsReloadStrategy + envVarPostfix := constants.SecretProviderClassEnvVarPostfix + + shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, ersNamespace, ersSecretProviderClassName, "testing1") + config := getConfigWithAnnotations(envVarPostfix, ersSecretProviderClassName, shaData, options.SecretProviderClassUpdateOnChangeAnnotation, options.SecretProviderClassReloaderAutoAnnotation) + statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs() + collectors := getCollectors() + + err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) + time.Sleep(5 * time.Second) + if err != nil { + t.Errorf("Rolling upgrade failed for StatefulSet with SecretProviderClass") + } + + logrus.Infof("Verifying statefulSet update") + updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, statefulSetFuncs) + if !updated { + t.Errorf("StatefulSet was not updated") + } + + if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { + t.Errorf("Counter was not increased") + } + + if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { + t.Errorf("Counter by namespace was not increased") + } + + testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) +} + func TestRollingUpgradeForStatefulSetWithSecretInProjectedVolumeUsingErs(t *testing.T) { options.ReloadStrategy = constants.EnvVarsReloadStrategy envVarPostfix := constants.SecretEnvVarPostfix diff --git a/internal/pkg/testutil/kube.go b/internal/pkg/testutil/kube.go index 1f779ab..29e4fd7 100644 --- a/internal/pkg/testutil/kube.go +++ b/internal/pkg/testutil/kube.go @@ -29,6 +29,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" core_v1 "k8s.io/client-go/kubernetes/typed/core/v1" + csiv1 "sigs.k8s.io/secrets-store-csi-driver/apis/v1" + csiclient "sigs.k8s.io/secrets-store-csi-driver/pkg/client/clientset/versioned" + csiclient_v1 "sigs.k8s.io/secrets-store-csi-driver/pkg/client/clientset/versioned/typed/apis/v1" ) var ( @@ -37,6 +40,8 @@ var ( ConfigmapResourceType = "configMaps" // SecretResourceType is a resource type which controller watches for changes SecretResourceType = "secrets" + // SecretproviderclasspodstatusResourceType is a resource type which controller watches for changes + SecretProviderClassPodStatusResourceType = "secretproviderclasspodstatuses" ) var ( @@ -72,16 +77,16 @@ func DeleteNamespace(namespace string, client kubernetes.Interface) { } } -func getObjectMeta(namespace string, name string, autoReload bool, secretAutoReload bool, configmapAutoReload bool, 
extraAnnotations map[string]string) metav1.ObjectMeta { +func getObjectMeta(namespace string, name string, autoReload bool, secretAutoReload bool, configmapAutoReload bool, secretproviderclass bool, extraAnnotations map[string]string) metav1.ObjectMeta { return metav1.ObjectMeta{ Name: name, Namespace: namespace, Labels: map[string]string{"firstLabel": "temp"}, - Annotations: getAnnotations(name, autoReload, secretAutoReload, configmapAutoReload, extraAnnotations), + Annotations: getAnnotations(name, autoReload, secretAutoReload, configmapAutoReload, secretproviderclass, extraAnnotations), } } -func getAnnotations(name string, autoReload bool, secretAutoReload bool, configmapAutoReload bool, extraAnnotations map[string]string) map[string]string { +func getAnnotations(name string, autoReload bool, secretAutoReload bool, configmapAutoReload bool, secretproviderclass bool, extraAnnotations map[string]string) map[string]string { annotations := make(map[string]string) if autoReload { annotations[options.ReloaderAutoAnnotation] = "true" @@ -96,7 +101,9 @@ func getAnnotations(name string, autoReload bool, secretAutoReload bool, configm if !(len(annotations) > 0) { annotations = map[string]string{ options.ConfigmapUpdateOnChangeAnnotation: name, - options.SecretUpdateOnChangeAnnotation: name} + options.SecretUpdateOnChangeAnnotation: name, + options.SecretProviderClassUpdateOnChangeAnnotation: name, + } } for k, v := range extraAnnotations { annotations[k] = v @@ -175,6 +182,15 @@ func getVolumes(name string) []v1.Volume { }, }, }, + { + Name: "secretproviderclass", + VolumeSource: v1.VolumeSource{ + CSI: &v1.CSIVolumeSource{ + Driver: "secrets-store.csi.k8s.io", + VolumeAttributes: map[string]string{"secretProviderClass": name}, + }, + }, + }, } } @@ -188,6 +204,10 @@ func getVolumeMounts() []v1.VolumeMount { MountPath: "etc/sec", Name: "secret", }, + { + MountPath: "etc/spc", + Name: "secretproviderclass", + }, { MountPath: "etc/projectedconfig", Name: "projectedconfigmap", @@ -347,7 +367,7 @@ func getPodTemplateSpecWithInitContainerAndEnv(name string) v1.PodTemplateSpec { func GetDeployment(namespace string, deploymentName string) *appsv1.Deployment { replicaset := int32(1) return &appsv1.Deployment{ - ObjectMeta: getObjectMeta(namespace, deploymentName, false, false, false, map[string]string{}), + ObjectMeta: getObjectMeta(namespace, deploymentName, false, false, false, false, map[string]string{}), Spec: appsv1.DeploymentSpec{ Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{"secondLabel": "temp"}, @@ -366,7 +386,7 @@ func GetDeploymentConfig(namespace string, deploymentConfigName string) *openshi replicaset := int32(1) podTemplateSpecWithVolume := getPodTemplateSpecWithVolumes(deploymentConfigName) return &openshiftv1.DeploymentConfig{ - ObjectMeta: getObjectMeta(namespace, deploymentConfigName, false, false, false, map[string]string{}), + ObjectMeta: getObjectMeta(namespace, deploymentConfigName, false, false, false, false, map[string]string{}), Spec: openshiftv1.DeploymentConfigSpec{ Replicas: replicaset, Strategy: openshiftv1.DeploymentStrategy{ @@ -381,7 +401,7 @@ func GetDeploymentConfig(namespace string, deploymentConfigName string) *openshi func GetDeploymentWithInitContainer(namespace string, deploymentName string) *appsv1.Deployment { replicaset := int32(1) return &appsv1.Deployment{ - ObjectMeta: getObjectMeta(namespace, deploymentName, false, false, false, map[string]string{}), + ObjectMeta: getObjectMeta(namespace, deploymentName, false, false, false, false, 
map[string]string{}), Spec: appsv1.DeploymentSpec{ Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{"secondLabel": "temp"}, @@ -399,7 +419,7 @@ func GetDeploymentWithInitContainer(namespace string, deploymentName string) *ap func GetDeploymentWithInitContainerAndEnv(namespace string, deploymentName string) *appsv1.Deployment { replicaset := int32(1) return &appsv1.Deployment{ - ObjectMeta: getObjectMeta(namespace, deploymentName, true, false, false, map[string]string{}), + ObjectMeta: getObjectMeta(namespace, deploymentName, true, false, false, false, map[string]string{}), Spec: appsv1.DeploymentSpec{ Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{"secondLabel": "temp"}, @@ -416,7 +436,7 @@ func GetDeploymentWithInitContainerAndEnv(namespace string, deploymentName strin func GetDeploymentWithEnvVars(namespace string, deploymentName string) *appsv1.Deployment { replicaset := int32(1) return &appsv1.Deployment{ - ObjectMeta: getObjectMeta(namespace, deploymentName, true, false, false, map[string]string{}), + ObjectMeta: getObjectMeta(namespace, deploymentName, true, false, false, false, map[string]string{}), Spec: appsv1.DeploymentSpec{ Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{"secondLabel": "temp"}, @@ -434,7 +454,7 @@ func GetDeploymentConfigWithEnvVars(namespace string, deploymentConfigName strin replicaset := int32(1) podTemplateSpecWithEnvVars := getPodTemplateSpecWithEnvVars(deploymentConfigName) return &openshiftv1.DeploymentConfig{ - ObjectMeta: getObjectMeta(namespace, deploymentConfigName, false, false, false, map[string]string{}), + ObjectMeta: getObjectMeta(namespace, deploymentConfigName, false, false, false, false, map[string]string{}), Spec: openshiftv1.DeploymentConfigSpec{ Replicas: replicaset, Strategy: openshiftv1.DeploymentStrategy{ @@ -448,7 +468,7 @@ func GetDeploymentConfigWithEnvVars(namespace string, deploymentConfigName strin func GetDeploymentWithEnvVarSources(namespace string, deploymentName string) *appsv1.Deployment { replicaset := int32(1) return &appsv1.Deployment{ - ObjectMeta: getObjectMeta(namespace, deploymentName, true, false, false, map[string]string{}), + ObjectMeta: getObjectMeta(namespace, deploymentName, true, false, false, false, map[string]string{}), Spec: appsv1.DeploymentSpec{ Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{"secondLabel": "temp"}, @@ -465,7 +485,7 @@ func GetDeploymentWithEnvVarSources(namespace string, deploymentName string) *ap func GetDeploymentWithPodAnnotations(namespace string, deploymentName string, both bool) *appsv1.Deployment { replicaset := int32(1) deployment := &appsv1.Deployment{ - ObjectMeta: getObjectMeta(namespace, deploymentName, false, false, false, map[string]string{}), + ObjectMeta: getObjectMeta(namespace, deploymentName, false, false, false, false, map[string]string{}), Spec: appsv1.DeploymentSpec{ Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{"secondLabel": "temp"}, @@ -480,7 +500,7 @@ func GetDeploymentWithPodAnnotations(namespace string, deploymentName string, bo if !both { deployment.ObjectMeta.Annotations = nil } - deployment.Spec.Template.ObjectMeta.Annotations = getAnnotations(deploymentName, true, false, false, map[string]string{}) + deployment.Spec.Template.ObjectMeta.Annotations = getAnnotations(deploymentName, true, false, false, false, map[string]string{}) return deployment } @@ -488,9 +508,11 @@ func GetDeploymentWithTypedAutoAnnotation(namespace string, deploymentName strin replicaset := int32(1) var 
objectMeta metav1.ObjectMeta if resourceType == SecretResourceType { - objectMeta = getObjectMeta(namespace, deploymentName, false, true, false, map[string]string{}) + objectMeta = getObjectMeta(namespace, deploymentName, false, true, false, false, map[string]string{}) } else if resourceType == ConfigmapResourceType { - objectMeta = getObjectMeta(namespace, deploymentName, false, false, true, map[string]string{}) + objectMeta = getObjectMeta(namespace, deploymentName, false, false, true, false, map[string]string{}) + } else if resourceType == SecretProviderClassPodStatusResourceType { + objectMeta = getObjectMeta(namespace, deploymentName, false, false, false, true, map[string]string{}) } return &appsv1.Deployment{ @@ -517,6 +539,8 @@ func GetDeploymentWithExcludeAnnotation(namespace string, deploymentName string, annotation[options.SecretExcludeReloaderAnnotation] = deploymentName } else if resourceType == ConfigmapResourceType { annotation[options.ConfigmapExcludeReloaderAnnotation] = deploymentName + } else if resourceType == SecretProviderClassPodStatusResourceType { + annotation[options.SecretProviderClassExcludeReloaderAnnotation] = deploymentName } return &appsv1.Deployment{ @@ -542,7 +566,7 @@ func GetDeploymentWithExcludeAnnotation(namespace string, deploymentName string, // GetDaemonSet provides daemonset for testing func GetDaemonSet(namespace string, daemonsetName string) *appsv1.DaemonSet { return &appsv1.DaemonSet{ - ObjectMeta: getObjectMeta(namespace, daemonsetName, false, false, false, map[string]string{}), + ObjectMeta: getObjectMeta(namespace, daemonsetName, false, false, false, false, map[string]string{}), Spec: appsv1.DaemonSetSpec{ Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{"secondLabel": "temp"}, @@ -557,7 +581,7 @@ func GetDaemonSet(namespace string, daemonsetName string) *appsv1.DaemonSet { func GetDaemonSetWithEnvVars(namespace string, daemonSetName string) *appsv1.DaemonSet { return &appsv1.DaemonSet{ - ObjectMeta: getObjectMeta(namespace, daemonSetName, true, false, false, map[string]string{}), + ObjectMeta: getObjectMeta(namespace, daemonSetName, true, false, false, false, map[string]string{}), Spec: appsv1.DaemonSetSpec{ Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{"secondLabel": "temp"}, @@ -573,7 +597,7 @@ func GetDaemonSetWithEnvVars(namespace string, daemonSetName string) *appsv1.Dae // GetStatefulSet provides statefulset for testing func GetStatefulSet(namespace string, statefulsetName string) *appsv1.StatefulSet { return &appsv1.StatefulSet{ - ObjectMeta: getObjectMeta(namespace, statefulsetName, false, false, false, map[string]string{}), + ObjectMeta: getObjectMeta(namespace, statefulsetName, false, false, false, false, map[string]string{}), Spec: appsv1.StatefulSetSpec{ Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{"secondLabel": "temp"}, @@ -589,7 +613,7 @@ func GetStatefulSet(namespace string, statefulsetName string) *appsv1.StatefulSe // GetStatefulSet provides statefulset for testing func GetStatefulSetWithEnvVar(namespace string, statefulsetName string) *appsv1.StatefulSet { return &appsv1.StatefulSet{ - ObjectMeta: getObjectMeta(namespace, statefulsetName, true, false, false, map[string]string{}), + ObjectMeta: getObjectMeta(namespace, statefulsetName, true, false, false, false, map[string]string{}), Spec: appsv1.StatefulSetSpec{ Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{"secondLabel": "temp"}, @@ -614,6 +638,42 @@ func GetConfigmap(namespace string, configmapName 
string, testData string) *v1.C } } +func GetSecretProviderClass(namespace string, secretProviderClassName string, data string) *csiv1.SecretProviderClass { + return &csiv1.SecretProviderClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretProviderClassName, + Namespace: namespace, + }, + Spec: csiv1.SecretProviderClassSpec{ + Provider: "Test", + Parameters: map[string]string{ + "parameter1": data, + }, + }, + } +} + +func GetSecretProviderClassPodStatus(namespace string, secretProviderClassPodStatusName string, data string) *csiv1.SecretProviderClassPodStatus { + return &csiv1.SecretProviderClassPodStatus{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretProviderClassPodStatusName, + Namespace: namespace, + }, + Status: csiv1.SecretProviderClassPodStatusStatus{ + PodName: "test123", + SecretProviderClassName: secretProviderClassPodStatusName, + TargetPath: "/var/lib/kubelet/d8771ddf-935a-4199-a20b-f35f71c1d9e7/volumes/kubernetes.io~csi/secrets-store-inline/mount", + Mounted: true, + Objects: []csiv1.SecretProviderClassObject{ + { + ID: "parameter1", + Version: data, + }, + }, + }, + } +} + // GetConfigmapWithUpdatedLabel provides configmap for testing func GetConfigmapWithUpdatedLabel(namespace string, configmapName string, testLabel string, testData string) *v1.ConfigMap { return &v1.ConfigMap{ @@ -743,7 +803,7 @@ func GetResourceSHAFromAnnotation(podAnnotations map[string]string) string { return last.Hash } -// ConvertResourceToSHA generates SHA from secret or configmap data +// ConvertResourceToSHA generates SHA from secret, configmap or secretproviderclasspodstatus data func ConvertResourceToSHA(resourceType string, namespace string, resourceName string, data string) string { values := []string{} if resourceType == SecretResourceType { @@ -756,6 +816,12 @@ func ConvertResourceToSHA(resourceType string, namespace string, resourceName st for k, v := range configmap.Data { values = append(values, k+"="+v) } + } else if resourceType == SecretProviderClassPodStatusResourceType { + secretproviderclasspodstatus := GetSecretProviderClassPodStatus(namespace, resourceName, data) + for _, v := range secretproviderclasspodstatus.Status.Objects { + values = append(values, v.ID+"="+v.Version) + } + values = append(values, "SecretProviderClassName="+secretproviderclasspodstatus.Status.SecretProviderClassName) } sort.Strings(values) return crypto.GenerateSHA(strings.Join(values, ";")) @@ -770,6 +836,35 @@ func CreateConfigMap(client kubernetes.Interface, namespace string, configmapNam return configmapClient, err } +// CreateSecretProviderClass creates a SecretProviderClass in given namespace and returns the SecretProviderClassInterface +func CreateSecretProviderClass(client csiclient.Interface, namespace string, secretProviderClassName string, data string) (csiclient_v1.SecretProviderClassInterface, error) { + logrus.Infof("Creating SecretProviderClass") + secretProviderClassClient := client.SecretsstoreV1().SecretProviderClasses(namespace) + _, err := secretProviderClassClient.Create(context.TODO(), GetSecretProviderClass(namespace, secretProviderClassName, data), metav1.CreateOptions{}) + time.Sleep(3 * time.Second) + return secretProviderClassClient, err +} + +// CreateSecretProviderClass creates a SecretProviderClassPodStatus in given namespace and returns the SecretProviderClassInterface +func CreateSecretProviderClassPodStatus(client csiclient.Interface, namespace string, secretProviderClassPodStatusName string, data string) (csiclient_v1.SecretProviderClassPodStatusInterface, error) { + 
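+ // Note: the PodStatus built by this helper mirrors what the secrets-store CSI driver records after a successful mount;
+ // Status.Objects carries an ID/version pair per provider object, which is the data Reloader hashes to detect rotations.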
logrus.Infof("Creating SecretProviderClassPodStatus") + secretProviderClassPodStatusClient := client.SecretsstoreV1().SecretProviderClassPodStatuses(namespace) + secretProviderClassPodStatus := GetSecretProviderClassPodStatus(namespace, secretProviderClassPodStatusName, data) + _, err := secretProviderClassPodStatusClient.Create(context.TODO(), secretProviderClassPodStatus, metav1.CreateOptions{}) + time.Sleep(3 * time.Second) + return secretProviderClassPodStatusClient, err +} + +// CreateSecretProviderClassAndPodStatus creates a SecretProviderClass and SecretProviderClassPodStatus in given namespace +func CreateSecretProviderClassAndPodStatus(client csiclient.Interface, namespace string, name string, data string) error { + _, err := CreateSecretProviderClass(client, namespace, name, data) + if err != nil { + return err + } + _, err = CreateSecretProviderClassPodStatus(client, namespace, name, data) + return err +} + // CreateSecret creates a secret in given namespace and returns the SecretInterface func CreateSecret(client kubernetes.Interface, namespace string, secretName string, data string) (core_v1.SecretInterface, error) { logrus.Infof("Creating secret") @@ -1012,6 +1107,14 @@ func DeleteSecret(client kubernetes.Interface, namespace string, secretName stri return err } +// DeleteSecretProviderClass deletes a secretproviderclass in given namespace and returns the error if any +func DeleteSecretProviderClass(client csiclient.Interface, namespace string, secretProviderClassName string) error { + logrus.Infof("Deleting secretproviderclass %q.\n", secretProviderClassName) + err := client.SecretsstoreV1().SecretProviderClasses(namespace).Delete(context.TODO(), secretProviderClassName, metav1.DeleteOptions{}) + time.Sleep(3 * time.Second) + return err +} + // RandSeq generates a random sequence func RandSeq(n int) string { b := make([]rune, n) From 717291f1737d4e5d148acb9541b154f9ffc8f951 Mon Sep 17 00:00:00 2001 From: Zanis <22601571+ZanisO@users.noreply.github.com> Date: Sun, 19 Jan 2025 23:46:29 +0000 Subject: [PATCH 05/24] Added check to see if CSI CRDs are installed before running controller --- internal/pkg/cmd/reloader.go | 10 ++++++++-- pkg/kube/client.go | 24 +++++++++++++++++++++--- 2 files changed, 29 insertions(+), 5 deletions(-) diff --git a/internal/pkg/cmd/reloader.go b/internal/pkg/cmd/reloader.go index a1e2482..f17b2a2 100644 --- a/internal/pkg/cmd/reloader.go +++ b/internal/pkg/cmd/reloader.go @@ -179,8 +179,14 @@ func startReloader(cmd *cobra.Command, args []string) { var controllers []*controller.Controller for k := range kube.ResourceMap { - if k == "secretproviderclasspodstatuses" && !options.EnableCSIIntegration { - continue + if k == "secretproviderclasspodstatuses" { + if !options.EnableCSIIntegration { + continue + } + if !kube.IsCSIInstalled { + logrus.Infof("Can't run CSI controller as CSI CRDs are not installed") + continue + } } if ignoredResourcesList.Contains(k) || (len(namespaceLabelSelector) == 0 && k == "namespaces") { diff --git a/pkg/kube/client.go b/pkg/kube/client.go index af67319..9582929 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -25,6 +25,8 @@ type Clients struct { var ( // IsOpenshift is true if environment is Openshift, it is false if environment is Kubernetes IsOpenshift = isOpenshift() + // IsCSIEnabled is true if environment has CSI provider installed, otherwise false + IsCSIInstalled = isCSIInstalled() ) // GetClients returns a `Clients` object containing both openshift and kubernetes clients with an openshift identifier @@ 
-52,9 +54,11 @@ func GetClients() Clients { var csiClient *csiclient.Clientset - csiClient, err = GetCSIClient() - if err != nil { - logrus.Warnf("Unable to create CSI client error = %v", err) + if IsCSIInstalled { + csiClient, err = GetCSIClient() + if err != nil { + logrus.Warnf("Unable to create CSI client error = %v", err) + } } return Clients{ @@ -73,6 +77,20 @@ func GetArgoRolloutClient() (*argorollout.Clientset, error) { return argorollout.NewForConfig(config) } +func isCSIInstalled() bool { + client, err := GetKubernetesClient() + if err != nil { + logrus.Fatalf("Unable to create Kubernetes client error = %v", err) + } + _, err = client.RESTClient().Get().AbsPath("/apis/secrets-store.csi.x-k8s.io/v1").Do(context.TODO()).Raw() + if err == nil { + logrus.Info("CSI provider is installed") + return true + } + logrus.Info("CSI provider is not installed") + return false +} + func GetCSIClient() (*csiclient.Clientset, error) { config, err := getConfig() if err != nil { From 69b0d93f31e5fed80c1c65a1cf699f14c7e4c556 Mon Sep 17 00:00:00 2001 From: Zanis <22601571+ZanisO@users.noreply.github.com> Date: Mon, 20 Jan 2025 23:26:07 +0000 Subject: [PATCH 06/24] Added controller tests --- internal/pkg/controller/controller_test.go | 441 ++++++++++++++++++++- internal/pkg/testutil/kube.go | 39 +- 2 files changed, 462 insertions(+), 18 deletions(-) diff --git a/internal/pkg/controller/controller_test.go b/internal/pkg/controller/controller_test.go index ccef5df..ae42c59 100644 --- a/internal/pkg/controller/controller_test.go +++ b/internal/pkg/controller/controller_test.go @@ -25,14 +25,15 @@ import ( ) var ( - clients = kube.GetClients() - namespace = "test-reloader-" + testutil.RandSeq(5) - configmapNamePrefix = "testconfigmap-reloader" - secretNamePrefix = "testsecret-reloader" - data = "dGVzdFNlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI=" - newData = "dGVzdE5ld1NlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI=" - updatedData = "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy" - collectors = metrics.NewCollectors() + clients = kube.GetClients() + namespace = "test-reloader-" + testutil.RandSeq(5) + configmapNamePrefix = "testconfigmap-reloader" + secretNamePrefix = "testsecret-reloader" + secretProviderClassPodStatusPrefix = "testsecretproviderclasspodstatus-reloader" + data = "dGVzdFNlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI=" + newData = "dGVzdE5ld1NlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI=" + updatedData = "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy" + collectors = metrics.NewCollectors() ) const ( @@ -45,6 +46,10 @@ func TestMain(m *testing.M) { logrus.Infof("Creating controller") for k := range kube.ResourceMap { + // Don't create controller if CSI provider is not installed + if k == "secretproviderclasspodstatuses" && !kube.IsCSIInstalled { + continue + } if k == "namespaces" { continue } @@ -636,6 +641,217 @@ func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdatePodAnnotationInDep time.Sleep(sleepDuration) } +// Perform rolling upgrade on deployment and create pod annotation var upon updating the secretclassproviderpodstatus +func TestControllerUpdatingSecretProviderClassPodStatusShouldCreatePodAnnotationInDeployment(t *testing.T) { + options.ReloadStrategy = constants.AnnotationsReloadStrategy + + if !kube.IsCSIInstalled { + return + } + + // Creating secretclassprovider + secretproviderclasspodstatusName := secretProviderClassPodStatusPrefix + "-update-" + testutil.RandSeq(5) + _, err := testutil.CreateSecretProviderClass(clients.CSIClient, namespace, secretproviderclasspodstatusName, data) + if err != nil { + 
t.Errorf("Error while creating the secretclassprovider %v", err) + } + + // Creating secretproviderclasspodstatus + spcpsClient, err := testutil.CreateSecretProviderClassPodStatus(clients.CSIClient, namespace, secretproviderclasspodstatusName, data) + if err != nil { + t.Errorf("Error while creating the secretclasssproviderpodstatus %v", err) + } + + // Creating deployment + _, err = testutil.CreateDeployment(clients.KubernetesClient, secretproviderclasspodstatusName, namespace, true) + if err != nil { + t.Errorf("Error in deployment creation: %v", err) + } + + // Updating secretproviderclasspodstatus for first time + updateErr := testutil.UpdateSecretProviderClassPodStatus(spcpsClient, namespace, secretproviderclasspodstatusName, "", newData) + if updateErr != nil { + t.Errorf("Secretproviderclasspodstatus was not updated") + } + + // Verifying deployment update + logrus.Infof("Verifying pod annotation has been created") + shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, namespace, secretproviderclasspodstatusName, newData) + config := util.Config{ + Namespace: namespace, + ResourceName: secretproviderclasspodstatusName, + SHAValue: shaData, + Annotation: options.SecretProviderClassUpdateOnChangeAnnotation, + } + deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() + updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) + if !updated { + t.Errorf("Deployment was not updated") + } + time.Sleep(sleepDuration) + + // Deleting deployment + err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretproviderclasspodstatusName) + if err != nil { + logrus.Errorf("Error while deleting the deployment %v", err) + } + + // Deleting secretproviderclass + err = testutil.DeleteSecretProviderClass(clients.CSIClient, namespace, secretproviderclasspodstatusName) + if err != nil { + logrus.Errorf("Error while deleting the secretproviderclass %v", err) + } + + // Deleting secretproviderclasspodstatus + err = testutil.DeleteSecretProviderClassPodStatus(clients.CSIClient, namespace, secretproviderclasspodstatusName) + if err != nil { + logrus.Errorf("Error while deleting the secretproviderclasspodstatus %v", err) + } + time.Sleep(sleepDuration) +} + +// Perform rolling upgrade on deployment and update pod annotation var upon updating the secretproviderclasspodstatus +func TestControllerUpdatingSecretProviderClassPodStatusShouldUpdatePodAnnotationInDeployment(t *testing.T) { + options.ReloadStrategy = constants.AnnotationsReloadStrategy + + if !kube.IsCSIInstalled { + return + } + + // Creating secretclassprovider + secretproviderclasspodstatusName := secretProviderClassPodStatusPrefix + "-update-" + testutil.RandSeq(5) + _, err := testutil.CreateSecretProviderClass(clients.CSIClient, namespace, secretproviderclasspodstatusName, data) + if err != nil { + t.Errorf("Error while creating the secretclassprovider %v", err) + } + + // Creating secretproviderclasspodstatus + spcpsClient, err := testutil.CreateSecretProviderClassPodStatus(clients.CSIClient, namespace, secretproviderclasspodstatusName, data) + if err != nil { + t.Errorf("Error while creating the secretclasssproviderpodstatus %v", err) + } + + // Creating deployment + _, err = testutil.CreateDeployment(clients.KubernetesClient, secretproviderclasspodstatusName, namespace, true) + if err != nil { + t.Errorf("Error in deployment creation: %v", err) + } + + // Updating Secret + err = testutil.UpdateSecretProviderClassPodStatus(spcpsClient, namespace, 
secretproviderclasspodstatusName, "", newData) + if err != nil { + t.Errorf("Error while updating secretproviderclasspodstatus %v", err) + } + + // Updating Secret + err = testutil.UpdateSecretProviderClassPodStatus(spcpsClient, namespace, secretproviderclasspodstatusName, "", updatedData) + if err != nil { + t.Errorf("Error while updating secretproviderclasspodstatus %v", err) + } + + // Verifying Upgrade + logrus.Infof("Verifying pod annotation has been updated") + shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, namespace, secretproviderclasspodstatusName, updatedData) + config := util.Config{ + Namespace: namespace, + ResourceName: secretproviderclasspodstatusName, + SHAValue: shaData, + Annotation: options.SecretProviderClassUpdateOnChangeAnnotation, + } + deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() + updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) + if !updated { + t.Errorf("Deployment was not updated") + } + + // Deleting Deployment + err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretproviderclasspodstatusName) + if err != nil { + logrus.Errorf("Error while deleting the deployment %v", err) + } + + // Deleting secretproviderclass + err = testutil.DeleteSecretProviderClass(clients.CSIClient, namespace, secretproviderclasspodstatusName) + if err != nil { + logrus.Errorf("Error while deleting the secretproviderclass %v", err) + } + + // Deleting secretproviderclasspodstatus + err = testutil.DeleteSecretProviderClassPodStatus(clients.CSIClient, namespace, secretproviderclasspodstatusName) + if err != nil { + logrus.Errorf("Error while deleting the secretproviderclasspodstatus %v", err) + } + time.Sleep(sleepDuration) + +} + +// Do not Perform rolling upgrade on pod and create or update a pod annotation upon updating the label in secretproviderclasspodstatus +func TestControllerUpdatingSecretProviderClassPodStatusWithSameDataShouldNotCreateOrUpdatePodAnnotationInDeployment(t *testing.T) { + options.ReloadStrategy = constants.AnnotationsReloadStrategy + + if !kube.IsCSIInstalled { + return + } + + // Creating secretclassprovider + secretproviderclasspodstatusName := secretProviderClassPodStatusPrefix + "-update-" + testutil.RandSeq(5) + _, err := testutil.CreateSecretProviderClass(clients.CSIClient, namespace, secretproviderclasspodstatusName, data) + if err != nil { + t.Errorf("Error while creating the secretclassprovider %v", err) + } + + // Creating secretproviderclasspodstatus + spcpsClient, err := testutil.CreateSecretProviderClassPodStatus(clients.CSIClient, namespace, secretproviderclasspodstatusName, data) + if err != nil { + t.Errorf("Error while creating the secretclasssproviderpodstatus %v", err) + } + + // Creating deployment + _, err = testutil.CreateDeployment(clients.KubernetesClient, secretproviderclasspodstatusName, namespace, true) + if err != nil { + t.Errorf("Error in deployment creation: %v", err) + } + + err = testutil.UpdateSecretProviderClassPodStatus(spcpsClient, namespace, secretproviderclasspodstatusName, "", data) + if err != nil { + t.Errorf("Error while updating secretproviderclasspodstatus %v", err) + } + + // Verifying Upgrade + logrus.Infof("Verifying pod annotation has been created") + shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, namespace, secretproviderclasspodstatusName, data) + config := util.Config{ + Namespace: namespace, + ResourceName: secretproviderclasspodstatusName, + SHAValue: 
shaData, + Annotation: options.SecretProviderClassUpdateOnChangeAnnotation, + } + deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() + updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) + if updated { + t.Errorf("Deployment should not be updated by changing in secret") + } + + // Deleting Deployment + err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretproviderclasspodstatusName) + if err != nil { + logrus.Errorf("Error while deleting the deployment %v", err) + } + + // Deleting secretproviderclass + err = testutil.DeleteSecretProviderClass(clients.CSIClient, namespace, secretproviderclasspodstatusName) + if err != nil { + logrus.Errorf("Error while deleting the secretproviderclass %v", err) + } + + // Deleting secretproviderclasspodstatus + err = testutil.DeleteSecretProviderClassPodStatus(clients.CSIClient, namespace, secretproviderclasspodstatusName) + if err != nil { + logrus.Errorf("Error while deleting the secretproviderclasspodstatus %v", err) + } + time.Sleep(sleepDuration) +} + // Perform rolling upgrade on DaemonSet and create pod annotation var upon updating the configmap func TestControllerUpdatingConfigmapShouldCreatePodAnnotationInDaemonSet(t *testing.T) { options.ReloadStrategy = constants.AnnotationsReloadStrategy @@ -1646,6 +1862,215 @@ func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdateEnvInDeployment(t time.Sleep(sleepDuration) } +// Perform rolling upgrade on pod and create a env var upon updating the secretproviderclasspodstatus +func TestControllerUpdatingSecretProviderClassPodStatusShouldCreateEnvInDeployment(t *testing.T) { + options.ReloadStrategy = constants.EnvVarsReloadStrategy + + if !kube.IsCSIInstalled { + return + } + + // Creating secretclassprovider + secretproviderclasspodstatusName := secretProviderClassPodStatusPrefix + "-update-" + testutil.RandSeq(5) + _, err := testutil.CreateSecretProviderClass(clients.CSIClient, namespace, secretproviderclasspodstatusName, data) + if err != nil { + t.Errorf("Error while creating the secretclassprovider %v", err) + } + + // Creating secretproviderclasspodstatus + spcpsClient, err := testutil.CreateSecretProviderClassPodStatus(clients.CSIClient, namespace, secretproviderclasspodstatusName, data) + if err != nil { + t.Errorf("Error while creating the secretclasssproviderpodstatus %v", err) + } + + // Creating deployment + _, err = testutil.CreateDeployment(clients.KubernetesClient, secretproviderclasspodstatusName, namespace, true) + if err != nil { + t.Errorf("Error in deployment creation: %v", err) + } + + // Updating Secret + err = testutil.UpdateSecretProviderClassPodStatus(spcpsClient, namespace, secretproviderclasspodstatusName, "", newData) + if err != nil { + t.Errorf("Error while updating secretproviderclasspodstatus %v", err) + } + + // Verifying Upgrade + logrus.Infof("Verifying env var has been created") + shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, namespace, secretproviderclasspodstatusName, newData) + config := util.Config{ + Namespace: namespace, + ResourceName: secretproviderclasspodstatusName, + SHAValue: shaData, + Annotation: options.SecretProviderClassUpdateOnChangeAnnotation, + } + deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() + updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretProviderClassEnvVarPostfix, deploymentFuncs) + if !updated { + t.Errorf("Deployment was not updated") + } + + // Deleting Deployment + err = 
testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretproviderclasspodstatusName) + if err != nil { + logrus.Errorf("Error while deleting the deployment %v", err) + } + + // Deleting secretproviderclass + err = testutil.DeleteSecretProviderClass(clients.CSIClient, namespace, secretproviderclasspodstatusName) + if err != nil { + logrus.Errorf("Error while deleting the secretproviderclass %v", err) + } + + // Deleting secretproviderclasspodstatus + err = testutil.DeleteSecretProviderClassPodStatus(clients.CSIClient, namespace, secretproviderclasspodstatusName) + if err != nil { + logrus.Errorf("Error while deleting the secretproviderclasspodstatus %v", err) + } + time.Sleep(sleepDuration) +} + +// Perform rolling upgrade on deployment and update env var upon updating the secretproviderclasspodstatus +func TestControllerUpdatingSecretProviderClassPodStatusShouldUpdateEnvInDeployment(t *testing.T) { + options.ReloadStrategy = constants.EnvVarsReloadStrategy + + if !kube.IsCSIInstalled { + return + } + + // Creating secretclassprovider + secretproviderclasspodstatusName := secretProviderClassPodStatusPrefix + "-update-" + testutil.RandSeq(5) + _, err := testutil.CreateSecretProviderClass(clients.CSIClient, namespace, secretproviderclasspodstatusName, data) + if err != nil { + t.Errorf("Error while creating the secretclassprovider %v", err) + } + + // Creating secretproviderclasspodstatus + spcpsClient, err := testutil.CreateSecretProviderClassPodStatus(clients.CSIClient, namespace, secretproviderclasspodstatusName, data) + if err != nil { + t.Errorf("Error while creating the secretclasssproviderpodstatus %v", err) + } + + // Creating deployment + _, err = testutil.CreateDeployment(clients.KubernetesClient, secretproviderclasspodstatusName, namespace, true) + if err != nil { + t.Errorf("Error in deployment creation: %v", err) + } + + // Updating secretproviderclasspodstatus + err = testutil.UpdateSecretProviderClassPodStatus(spcpsClient, namespace, secretproviderclasspodstatusName, "", newData) + if err != nil { + t.Errorf("Error while updating secretproviderclasspodstatus %v", err) + } + + // Updating secretproviderclasspodstatus + err = testutil.UpdateSecretProviderClassPodStatus(spcpsClient, namespace, secretproviderclasspodstatusName, "", updatedData) + if err != nil { + t.Errorf("Error while updating secretproviderclasspodstatus %v", err) + } + + // Verifying Upgrade + logrus.Infof("Verifying env var has been updated") + shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, namespace, secretproviderclasspodstatusName, updatedData) + config := util.Config{ + Namespace: namespace, + ResourceName: secretproviderclasspodstatusName, + SHAValue: shaData, + Annotation: options.SecretProviderClassUpdateOnChangeAnnotation, + } + deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() + updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretProviderClassEnvVarPostfix, deploymentFuncs) + if !updated { + t.Errorf("Deployment was not updated") + } + + // Deleting Deployment + err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretproviderclasspodstatusName) + if err != nil { + logrus.Errorf("Error while deleting the deployment %v", err) + } + + // Deleting secretproviderclass + err = testutil.DeleteSecretProviderClass(clients.CSIClient, namespace, secretproviderclasspodstatusName) + if err != nil { + logrus.Errorf("Error while deleting the secretproviderclass %v", err) + } + + // Deleting 
secretproviderclasspodstatus + err = testutil.DeleteSecretProviderClassPodStatus(clients.CSIClient, namespace, secretproviderclasspodstatusName) + if err != nil { + logrus.Errorf("Error while deleting the secretproviderclasspodstatus %v", err) + } + time.Sleep(sleepDuration) +} + +// Do not Perform rolling upgrade on pod and create or update a env var upon updating the label in secretclasssproviderpodstatus +func TestControllerUpdatingSecretProviderClassPodStatusLabelsShouldNotCreateOrUpdateEnvInDeployment(t *testing.T) { + options.ReloadStrategy = constants.EnvVarsReloadStrategy + + if !kube.IsCSIInstalled { + return + } + + // Creating secretclassprovider + secretproviderclasspodstatusName := secretProviderClassPodStatusPrefix + "-update-" + testutil.RandSeq(5) + _, err := testutil.CreateSecretProviderClass(clients.CSIClient, namespace, secretproviderclasspodstatusName, data) + if err != nil { + t.Errorf("Error while creating the secretclassprovider %v", err) + } + + // Creating secretproviderclasspodstatus + spcpsClient, err := testutil.CreateSecretProviderClassPodStatus(clients.CSIClient, namespace, secretproviderclasspodstatusName, data) + if err != nil { + t.Errorf("Error while creating the secretclasssproviderpodstatus %v", err) + } + + // Creating deployment + _, err = testutil.CreateDeployment(clients.KubernetesClient, secretproviderclasspodstatusName, namespace, true) + if err != nil { + t.Errorf("Error in deployment creation: %v", err) + } + + err = testutil.UpdateSecretProviderClassPodStatus(spcpsClient, namespace, secretproviderclasspodstatusName, "test", data) + if err != nil { + t.Errorf("Error while updating secret %v", err) + } + + // Verifying Upgrade + logrus.Infof("Verifying env var has been created") + shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, namespace, secretproviderclasspodstatusName, data) + config := util.Config{ + Namespace: namespace, + ResourceName: secretproviderclasspodstatusName, + SHAValue: shaData, + Annotation: options.SecretProviderClassUpdateOnChangeAnnotation, + } + deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() + updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretProviderClassEnvVarPostfix, deploymentFuncs) + if updated { + t.Errorf("Deployment should not be updated by changing label in secret") + } + + // Deleting Deployment + err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretproviderclasspodstatusName) + if err != nil { + logrus.Errorf("Error while deleting the deployment %v", err) + } + + // Deleting secretproviderclass + err = testutil.DeleteSecretProviderClass(clients.CSIClient, namespace, secretproviderclasspodstatusName) + if err != nil { + logrus.Errorf("Error while deleting the secretproviderclass %v", err) + } + + // Deleting secretproviderclasspodstatus + err = testutil.DeleteSecretProviderClassPodStatus(clients.CSIClient, namespace, secretproviderclasspodstatusName) + if err != nil { + logrus.Errorf("Error while deleting the secretproviderclasspodstatus %v", err) + } + time.Sleep(sleepDuration) +} + // Perform rolling upgrade on DaemonSet and create env var upon updating the configmap func TestControllerUpdatingConfigmapShouldCreateEnvInDaemonSet(t *testing.T) { options.ReloadStrategy = constants.EnvVarsReloadStrategy diff --git a/internal/pkg/testutil/kube.go b/internal/pkg/testutil/kube.go index 29e4fd7..3d8b502 100644 --- a/internal/pkg/testutil/kube.go +++ b/internal/pkg/testutil/kube.go @@ -855,16 +855,6 @@ func 
CreateSecretProviderClassPodStatus(client csiclient.Interface, namespace st return secretProviderClassPodStatusClient, err } -// CreateSecretProviderClassAndPodStatus creates a SecretProviderClass and SecretProviderClassPodStatus in given namespace -func CreateSecretProviderClassAndPodStatus(client csiclient.Interface, namespace string, name string, data string) error { - _, err := CreateSecretProviderClass(client, namespace, name, data) - if err != nil { - return err - } - _, err = CreateSecretProviderClassPodStatus(client, namespace, name, data) - return err -} - // CreateSecret creates a secret in given namespace and returns the SecretInterface func CreateSecret(client kubernetes.Interface, namespace string, secretName string, data string) (core_v1.SecretInterface, error) { logrus.Infof("Creating secret") @@ -1091,6 +1081,27 @@ func UpdateSecret(secretClient core_v1.SecretInterface, namespace string, secret return updateErr } +// UpdateSecretProviderClassPodStatus updates a secretproviderclasspodstatus in given namespace and returns the error if any +func UpdateSecretProviderClassPodStatus(spcpsClient csiclient_v1.SecretProviderClassPodStatusInterface, namespace string, spcpsName string, label string, data string) error { + logrus.Infof("Updating secretproviderclasspodstatus %q.\n", spcpsName) + updatedStatus := GetSecretProviderClassPodStatus(namespace, spcpsName, data).Status + secretproviderclasspodstatus, err := spcpsClient.Get(context.TODO(), spcpsName, metav1.GetOptions{}) + if err != nil { + return err + } + secretproviderclasspodstatus.Status = updatedStatus + if label != "" { + if secretproviderclasspodstatus.Labels == nil { + secretproviderclasspodstatus.Labels = make(map[string]string) + } + labels := secretproviderclasspodstatus.Labels + labels["firstLabel"] = label + } + _, updateErr := spcpsClient.Update(context.TODO(), secretproviderclasspodstatus, metav1.UpdateOptions{}) + time.Sleep(3 * time.Second) + return updateErr +} + // DeleteConfigMap deletes a configmap in given namespace and returns the error if any func DeleteConfigMap(client kubernetes.Interface, namespace string, configmapName string) error { logrus.Infof("Deleting configmap %q.\n", configmapName) @@ -1115,6 +1126,14 @@ func DeleteSecretProviderClass(client csiclient.Interface, namespace string, sec return err } +// DeleteSecretProviderClassPodStatus deletes a secretproviderclasspodstatus in given namespace and returns the error if any +func DeleteSecretProviderClassPodStatus(client csiclient.Interface, namespace string, secretProviderClassPodStatusName string) error { + logrus.Infof("Deleting secretproviderclasspodstatus %q.\n", secretProviderClassPodStatusName) + err := client.SecretsstoreV1().SecretProviderClassPodStatuses(namespace).Delete(context.TODO(), secretProviderClassPodStatusName, metav1.DeleteOptions{}) + time.Sleep(3 * time.Second) + return err +} + // RandSeq generates a random sequence func RandSeq(n int) string { b := make([]rune, n) From 570649e56bd58c1cda9a9b2538d01b7e9707a241 Mon Sep 17 00:00:00 2001 From: Zanis <22601571+ZanisO@users.noreply.github.com> Date: Wed, 5 Feb 2025 21:43:56 +0000 Subject: [PATCH 07/24] Minor improvements to tests and handlers --- internal/pkg/cmd/reloader.go | 6 ++--- internal/pkg/constants/constants.go | 2 +- internal/pkg/controller/controller.go | 22 +++++++++++----- internal/pkg/controller/controller_test.go | 30 +++++++++++----------- internal/pkg/handler/upgrade_test.go | 6 ++--- internal/pkg/options/flags.go | 2 +- internal/pkg/testutil/kube.go | 2 +- 7 files changed, 40 insertions(+), 30 deletions(-) diff --git
a/internal/pkg/cmd/reloader.go b/internal/pkg/cmd/reloader.go index f17b2a2..03b6262 100644 --- a/internal/pkg/cmd/reloader.go +++ b/internal/pkg/cmd/reloader.go @@ -36,11 +36,11 @@ func NewReloaderCommand() *cobra.Command { cmd.PersistentFlags().BoolVar(&options.AutoReloadAll, "auto-reload-all", false, "Auto reload all resources") cmd.PersistentFlags().StringVar(&options.ConfigmapUpdateOnChangeAnnotation, "configmap-annotation", "configmap.reloader.stakater.com/reload", "annotation to detect changes in configmaps, specified by name") cmd.PersistentFlags().StringVar(&options.SecretUpdateOnChangeAnnotation, "secret-annotation", "secret.reloader.stakater.com/reload", "annotation to detect changes in secrets, specified by name") - cmd.PersistentFlags().StringVar(&options.SecretProviderClassUpdateOnChangeAnnotation, "spc-annotation", "secretproviderclass.reloader.stakater.com/reload", "annotation to detect changes in secretproviderclasses, specified by name") + cmd.PersistentFlags().StringVar(&options.SecretProviderClassUpdateOnChangeAnnotation, "secretproviderclass-annotation", "secretproviderclass.reloader.stakater.com/reload", "annotation to detect changes in secretproviderclasses, specified by name") cmd.PersistentFlags().StringVar(&options.ReloaderAutoAnnotation, "auto-annotation", "reloader.stakater.com/auto", "annotation to detect changes in secrets/configmaps") cmd.PersistentFlags().StringVar(&options.ConfigmapReloaderAutoAnnotation, "configmap-auto-annotation", "configmap.reloader.stakater.com/auto", "annotation to detect changes in configmaps") cmd.PersistentFlags().StringVar(&options.SecretReloaderAutoAnnotation, "secret-auto-annotation", "secret.reloader.stakater.com/auto", "annotation to detect changes in secrets") - cmd.PersistentFlags().StringVar(&options.SecretProviderClassReloaderAutoAnnotation, "spc-auto-annotation", "secretproviderclass.reloader.stakater.com/auto", "annotation to detect changes in secretproviderclasses") + cmd.PersistentFlags().StringVar(&options.SecretProviderClassReloaderAutoAnnotation, "secretproviderclass-auto-annotation", "secretproviderclass.reloader.stakater.com/auto", "annotation to detect changes in secretproviderclasses") cmd.PersistentFlags().StringVar(&options.AutoSearchAnnotation, "auto-search-annotation", "reloader.stakater.com/search", "annotation to detect changes in configmaps or secrets tagged with special match annotation") cmd.PersistentFlags().StringVar(&options.SearchMatchAnnotation, "search-match-annotation", "reloader.stakater.com/match", "annotation to mark secrets or configmaps to match the search") cmd.PersistentFlags().StringVar(&options.LogFormat, "log-format", "", "Log format to use (empty string for text, or JSON)") @@ -184,7 +184,7 @@ func startReloader(cmd *cobra.Command, args []string) { continue } if !kube.IsCSIInstalled { - logrus.Infof("Can't run CSI controller as CSI CRDs are not installed") + logrus.Infof("Can't run secretproviderclasspodstatuses controller as CSI CRDs are not installed") continue } } diff --git a/internal/pkg/constants/constants.go b/internal/pkg/constants/constants.go index 6ad3bd5..0d1f1c7 100644 --- a/internal/pkg/constants/constants.go +++ b/internal/pkg/constants/constants.go @@ -8,7 +8,7 @@ const ( ConfigmapEnvVarPostfix = "CONFIGMAP" // SecretEnvVarPostfix is a postfix for secret envVar SecretEnvVarPostfix = "SECRET" - // SecretEnvVarSecretProviderClassPodStatus is a postfix for secretproviderclasspodstatus envVar + // SecretProviderClassEnvVarPostfix is a postfix for 
secretproviderclasspodstatus envVar SecretProviderClassEnvVarPostfix = "SECRETPROVIDERCLASS" // EnvVarPrefix is a Prefix for environment variable EnvVarPrefix = "STAKATER_" diff --git a/internal/pkg/controller/controller.go b/internal/pkg/controller/controller.go index bf8ea4b..dca6625 100644 --- a/internal/pkg/controller/controller.go +++ b/internal/pkg/controller/controller.go @@ -22,6 +22,7 @@ import ( "k8s.io/client-go/util/workqueue" "k8s.io/kubectl/pkg/scheme" "k8s.io/utils/strings/slices" + csiv1 "sigs.k8s.io/secrets-store-csi-driver/apis/v1" ) // Controller for checking events @@ -117,6 +118,8 @@ func (c *Controller) Add(obj interface{}) { case *v1.Namespace: c.addSelectedNamespaceToCache(*object) return + case *csiv1.SecretProviderClassPodStatus: + return } if options.ReloadOnCreate == "true" { @@ -136,6 +139,8 @@ func (c *Controller) resourceInIgnoredNamespace(raw interface{}) bool { return c.ignoredNamespaces.Contains(object.ObjectMeta.Namespace) case *v1.Secret: return c.ignoredNamespaces.Contains(object.ObjectMeta.Namespace) + case *csiv1.SecretProviderClassPodStatus: + return c.ignoredNamespaces.Contains(object.ObjectMeta.Namespace) } return false } @@ -154,6 +159,10 @@ func (c *Controller) resourceInSelectedNamespaces(raw interface{}) bool { if slices.Contains(selectedNamespacesCache, object.GetNamespace()) { return true } + case *csiv1.SecretProviderClassPodStatus: + if slices.Contains(selectedNamespacesCache, object.GetNamespace()) { + return true + } } return false } @@ -192,6 +201,13 @@ func (c *Controller) Update(old interface{}, new interface{}) { // Delete function to add an object to the queue in case of deleting a resource func (c *Controller) Delete(old interface{}) { + switch object := old.(type) { + case *v1.Namespace: + c.removeSelectedNamespaceFromCache(*object) + return + case *csiv1.SecretProviderClassPodStatus: + return + } if options.ReloadOnDelete == "true" { if !c.resourceInIgnoredNamespace(old) && c.resourceInSelectedNamespaces(old) && secretControllerInitialized && configmapControllerInitialized { @@ -202,12 +218,6 @@ func (c *Controller) Delete(old interface{}) { }) } } - - switch object := old.(type) { - case *v1.Namespace: - c.removeSelectedNamespaceFromCache(*object) - return - } } // Run function for controller which handles the queue diff --git a/internal/pkg/controller/controller_test.go b/internal/pkg/controller/controller_test.go index ae42c59..f599923 100644 --- a/internal/pkg/controller/controller_test.go +++ b/internal/pkg/controller/controller_test.go @@ -649,11 +649,11 @@ func TestControllerUpdatingSecretProviderClassPodStatusShouldCreatePodAnnotation return } - // Creating secretclassprovider + // Creating secretproviderclass secretproviderclasspodstatusName := secretProviderClassPodStatusPrefix + "-update-" + testutil.RandSeq(5) _, err := testutil.CreateSecretProviderClass(clients.CSIClient, namespace, secretproviderclasspodstatusName, data) if err != nil { - t.Errorf("Error while creating the secretclassprovider %v", err) + t.Errorf("Error while creating the secretproviderclass %v", err) } // Creating secretproviderclasspodstatus @@ -718,11 +718,11 @@ func TestControllerUpdatingSecretProviderClassPodStatusShouldUpdatePodAnnotation return } - // Creating secretclassprovider + // Creating secretproviderclass secretproviderclasspodstatusName := secretProviderClassPodStatusPrefix + "-update-" + testutil.RandSeq(5) _, err := testutil.CreateSecretProviderClass(clients.CSIClient, namespace, secretproviderclasspodstatusName, data) if err != nil 
{ - t.Errorf("Error while creating the secretclassprovider %v", err) + t.Errorf("Error while creating the secretproviderclass %v", err) } // Creating secretproviderclasspodstatus @@ -793,11 +793,11 @@ func TestControllerUpdatingSecretProviderClassPodStatusWithSameDataShouldNotCrea return } - // Creating secretclassprovider + // Creating secretproviderclass secretproviderclasspodstatusName := secretProviderClassPodStatusPrefix + "-update-" + testutil.RandSeq(5) _, err := testutil.CreateSecretProviderClass(clients.CSIClient, namespace, secretproviderclasspodstatusName, data) if err != nil { - t.Errorf("Error while creating the secretclassprovider %v", err) + t.Errorf("Error while creating the secretproviderclass %v", err) } // Creating secretproviderclasspodstatus @@ -829,7 +829,7 @@ func TestControllerUpdatingSecretProviderClassPodStatusWithSameDataShouldNotCrea deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) if updated { - t.Errorf("Deployment should not be updated by changing in secret") + t.Errorf("Deployment should not be updated by changing in secretproviderclasspodstatus") } // Deleting Deployment @@ -1870,11 +1870,11 @@ func TestControllerUpdatingSecretProviderClassPodStatusShouldCreateEnvInDeployme return } - // Creating secretclassprovider + // Creating secretproviderclass secretproviderclasspodstatusName := secretProviderClassPodStatusPrefix + "-update-" + testutil.RandSeq(5) _, err := testutil.CreateSecretProviderClass(clients.CSIClient, namespace, secretproviderclasspodstatusName, data) if err != nil { - t.Errorf("Error while creating the secretclassprovider %v", err) + t.Errorf("Error while creating the secretproviderclass %v", err) } // Creating secretproviderclasspodstatus @@ -1938,11 +1938,11 @@ func TestControllerUpdatingSecretProviderClassPodStatusShouldUpdateEnvInDeployme return } - // Creating secretclassprovider + // Creating secretproviderclass secretproviderclasspodstatusName := secretProviderClassPodStatusPrefix + "-update-" + testutil.RandSeq(5) _, err := testutil.CreateSecretProviderClass(clients.CSIClient, namespace, secretproviderclasspodstatusName, data) if err != nil { - t.Errorf("Error while creating the secretclassprovider %v", err) + t.Errorf("Error while creating the secretproviderclass %v", err) } // Creating secretproviderclasspodstatus @@ -2012,11 +2012,11 @@ func TestControllerUpdatingSecretProviderClassPodStatusLabelsShouldNotCreateOrUp return } - // Creating secretclassprovider + // Creating secretproviderclass secretproviderclasspodstatusName := secretProviderClassPodStatusPrefix + "-update-" + testutil.RandSeq(5) _, err := testutil.CreateSecretProviderClass(clients.CSIClient, namespace, secretproviderclasspodstatusName, data) if err != nil { - t.Errorf("Error while creating the secretclassprovider %v", err) + t.Errorf("Error while creating the secretproviderclass %v", err) } // Creating secretproviderclasspodstatus @@ -2033,7 +2033,7 @@ func TestControllerUpdatingSecretProviderClassPodStatusLabelsShouldNotCreateOrUp err = testutil.UpdateSecretProviderClassPodStatus(spcpsClient, namespace, secretproviderclasspodstatusName, "test", data) if err != nil { - t.Errorf("Error while updating secret %v", err) + t.Errorf("Error while updating secretproviderclasspodstatus %v", err) } // Verifying Upgrade @@ -2048,7 +2048,7 @@ func TestControllerUpdatingSecretProviderClassPodStatusLabelsShouldNotCreateOrUp deploymentFuncs := 
handler.GetDeploymentRollingUpgradeFuncs() updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretProviderClassEnvVarPostfix, deploymentFuncs) if updated { - t.Errorf("Deployment should not be updated by changing label in secret") + t.Errorf("Deployment should not be updated by changing label in secretproviderclasspodstatus") } // Deleting Deployment diff --git a/internal/pkg/handler/upgrade_test.go b/internal/pkg/handler/upgrade_test.go index 35acddb..a0fb657 100644 --- a/internal/pkg/handler/upgrade_test.go +++ b/internal/pkg/handler/upgrade_test.go @@ -2809,7 +2809,7 @@ func TestRollingUpgradeForDaemonSetWithSecretProviderClassUsingArs(t *testing.T) envVarPostfix := constants.SecretProviderClassEnvVarPostfix shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, arsNamespace, arsSecretProviderClassName, "testing1") - config := getConfigWithAnnotations(envVarPostfix, arsSecretProviderClassName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) + config := getConfigWithAnnotations(envVarPostfix, arsSecretProviderClassName, shaData, options.SecretProviderClassUpdateOnChangeAnnotation, options.SecretProviderClassReloaderAutoAnnotation) daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() collectors := getCollectors() @@ -2969,7 +2969,7 @@ func TestRollingUpgradeForStatefulSetWithSecretProviderClassUsingArs(t *testing. envVarPostfix := constants.SecretProviderClassEnvVarPostfix shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, arsNamespace, arsSecretProviderClassName, "testing1") - config := getConfigWithAnnotations(envVarPostfix, arsSecretProviderClassName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) + config := getConfigWithAnnotations(envVarPostfix, arsSecretProviderClassName, shaData, options.SecretProviderClassUpdateOnChangeAnnotation, options.SecretProviderClassReloaderAutoAnnotation) statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs() collectors := getCollectors() @@ -3776,7 +3776,7 @@ func TestRollingUpgradeForDeploymentWithSecretProviderClassExcludeAnnotationUsin err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) time.Sleep(5 * time.Second) if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with exclude Secret") + t.Errorf("Rolling upgrade failed for Deployment with exclude SecretProviderClass") } logrus.Infof("Verifying deployment did not update") diff --git a/internal/pkg/options/flags.go b/internal/pkg/options/flags.go index 8267bed..dcefade 100644 --- a/internal/pkg/options/flags.go +++ b/internal/pkg/options/flags.go @@ -62,7 +62,7 @@ var ( EnableHA = false // Url to send a request to instead of triggering a reload WebhookUrl = "" - // EnableCsiIntegration Adds support to watch SecretProviderClassPodStatus and restart deployment based on it + // EnableCSIIntegration Adds support to watch SecretProviderClassPodStatus and restart deployment based on it EnableCSIIntegration = false ) diff --git a/internal/pkg/testutil/kube.go b/internal/pkg/testutil/kube.go index 3d8b502..8c843d7 100644 --- a/internal/pkg/testutil/kube.go +++ b/internal/pkg/testutil/kube.go @@ -845,7 +845,7 @@ func CreateSecretProviderClass(client csiclient.Interface, namespace string, sec return secretProviderClassClient, err } -// CreateSecretProviderClass creates a SecretProviderClassPodStatus in given namespace and returns the SecretProviderClassInterface +// 
CreateSecretProviderClassPodStatus creates a SecretProviderClassPodStatus in given namespace and returns the SecretProviderClassPodStatusInterface func CreateSecretProviderClassPodStatus(client csiclient.Interface, namespace string, secretProviderClassPodStatusName string, data string) (csiclient_v1.SecretProviderClassPodStatusInterface, error) { logrus.Infof("Creating SecretProviderClassPodStatus") secretProviderClassPodStatusClient := client.SecretsstoreV1().SecretProviderClassPodStatuses(namespace) From e7e095cb4bb24ebfe5ab16907f1f878ef11650e4 Mon Sep 17 00:00:00 2001 From: Zanis <22601571+ZanisO@users.noreply.github.com> Date: Wed, 5 Feb 2025 23:36:08 +0000 Subject: [PATCH 08/24] Fixed tests --- internal/pkg/testutil/kube.go | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/internal/pkg/testutil/kube.go b/internal/pkg/testutil/kube.go index 8c843d7..a61b63d 100644 --- a/internal/pkg/testutil/kube.go +++ b/internal/pkg/testutil/kube.go @@ -97,11 +97,14 @@ func getAnnotations(name string, autoReload bool, secretAutoReload bool, configm if configmapAutoReload { annotations[options.ConfigmapReloaderAutoAnnotation] = "true" } + if secretproviderclass { + annotations[options.SecretProviderClassReloaderAutoAnnotation] = "true" + } if !(len(annotations) > 0) { annotations = map[string]string{ - options.ConfigmapUpdateOnChangeAnnotation: name, - options.SecretUpdateOnChangeAnnotation: name, + options.ConfigmapUpdateOnChangeAnnotation: name, + options.SecretUpdateOnChangeAnnotation: name, options.SecretProviderClassUpdateOnChangeAnnotation: name, } } @@ -700,7 +703,7 @@ func GetSecret(namespace string, secretName string, data string) *v1.Secret { func GetCronJob(namespace string, cronJobName string) *batchv1.CronJob { return &batchv1.CronJob{ - ObjectMeta: getObjectMeta(namespace, cronJobName, false, false, false, map[string]string{}), + ObjectMeta: getObjectMeta(namespace, cronJobName, false, false, false, false, map[string]string{}), Spec: batchv1.CronJobSpec{ Schedule: "*/5 * * * *", // Run every 5 minutes JobTemplate: batchv1.JobTemplateSpec{ @@ -717,7 +720,7 @@ func GetCronJob(namespace string, cronJobName string) *batchv1.CronJob { func GetJob(namespace string, jobName string) *batchv1.Job { return &batchv1.Job{ - ObjectMeta: getObjectMeta(namespace, jobName, false, false, false, map[string]string{}), + ObjectMeta: getObjectMeta(namespace, jobName, false, false, false, false, map[string]string{}), Spec: batchv1.JobSpec{ Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{"secondLabel": "temp"}, @@ -729,7 +732,7 @@ func GetJob(namespace string, jobName string) *batchv1.Job { func GetCronJobWithEnvVar(namespace string, cronJobName string) *batchv1.CronJob { return &batchv1.CronJob{ - ObjectMeta: getObjectMeta(namespace, cronJobName, true, false, false, map[string]string{}), + ObjectMeta: getObjectMeta(namespace, cronJobName, true, false, false, false, map[string]string{}), Spec: batchv1.CronJobSpec{ Schedule: "*/5 * * * *", // Run every 5 minutes JobTemplate: batchv1.JobTemplateSpec{ @@ -746,7 +749,7 @@ func GetCronJobWithEnvVar(namespace string, cronJobName string) *batchv1.CronJob func GetJobWithEnvVar(namespace string, jobName string) *batchv1.Job { return &batchv1.Job{ - ObjectMeta: getObjectMeta(namespace, jobName, true, false, false, map[string]string{}), + ObjectMeta: getObjectMeta(namespace, jobName, true, false, false, false, map[string]string{}), Spec: batchv1.JobSpec{ Selector: &metav1.LabelSelector{ MatchLabels: 
map[string]string{"secondLabel": "temp"}, @@ -1291,7 +1294,7 @@ func GetSHAfromEmptyData() string { func GetRollout(namespace string, rolloutName string, annotations map[string]string) *argorolloutv1alpha1.Rollout { replicaset := int32(1) return &argorolloutv1alpha1.Rollout{ - ObjectMeta: getObjectMeta(namespace, rolloutName, false, false, false, annotations), + ObjectMeta: getObjectMeta(namespace, rolloutName, false, false, false, false, annotations), Spec: argorolloutv1alpha1.RolloutSpec{ Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{"secondLabel": "temp"}, From 4f8b22e9546bc065dc07d67d17baa8a660b450cd Mon Sep 17 00:00:00 2001 From: Safwan Date: Tue, 25 Nov 2025 01:48:54 +0500 Subject: [PATCH 09/24] resolved comments --- internal/pkg/handler/update.go | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/internal/pkg/handler/update.go b/internal/pkg/handler/update.go index 262399d..cc1e16b 100644 --- a/internal/pkg/handler/update.go +++ b/internal/pkg/handler/update.go @@ -41,17 +41,18 @@ func (r ResourceUpdatedHandler) Handle() error { func (r ResourceUpdatedHandler) GetConfig() (common.Config, string) { var oldSHAData string var config common.Config - if _, ok := r.Resource.(*v1.ConfigMap); ok { + switch res := r.Resource.(type) { + case *v1.ConfigMap: oldSHAData = util.GetSHAfromConfigmap(r.OldResource.(*v1.ConfigMap)) - config = common.GetConfigmapConfig(r.Resource.(*v1.ConfigMap)) - } else if _, ok := r.Resource.(*v1.Secret); ok { + config = common.GetConfigmapConfig(res) + case *v1.Secret: oldSHAData = util.GetSHAfromSecret(r.OldResource.(*v1.Secret).Data) - config = common.GetSecretConfig(r.Resource.(*v1.Secret)) - } else if _, ok := r.Resource.(*csiv1.SecretProviderClassPodStatus); ok { + config = common.GetSecretConfig(res) + case *csiv1.SecretProviderClassPodStatus: oldSHAData = util.GetSHAfromSecretProviderClassPodStatus(r.OldResource.(*csiv1.SecretProviderClassPodStatus).Status) - config = common.GetSecretProviderClassPodStatusConfig(r.Resource.(*csiv1.SecretProviderClassPodStatus)) - } else { - logrus.Warnf("Invalid resource: Resource should be 'Secret' or 'Configmap' but found, %v", r.Resource) + config = common.GetSecretProviderClassPodStatusConfig(res) + default: + logrus.Warnf("Invalid resource: Resource should be 'Secret', 'Configmap' or 'SecretProviderClassPodStatus' but found, %v", r.Resource) } return config, oldSHAData } From 1725f17b0bc5aff13990792dbdc3087247bf7ef5 Mon Sep 17 00:00:00 2001 From: Safwan Date: Tue, 25 Nov 2025 02:05:41 +0500 Subject: [PATCH 10/24] fixed namespace behavior issue --- internal/pkg/controller/controller.go | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/internal/pkg/controller/controller.go b/internal/pkg/controller/controller.go index f2a0143..a670d81 100644 --- a/internal/pkg/controller/controller.go +++ b/internal/pkg/controller/controller.go @@ -201,11 +201,7 @@ func (c *Controller) Update(old interface{}, new interface{}) { // Delete function to add an object to the queue in case of deleting a resource func (c *Controller) Delete(old interface{}) { - switch object := old.(type) { - case *v1.Namespace: - c.removeSelectedNamespaceFromCache(*object) - return - case *csiv1.SecretProviderClassPodStatus: + if _, ok := old.(*csiv1.SecretProviderClassPodStatus); ok { return } @@ -218,6 +214,12 @@ func (c *Controller) Delete(old interface{}) { }) } } + + switch object := old.(type) { + case *v1.Namespace: + c.removeSelectedNamespaceFromCache(*object) + return + } 
} // Run function for controller which handles the queue From c9cab4f6e0131de1676786887523f132fe42ac6f Mon Sep 17 00:00:00 2001 From: faizanahmad055 Date: Sat, 3 Jan 2026 19:32:43 +0100 Subject: [PATCH 11/24] Update chart for CSI driver Signed-off-by: faizanahmad055 --- .../chart/reloader/templates/clusterrole.yaml | 11 +++++++++++ .../chart/reloader/templates/deployment.yaml | 5 ++++- deployments/kubernetes/chart/reloader/values.yaml | 1 + internal/pkg/cmd/reloader.go | 1 + internal/pkg/util/util.go | 1 + 5 files changed, 18 insertions(+), 1 deletion(-) diff --git a/deployments/kubernetes/chart/reloader/templates/clusterrole.yaml b/deployments/kubernetes/chart/reloader/templates/clusterrole.yaml index 9f655aa..bd14dfe 100644 --- a/deployments/kubernetes/chart/reloader/templates/clusterrole.yaml +++ b/deployments/kubernetes/chart/reloader/templates/clusterrole.yaml @@ -105,6 +105,17 @@ rules: - create - get - update +{{- end}} +{{- if .Values.reloader.enableCSIIntegration }} + - apiGroups: + - "secrets-store.csi.x-k8s.io" + resources: + - secretproviderclasspodstatuses + - secretproviderclasses + verbs: + - list + - get + - watch {{- end}} - apiGroups: - "" diff --git a/deployments/kubernetes/chart/reloader/templates/deployment.yaml b/deployments/kubernetes/chart/reloader/templates/deployment.yaml index 16564b2..e568f9f 100644 --- a/deployments/kubernetes/chart/reloader/templates/deployment.yaml +++ b/deployments/kubernetes/chart/reloader/templates/deployment.yaml @@ -210,7 +210,7 @@ spec: {{- . | toYaml | nindent 10 }} {{- end }} {{- end }} - {{- if or (.Values.reloader.logFormat) (.Values.reloader.logLevel) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (include "reloader-namespaceSelector" .) (.Values.reloader.resourceLabelSelector) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) (eq .Values.reloader.isArgoRollouts true) (eq .Values.reloader.reloadOnCreate true) (eq .Values.reloader.reloadOnDelete true) (ne .Values.reloader.reloadStrategy "default") (.Values.reloader.enableHA) (.Values.reloader.autoReloadAll) (.Values.reloader.ignoreJobs) (.Values.reloader.ignoreCronJobs)}} + {{- if or (.Values.reloader.logFormat) (.Values.reloader.logLevel) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (include "reloader-namespaceSelector" .) 
(.Values.reloader.resourceLabelSelector) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) (eq .Values.reloader.isArgoRollouts true) (eq .Values.reloader.reloadOnCreate true) (eq .Values.reloader.reloadOnDelete true) (ne .Values.reloader.reloadStrategy "default") (.Values.reloader.enableHA) (.Values.reloader.autoReloadAll) (.Values.reloader.ignoreJobs) (.Values.reloader.ignoreCronJobs) (.Values.reloader.enableCSIIntegration)}} args: {{- if .Values.reloader.logFormat }} - "--log-format={{ .Values.reloader.logFormat }}" @@ -246,6 +246,9 @@ spec: - "--pprof-addr={{ .Values.reloader.pprofAddr }}" {{- end }} {{- end }} + {{- if .Values.reloader.enableCSIIntegration }} + - "--enable-csi-integration=true" + {{- end }} {{- if .Values.reloader.custom_annotations }} {{- if .Values.reloader.custom_annotations.configmap }} - "--configmap-annotation" diff --git a/deployments/kubernetes/chart/reloader/values.yaml b/deployments/kubernetes/chart/reloader/values.yaml index c9a46a0..a607491 100644 --- a/deployments/kubernetes/chart/reloader/values.yaml +++ b/deployments/kubernetes/chart/reloader/values.yaml @@ -49,6 +49,7 @@ reloader: enableHA: false # Set to true to enable pprof for profiling enablePProf: false + enableCSIIntegration: false # Address to start pprof server on. Default is ":6060" pprofAddr: ":6060" # Set to true if you have a pod security policy that enforces readOnlyRootFilesystem diff --git a/internal/pkg/cmd/reloader.go b/internal/pkg/cmd/reloader.go index f20e0b8..6bdb339 100644 --- a/internal/pkg/cmd/reloader.go +++ b/internal/pkg/cmd/reloader.go @@ -162,6 +162,7 @@ func startReloader(cmd *cobra.Command, args []string) { for k := range kube.ResourceMap { if k == "secretproviderclasspodstatuses" { if !options.EnableCSIIntegration { + logrus.Infof("EnableCSIIntegration is set to false, won't run secretproviderclasspodstatuses controller") continue } if !kube.IsCSIInstalled { diff --git a/internal/pkg/util/util.go b/internal/pkg/util/util.go index 53846f3..047d068 100644 --- a/internal/pkg/util/util.go +++ b/internal/pkg/util/util.go @@ -106,6 +106,7 @@ func ConfigureReloaderFlags(cmd *cobra.Command) { cmd.PersistentFlags().BoolVar(&options.SyncAfterRestart, "sync-after-restart", false, "Sync add events after reloader restarts") cmd.PersistentFlags().BoolVar(&options.EnablePProf, "enable-pprof", false, "Enable pprof for profiling") cmd.PersistentFlags().StringVar(&options.PProfAddr, "pprof-addr", ":6060", "Address to start pprof server on. Default is :6060") + cmd.PersistentFlags().BoolVar(&options.EnableCSIIntegration, "enable-csi-integration", false, "Enables CSI integration. 
Default is false.") } func GetIgnoredResourcesList() (List, error) { From 109971d8b70bf7072093fd2e9d3c7b67d514f602 Mon Sep 17 00:00:00 2001 From: Safwan Date: Sat, 27 Dec 2025 21:57:04 +0500 Subject: [PATCH 12/24] prioritize named resource --- pkg/common/common.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/pkg/common/common.go b/pkg/common/common.go index b6fe3b5..7c9d61e 100644 --- a/pkg/common/common.go +++ b/pkg/common/common.go @@ -266,15 +266,6 @@ func ShouldReload(config Config, resourceType string, annotations Map, podAnnota } } - reloaderEnabled, _ := strconv.ParseBool(reloaderEnabledValue) - typedAutoAnnotationEnabled, _ := strconv.ParseBool(typedAutoAnnotationEnabledValue) - if reloaderEnabled || typedAutoAnnotationEnabled || reloaderEnabledValue == "" && typedAutoAnnotationEnabledValue == "" && options.AutoReloadAll { - return ReloadCheckResult{ - ShouldReload: true, - AutoReload: true, - } - } - values := strings.Split(annotationValue, ",") for _, value := range values { value = strings.TrimSpace(value) @@ -297,6 +288,15 @@ func ShouldReload(config Config, resourceType string, annotations Map, podAnnota } } + reloaderEnabled, _ := strconv.ParseBool(reloaderEnabledValue) + typedAutoAnnotationEnabled, _ := strconv.ParseBool(typedAutoAnnotationEnabledValue) + if reloaderEnabled || typedAutoAnnotationEnabled || reloaderEnabledValue == "" && typedAutoAnnotationEnabledValue == "" && options.AutoReloadAll { + return ReloadCheckResult{ + ShouldReload: true, + AutoReload: true, + } + } + return ReloadCheckResult{ ShouldReload: false, } From 9c8c511ae5e38820e2bbcc33fc7f9c4b8f57d254 Mon Sep 17 00:00:00 2001 From: faizanahmad055 Date: Sun, 4 Jan 2026 00:45:15 +0100 Subject: [PATCH 13/24] Update dependencies and fix shouldReload issue Signed-off-by: faizanahmad055 --- go.mod | 68 +++++++++++++------------- go.sum | 149 ++++++++++++++++++++++++++++++--------------------------- 2 files changed, 112 insertions(+), 105 deletions(-) diff --git a/go.mod b/go.mod index 1b51b65..48f13d8 100644 --- a/go.mod +++ b/go.mod @@ -4,19 +4,19 @@ go 1.25.5 require ( github.com/argoproj/argo-rollouts v1.8.3 - github.com/openshift/api v0.0.0-20250411135543-10a8fa583797 - github.com/openshift/client-go v0.0.0-20250402181141-b3bad3b645f2 + github.com/openshift/api v0.0.0-20260102143802-d2ec16864f86 + github.com/openshift/client-go v0.0.0-20251223102348-558b0eef16bc github.com/parnurzeal/gorequest v0.3.0 - github.com/prometheus/client_golang v1.22.0 + github.com/prometheus/client_golang v1.23.2 github.com/sirupsen/logrus v1.9.3 - github.com/spf13/cobra v1.10.1 - github.com/stretchr/testify v1.10.0 - k8s.io/api v0.32.3 - k8s.io/apimachinery v0.32.3 - k8s.io/client-go v0.32.3 - k8s.io/kubectl v0.32.3 - k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 - sigs.k8s.io/secrets-store-csi-driver v1.5.4 + github.com/spf13/cobra v1.10.2 + github.com/stretchr/testify v1.11.1 + k8s.io/api v0.35.0 + k8s.io/apimachinery v0.35.0 + k8s.io/client-go v0.35.0 + k8s.io/kubectl v0.35.0 + k8s.io/utils v0.0.0-20251222233032-718f0e51e6d2 + sigs.k8s.io/secrets-store-csi-driver v1.5.5 ) require ( @@ -25,16 +25,14 @@ require ( github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/elazarl/goproxy v0.0.0-20240726154733-8b0c20506380 // indirect github.com/emicklei/go-restful/v3 v3.12.2 // indirect - github.com/fxamacker/cbor/v2 v2.8.0 // indirect - github.com/go-logr/logr v1.4.2 // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect + github.com/go-logr/logr v1.4.3
// indirect github.com/go-openapi/jsonpointer v0.21.1 // indirect github.com/go-openapi/jsonreference v0.21.0 // indirect github.com/go-openapi/swag v0.23.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.4 // indirect - github.com/google/gnostic-models v0.6.9 // indirect + github.com/google/gnostic-models v0.7.0 // indirect github.com/google/go-cmp v0.7.0 // indirect - github.com/google/gofuzz v1.2.0 // indirect github.com/google/uuid v1.6.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect @@ -42,41 +40,43 @@ require ( github.com/kylelemons/godebug v1.1.0 // indirect github.com/mailru/easyjson v0.9.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/moul/http2curl v1.0.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_model v0.6.2 // indirect - github.com/prometheus/common v0.63.0 // indirect - github.com/prometheus/procfs v0.16.0 // indirect + github.com/prometheus/common v0.66.1 // indirect + github.com/prometheus/procfs v0.16.1 // indirect github.com/smartystreets/goconvey v1.7.2 // indirect github.com/spf13/pflag v1.0.9 // indirect github.com/x448/float16 v0.8.4 // indirect - golang.org/x/net v0.39.0 // indirect - golang.org/x/oauth2 v0.29.0 // indirect - golang.org/x/sys v0.32.0 // indirect - golang.org/x/term v0.31.0 // indirect - golang.org/x/text v0.24.0 // indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/net v0.47.0 // indirect + golang.org/x/oauth2 v0.30.0 // indirect + golang.org/x/sys v0.39.0 // indirect + golang.org/x/term v0.38.0 // indirect + golang.org/x/text v0.32.0 // indirect golang.org/x/time v0.11.0 // indirect - google.golang.org/protobuf v1.36.6 // indirect - gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect + google.golang.org/protobuf v1.36.8 // indirect + gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect - sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect + k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect + sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect sigs.k8s.io/randfill v1.0.0 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect - sigs.k8s.io/yaml v1.4.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect + sigs.k8s.io/yaml v1.6.0 // indirect ) // Replacements for argo-rollouts replace ( github.com/go-check/check => github.com/go-check/check v0.0.0-20201130134442-10cb98267c6c - k8s.io/api v0.0.0 => k8s.io/api v0.32.3 - k8s.io/apimachinery v0.0.0 => k8s.io/apimachinery v0.32.3 - k8s.io/client-go v0.0.0 => k8s.io/client-go v0.32.3 + k8s.io/api v0.0.0 => k8s.io/api v0.35.0 + k8s.io/apimachinery v0.0.0 => k8s.io/apimachinery v0.35.0 + k8s.io/client-go v0.0.0 => k8s.io/client-go v0.35.0 k8s.io/cloud-provider v0.0.0 => k8s.io/cloud-provider v0.24.2 k8s.io/controller-manager v0.0.0 => k8s.io/controller-manager v0.24.2 k8s.io/cri-api v0.0.0 => k8s.io/cri-api v0.20.5-rc.0 @@ -85,7 
+85,7 @@ replace ( k8s.io/kube-controller-manager v0.0.0 => k8s.io/kube-controller-manager v0.24.2 k8s.io/kube-proxy v0.0.0 => k8s.io/kube-proxy v0.24.2 k8s.io/kube-scheduler v0.0.0 => k8s.io/kube-scheduler v0.24.2 - k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.32.3 + k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.35.0 k8s.io/kubelet v0.0.0 => k8s.io/kubelet v0.24.2 k8s.io/legacy-cloud-providers v0.0.0 => k8s.io/legacy-cloud-providers v0.24.2 k8s.io/mount-utils v0.0.0 => k8s.io/mount-utils v0.20.5-rc.0 diff --git a/go.sum b/go.sum index 05738d7..a1b7e7d 100644 --- a/go.sum +++ b/go.sum @@ -1,3 +1,5 @@ +github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= +github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/argoproj/argo-rollouts v1.8.3 h1:blbtQva4IK9r6gFh+dWkCrLnFdPOWiv9ubQYu36qeaA= github.com/argoproj/argo-rollouts v1.8.3/go.mod h1:kCAUvIfMGfOyVf3lvQbBt0nqQn4Pd+zB5/YwKv+UBa8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -13,10 +15,10 @@ github.com/elazarl/goproxy v0.0.0-20240726154733-8b0c20506380 h1:1NyRx2f4W4WBRyg github.com/elazarl/goproxy v0.0.0-20240726154733-8b0c20506380/go.mod h1:thX175TtLTzLj3p7N/Q9IiKZ7NF+p72cvL91emV0hzo= github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/fxamacker/cbor/v2 v2.8.0 h1:fFtUGXUzXPHTIUdne5+zzMPTfffl3RD5qYnkY40vtxU= -github.com/fxamacker/cbor/v2 v2.8.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-openapi/jsonpointer v0.21.1 h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic= github.com/go-openapi/jsonpointer v0.21.1/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk= github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= @@ -27,18 +29,13 @@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= -github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= +github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod 
h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= -github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= -github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= @@ -66,21 +63,22 @@ github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUt github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/moul/http2curl v1.0.0 h1:dRMWoAtb+ePxMlLkrCbAqh4TlPHXvoGUSQ323/9Zahs= github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= -github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= -github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= -github.com/openshift/api v0.0.0-20250411135543-10a8fa583797 h1:8x3G8QOZqo2bRAL8JFlPz/odqQECI/XmlZeRwnFxJ8I= -github.com/openshift/api v0.0.0-20250411135543-10a8fa583797/go.mod h1:yk60tHAmHhtVpJQo3TwVYq2zpuP70iJIFDCmeKMIzPw= -github.com/openshift/client-go v0.0.0-20250402181141-b3bad3b645f2 h1:bPXR0R8zp1o12nSUphN26hSM+OKYq5pMorbDCpApzDQ= -github.com/openshift/client-go v0.0.0-20250402181141-b3bad3b645f2/go.mod h1:dT1cJyVTperQ53GvVRa+GZ27r02fDZy2k5j+9QoQsCo= +github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns= +github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo= +github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= +github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= +github.com/openshift/api 
v0.0.0-20260102143802-d2ec16864f86 h1:Vsqg+WqSA91LjrwK5lzkSCjztK/B+T8MPKI3MIALx3w= +github.com/openshift/api v0.0.0-20260102143802-d2ec16864f86/go.mod h1:d5uzF0YN2nQQFA0jIEWzzOZ+edmo6wzlGLvx5Fhz4uY= +github.com/openshift/client-go v0.0.0-20251223102348-558b0eef16bc h1:nIlRaJfr/yGjPV15MNF5eVHLAGyXFjcUzO+hXeWDDk8= +github.com/openshift/client-go v0.0.0-20251223102348-558b0eef16bc/go.mod h1:cs9BwTu96sm2vQvy7r9rOiltgu90M6ju2qIHFG9WU+o= github.com/parnurzeal/gorequest v0.3.0 h1:SoFyqCDC9COr1xuS6VA8fC8RU7XyrJZN2ona1kEX7FI= github.com/parnurzeal/gorequest v0.3.0/go.mod h1:3Kh2QUMJoqw3icWAecsyzkpY7UzRfDhbRdTjtNwNiUE= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -88,16 +86,16 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= -github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= -github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k= -github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18= -github.com/prometheus/procfs v0.16.0 h1:xh6oHhKwnOJKMYiYBDWmkHqQPyiY40sny36Cmx2bbsM= -github.com/prometheus/procfs v0.16.0/go.mod h1:8veyXUu3nGP7oaCxhX6yeaM5u4stL2FeMXnCqhDthZg= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= +github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= +github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= +github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= @@ -105,50 +103,60 @@ github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg= github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= -github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= -github.com/spf13/cobra 
v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= +github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= +github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY= github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= +golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= -golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= -golang.org/x/oauth2 v0.29.0 h1:WdYw2tdTK1S8olAzWHdgeqfy+Mtm9XNhv/xJsY65d98= -golang.org/x/oauth2 v0.29.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/net v0.47.0 
h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= -golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= -golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= +golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= +golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q= +golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= -golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= +golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= +golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -156,46 +164,45 @@ golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= -golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= +golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= +golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors 
v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= -gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo= +gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls= -k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k= -k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= -k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= -k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU= -k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY= +k8s.io/api v0.35.0 h1:iBAU5LTyBI9vw3L5glmat1njFK34srdLmktWwLTprlY= +k8s.io/api v0.35.0/go.mod h1:AQ0SNTzm4ZAczM03QH42c7l3bih1TbAXYo0DkF8ktnA= +k8s.io/apimachinery v0.35.0 h1:Z2L3IHvPVv/MJ7xRxHEtk6GoJElaAqDCCU0S6ncYok8= +k8s.io/apimachinery v0.35.0/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns= +k8s.io/client-go v0.35.0 h1:IAW0ifFbfQQwQmga0UdoH0yvdqrbwMdq9vIFEhRpxBE= +k8s.io/client-go v0.35.0/go.mod h1:q2E5AAyqcbeLGPdoRB+Nxe3KYTfPce1Dnu1myQdqz9o= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= -k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= -k8s.io/kubectl v0.32.3 h1:VMi584rbboso+yjfv0d8uBHwwxbC438LKq+dXd5tOAI= -k8s.io/kubectl v0.32.3/go.mod h1:6Euv2aso5GKzo/UVMacV6C7miuyevpfI91SvBvV9Zdg= -k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck= -k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= -sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= -sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod 
h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE= +k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ= +k8s.io/kubectl v0.35.0 h1:cL/wJKHDe8E8+rP3G7avnymcMg6bH6JEcR5w5uo06wc= +k8s.io/kubectl v0.35.0/go.mod h1:VR5/TSkYyxZwrRwY5I5dDq6l5KXmiCb+9w8IKplk3Qo= +k8s.io/utils v0.0.0-20251222233032-718f0e51e6d2 h1:OfgiEo21hGiwx1oJUU5MpEaeOEg6coWndBkZF/lkFuE= +k8s.io/utils v0.0.0-20251222233032-718f0e51e6d2/go.mod h1:xDxuJ0whA3d0I4mf/C4ppKHxXynQ+fxnkmQH0vTHnuk= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= -sigs.k8s.io/secrets-store-csi-driver v1.5.4 h1:enl+v1+JbKDyVjdfT/7CillZsc4rLAM9tTHyf7GeLxc= -sigs.k8s.io/secrets-store-csi-driver v1.5.4/go.mod h1:Ct85xqsKLk/dxkj8inRjWA3RJsXXkPLjNSAJ0db5vKs= -sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc= -sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= -sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= -sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +sigs.k8s.io/secrets-store-csi-driver v1.5.5 h1:LJDpDL5TILhlP68nGvtGSlJFxSDgAD2m148NT0Ts7os= +sigs.k8s.io/secrets-store-csi-driver v1.5.5/go.mod h1:i2WqLicYH00hrTG3JAzICPMF4HL4KMEORlDt9UQoZLk= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= From 85f1c13de9e5ac72857a6890bb0814564dfd21de Mon Sep 17 00:00:00 2001 From: faizanahmad055 Date: Sun, 4 Jan 2026 00:47:13 +0100 Subject: [PATCH 14/24] Add CSI integration in rbac Signed-off-by: faizanahmad055 --- .../kubernetes/chart/reloader/templates/role.yaml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/deployments/kubernetes/chart/reloader/templates/role.yaml b/deployments/kubernetes/chart/reloader/templates/role.yaml index 70a6815..7355d87 100644 --- a/deployments/kubernetes/chart/reloader/templates/role.yaml +++ b/deployments/kubernetes/chart/reloader/templates/role.yaml @@ -92,6 +92,17 @@ rules: - create - get - update +{{- end}} +{{- if .Values.reloader.enableCSIIntegration }} + - apiGroups: + - "secrets-store.csi.x-k8s.io" + resources: + - secretproviderclasspodstatuses + - secretproviderclasses + verbs: + - list + - get + - watch {{- end}} - apiGroups: - "" From 0f1d02e97557ccbb0508e62d4c86527036961907 Mon Sep 17 00:00:00 2001 From: faizanahmad055 Date: Sun, 4 Jan 2026 02:40:42 +0100 Subject: [PATCH 15/24] Readme update and code refactor Signed-off-by: faizanahmad055 --- README.md | 46 +++++++++++++++++-- .../pkg/callbacks/rolling_upgrade_test.go | 2 +- internal/pkg/cmd/reloader.go | 23 ++++++---- internal/pkg/constants/constants.go | 2 + internal/pkg/controller/controller.go | 24 ++++++---- internal/pkg/controller/controller_test.go | 2 +- internal/pkg/handler/pause_deployment_test.go | 4 +- internal/pkg/handler/update.go | 23 +++++++--- 
internal/pkg/handler/upgrade.go | 15 +++--- internal/pkg/handler/upgrade_test.go | 2 +- 10 files changed, 104 insertions(+), 39 deletions(-) diff --git a/README.md b/README.md index ae0a00a..4fe53f3 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,7 @@ ## 🔁 What is Reloader? -Reloader is a Kubernetes controller that automatically triggers rollouts of workloads (like Deployments, StatefulSets, and more) whenever referenced `Secrets` or `ConfigMaps` are updated. +Reloader is a Kubernetes controller that automatically triggers rollouts of workloads (like Deployments, StatefulSets, and more) whenever referenced `Secrets`, `ConfigMaps` or **optionally CSI-mounted secrets** are updated. In a traditional Kubernetes setup, updating a `Secret` or `ConfigMap` does not automatically restart or redeploy your workloads. This can lead to stale configurations running in production, especially when dealing with dynamic values like credentials, feature flags, or environment configs. @@ -169,9 +169,11 @@ metadata: This instructs Reloader to skip all reload logic for that resource across all workloads. -### 4. ⚙️ Workload-Specific Rollout Strategy +### 4. ⚙️ Workload-Specific Rollout Strategy (Argo Rollouts Only) -By default, Reloader uses the **rollout** strategy — it updates the pod template to trigger a new rollout. This works well in most cases, but it can cause problems if you're using GitOps tools like ArgoCD, which detect this as configuration drift. +Note: This is only applicable when using [Argo Rollouts](https://argoproj.github.io/argo-rollouts/). It is ignored for standard Kubernetes Deployments, StatefulSets, or DaemonSets. To use this feature, Argo Rollouts support must be enabled in Reloader (for example via --is-argo-rollouts=true). + +By default, Reloader triggers the Argo Rollout controller to perform a standard rollout by updating the pod template. This works well in most cases, however, because this modifies the workload spec, GitOps tools like ArgoCD will detect this as "Configuration Drift" and mark your application as OutOfSync. To avoid that, you can switch to the **restart** strategy, which simply restarts the pod without changing the pod template. @@ -189,8 +191,10 @@ metadata: ✅ Use `restart` if: 1. You're using GitOps and want to avoid drift -1. You want a quick restart without changing the workload spec -1. Your platform restricts metadata changes +2. You want a quick restart without changing the workload spec +3. Your platform restricts metadata changes + +This setting affects Argo Rollouts behavior, not Argo CD sync settings. ### 5. ❗ Annotation Behavior Rules & Compatibility @@ -239,6 +243,38 @@ This feature allows you to pause rollouts for a deployment for a specified durat 1. ✅ Your deployment references multiple ConfigMaps or Secrets that may be updated at the same time. 1. ✅ You want to minimize unnecessary rollouts and reduce downtime caused by back-to-back configuration changes. +### 8. 🔐 CSI Secret Provider Support + +Reloader supports the [Secrets Store CSI Driver](https://secrets-store-csi-driver.sigs.k8s.io/), which allows mounting secrets from external secret stores (like AWS Secrets Manager, Azure Key Vault, HashiCorp Vault) directly into pods. +Unlike Kubernetes Secret objects, CSI-mounted secrets do not always trigger native Kubernetes update events. Reloader solves this by watching CSI status resources and restarting affected workloads when mounted secret versions change. 
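For illustration, the sketch below shows roughly what a workload wired up for CSI-based reloads can look like: a Deployment that mounts secrets through the Secrets Store CSI Driver and carries a Reloader annotation pointing at the `SecretProviderClass` it depends on. This is a minimal sketch, not the project's official example — the names (`my-app`, `my-spc`) are placeholders. The CSI volume stanza follows the standard `secrets-store.csi.x-k8s.io` driver syntax; only the annotation is Reloader-specific.

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-app
  annotations:
    # Reload this workload whenever the named SecretProviderClass rotates
    secretproviderclass.reloader.stakater.com/reload: "my-spc"
spec:
  replicas: 1
  selector:
    matchLabels:
      app: my-app
  template:
    metadata:
      labels:
        app: my-app
    spec:
      containers:
        - name: app
          image: nginx:1.27
          volumeMounts:
            - name: secrets-store
              mountPath: /mnt/secrets
              readOnly: true
      volumes:
        - name: secrets-store
          csi:
            driver: secrets-store.csi.x-k8s.io
            readOnly: true
            volumeAttributes:
              secretProviderClass: "my-spc"
```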
+ +#### How it works + +When secret rotation is enabled, the Secrets Store CSI Driver updates a Kubernetes resource called: `SecretProviderClassPodStatus` + +This resource reflects the currently mounted secret versions for a pod. +Reloader watches these updates and triggers a rollout when a change is detected. + +#### Prerequisites + +- Secrets Store CSI Driver must be installed in your cluster +- Secret rotation enabled in the CSI driver. +- Enable CSI integration in Reloader: `--enable-csi-integration=true` + +#### Annotations for CSI-mounted Secrets + +| Annotation | Description | +|--------------------------------------------|----------------------------------------------------------------------| +| `reloader.stakater.com/auto: "true"` | Reloads workload when CSI-mounted secrets change | +| `secretproviderclass.reloader.stakater.com/reload: "my-spc"` | Reloads when specific SecretProviderClass changes | + +#### Notes & Limitations + +Reloader reacts to CSI status changes, not direct updates to external secret stores +Secret rotation must be enabled in the CSI driver for updates to be detected +CSI limitations (such as subPath mounts) still apply and may require pod restarts +If secrets are synced to Kubernetes Secret objects, standard Reloader behavior applies and CSI support may not be required + ## 🚀 Installation ### 1. 📦 Helm diff --git a/internal/pkg/callbacks/rolling_upgrade_test.go b/internal/pkg/callbacks/rolling_upgrade_test.go index 452867f..75583de 100644 --- a/internal/pkg/callbacks/rolling_upgrade_test.go +++ b/internal/pkg/callbacks/rolling_upgrade_test.go @@ -49,7 +49,7 @@ func newTestFixtures() testFixtures { func setupTestClients() kube.Clients { return kube.Clients{ - KubernetesClient: fake.NewSimpleClientset(), + KubernetesClient: fake.NewClientset(), ArgoRolloutClient: fakeargoclientset.NewSimpleClientset(), } } diff --git a/internal/pkg/cmd/reloader.go b/internal/pkg/cmd/reloader.go index 6bdb339..771e2df 100644 --- a/internal/pkg/cmd/reloader.go +++ b/internal/pkg/cmd/reloader.go @@ -160,15 +160,8 @@ func startReloader(cmd *cobra.Command, args []string) { var controllers []*controller.Controller for k := range kube.ResourceMap { - if k == "secretproviderclasspodstatuses" { - if !options.EnableCSIIntegration { - logrus.Infof("EnableCSIIntegration is set to false, won't run secretproviderclasspodstatuses controller") - continue - } - if !kube.IsCSIInstalled { - logrus.Infof("Can't run secretproviderclasspodstatuses controller as CSI CRDs are not installed") - continue - } + if k == constants.SecretProviderClassController && !shouldRunCSIController() { + continue } if ignoredResourcesList.Contains(k) || (len(namespaceLabelSelector) == 0 && k == "namespaces") { @@ -218,3 +211,15 @@ func startPProfServer() { logrus.Errorf("Failed to start pprof server: %v", err) } } + +func shouldRunCSIController() bool { + if !options.EnableCSIIntegration { + logrus.Info("Skipping secretproviderclasspodstatuses controller: EnableCSIIntegration is disabled") + return false + } + if !kube.IsCSIInstalled { + logrus.Info("Skipping secretproviderclasspodstatuses controller: CSI CRDs not installed") + return false + } + return true +} diff --git a/internal/pkg/constants/constants.go b/internal/pkg/constants/constants.go index 0d1f1c7..8025a29 100644 --- a/internal/pkg/constants/constants.go +++ b/internal/pkg/constants/constants.go @@ -24,6 +24,8 @@ const ( EnvVarsReloadStrategy = "env-vars" // AnnotationsReloadStrategy instructs Reloader to add pod template annotations to facilitate a 
restart AnnotationsReloadStrategy = "annotations" + // SecretProviderClassController enables support for SecretProviderClassPodStatus resources + SecretProviderClassController = "secretproviderclasspodstatuses" ) // Leadership election related consts diff --git a/internal/pkg/controller/controller.go b/internal/pkg/controller/controller.go index a670d81..519923e 100644 --- a/internal/pkg/controller/controller.go +++ b/internal/pkg/controller/controller.go @@ -2,9 +2,11 @@ package controller import ( "fmt" + "slices" "time" "github.com/sirupsen/logrus" + "github.com/stakater/Reloader/internal/pkg/constants" "github.com/stakater/Reloader/internal/pkg/handler" "github.com/stakater/Reloader/internal/pkg/metrics" "github.com/stakater/Reloader/internal/pkg/options" @@ -21,7 +23,6 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" "k8s.io/kubectl/pkg/scheme" - "k8s.io/utils/strings/slices" csiv1 "sigs.k8s.io/secrets-store-csi-driver/apis/v1" ) @@ -80,13 +81,9 @@ func NewController( } } - getterRESTClient := client.CoreV1().RESTClient() - if resource == "secretproviderclasspodstatuses" { - csiClient, err := kube.GetCSIClient() - if err != nil { - logrus.Fatal(err) - } - getterRESTClient = csiClient.SecretsstoreV1().RESTClient() + getterRESTClient, err := getClientForResource(resource, client) + if err != nil { + return nil, fmt.Errorf("failed to initialize REST client for %s: %w", resource, err) } listWatcher := cache.NewFilteredListWatchFromClient(getterRESTClient, resource, namespace, optionsModifier) @@ -301,3 +298,14 @@ func (c *Controller) handleErr(err error, key interface{}) { logrus.Errorf("Dropping key out of the queue: %v", err) logrus.Debugf("Dropping the key %q out of the queue: %v", key, err) } + +func getClientForResource(resource string, coreClient kubernetes.Interface) (cache.Getter, error) { + if resource == constants.SecretProviderClassController { + csiClient, err := kube.GetCSIClient() + if err != nil { + return nil, fmt.Errorf("failed to get CSI client: %w", err) + } + return csiClient.SecretsstoreV1().RESTClient(), nil + } + return coreClient.CoreV1().RESTClient(), nil +} diff --git a/internal/pkg/controller/controller_test.go b/internal/pkg/controller/controller_test.go index 0399933..778b38d 100644 --- a/internal/pkg/controller/controller_test.go +++ b/internal/pkg/controller/controller_test.go @@ -2757,7 +2757,7 @@ func TestController_resourceInNamespaceSelector(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - fakeClient := fake.NewSimpleClientset() + fakeClient := fake.NewClientset() namespace, _ := fakeClient.CoreV1().Namespaces().Create(context.Background(), &tt.fields.namespace, metav1.CreateOptions{}) logrus.Infof("created fakeClient namespace for testing = %s", namespace.Name) diff --git a/internal/pkg/handler/pause_deployment_test.go b/internal/pkg/handler/pause_deployment_test.go index c14cbfc..19e7ac6 100644 --- a/internal/pkg/handler/pause_deployment_test.go +++ b/internal/pkg/handler/pause_deployment_test.go @@ -244,7 +244,7 @@ func TestHandleMissingTimerSimple(t *testing.T) { }() t.Run(test.name, func(t *testing.T) { - fakeClient := testclient.NewSimpleClientset() + fakeClient := testclient.NewClientset() clients := kube.Clients{ KubernetesClient: fakeClient, } @@ -337,7 +337,7 @@ func TestPauseDeployment(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - fakeClient := testclient.NewSimpleClientset() + fakeClient := testclient.NewClientset() clients := kube.Clients{ 
KubernetesClient: fakeClient, } diff --git a/internal/pkg/handler/update.go b/internal/pkg/handler/update.go index cc1e16b..25a4380 100644 --- a/internal/pkg/handler/update.go +++ b/internal/pkg/handler/update.go @@ -39,20 +39,31 @@ func (r ResourceUpdatedHandler) Handle() error { // GetConfig gets configurations containing SHA, annotations, namespace and resource name func (r ResourceUpdatedHandler) GetConfig() (common.Config, string) { - var oldSHAData string - var config common.Config + var ( + oldSHAData string + config common.Config + ) + switch res := r.Resource.(type) { case *v1.ConfigMap: - oldSHAData = util.GetSHAfromConfigmap(r.OldResource.(*v1.ConfigMap)) + if old, ok := r.OldResource.(*v1.ConfigMap); ok && old != nil { + oldSHAData = util.GetSHAfromConfigmap(old) + } config = common.GetConfigmapConfig(res) + case *v1.Secret: - oldSHAData = util.GetSHAfromSecret(r.OldResource.(*v1.Secret).Data) + if old, ok := r.OldResource.(*v1.Secret); ok && old != nil { + oldSHAData = util.GetSHAfromSecret(old.Data) + } config = common.GetSecretConfig(res) + case *csiv1.SecretProviderClassPodStatus: - oldSHAData = util.GetSHAfromSecretProviderClassPodStatus(r.OldResource.(*csiv1.SecretProviderClassPodStatus).Status) + if old, ok := r.OldResource.(*csiv1.SecretProviderClassPodStatus); ok && old != nil && old.Status.Objects != nil { + oldSHAData = util.GetSHAfromSecretProviderClassPodStatus(old.Status) + } config = common.GetSecretProviderClassPodStatusConfig(res) default: - logrus.Warnf("Invalid resource: Resource should be 'Secret', 'Configmap' or 'SecretProviderClassPodStatus' but found, %v", r.Resource) + logrus.Warnf("Invalid resource: Resource should be 'Secret', 'Configmap' or 'SecretProviderClassPodStatus' but found, %T", r.Resource) } return config, oldSHAData } diff --git a/internal/pkg/handler/upgrade.go b/internal/pkg/handler/upgrade.go index f5b7ead..b10bfbc 100644 --- a/internal/pkg/handler/upgrade.go +++ b/internal/pkg/handler/upgrade.go @@ -634,11 +634,10 @@ func updateEnvVar(container *v1.Container, envVar string, shaData string) consta } func secretProviderClassEnvReloaded(containers []v1.Container, envVar string, shaData string) bool { - for i := range containers { - envs := containers[i].Env - for j := range envs { - if envs[j].Name == envVar { - return envs[j].Value == shaData + for _, container := range containers { + for _, env := range container.Env { + if env.Name == envVar { + return env.Value == shaData } } } @@ -649,7 +648,11 @@ func populateAnnotationsFromSecretProviderClass(clients kube.Clients, config *co obj, err := clients.CSIClient.SecretsstoreV1().SecretProviderClasses(config.Namespace).Get(context.TODO(), config.ResourceName, metav1.GetOptions{}) annotations := make(map[string]string) if err != nil { - logrus.Infof("Couldn't find secretproviderclass '%s' in '%s' namespace for typed annotation", config.ResourceName, config.Namespace) + if apierrors.IsNotFound(err) { + logrus.Warnf("SecretProviderClass '%s' not found in namespace '%s'", config.ResourceName, config.Namespace) + } else { + logrus.Errorf("Failed to get SecretProviderClass '%s' in namespace '%s': %v", config.ResourceName, config.Namespace, err) + } } else if obj.Annotations != nil { annotations = obj.Annotations } diff --git a/internal/pkg/handler/upgrade_test.go b/internal/pkg/handler/upgrade_test.go index 5bf490f..c1897f6 100644 --- a/internal/pkg/handler/upgrade_test.go +++ b/internal/pkg/handler/upgrade_test.go @@ -32,7 +32,7 @@ import ( var ( clients = kube.Clients{ - KubernetesClient: 
testclient.NewSimpleClientset(), + KubernetesClient: testclient.NewClientset(), CSIClient: csitestclient.NewSimpleClientset(), } From 7e9d571e1e6b562082b2e4c2ede04d17e309ee53 Mon Sep 17 00:00:00 2001 From: faizanahmad055 Date: Sun, 4 Jan 2026 20:10:52 +0100 Subject: [PATCH 16/24] Readme update and change SHA1 to SHA512 Signed-off-by: faizanahmad055 --- README.md | 31 +++++++++++++++++++--- docs/How-it-works.md | 4 +-- docs/Reloader-vs-ConfigmapController.md | 14 +++++----- docs/Reloader-vs-k8s-trigger-controller.md | 2 +- internal/pkg/crypto/sha.go | 18 +++++-------- 5 files changed, 44 insertions(+), 25 deletions(-) diff --git a/README.md b/README.md index 4fe53f3..e7f1d4b 100644 --- a/README.md +++ b/README.md @@ -263,10 +263,33 @@ Reloader watches these updates and triggers a rollout when a change is detected. #### Annotations for CSI-mounted Secrets -| Annotation | Description | -|--------------------------------------------|----------------------------------------------------------------------| -| `reloader.stakater.com/auto: "true"` | Reloads workload when CSI-mounted secrets change | -| `secretproviderclass.reloader.stakater.com/reload: "my-spc"` | Reloads when specific SecretProviderClass changes | +| Annotation | Description | +|------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------| +| `reloader.stakater.com/auto: "true"` | Global Discovery: Automatically discovers and reloads the workload when any mounted ConfigMap or Secret is updated. | +| `secretproviderclass.reloader.stakater.com/auto: 'true'` | CSI Discovery: Specifically watches for updates to all SecretProviderClasses used by the workload (CSI driver integration). | +| `secretproviderclass.reloader.stakater.com/reload: "my-secretproviderclass"` | Targeted Reload: Only reloads the workload when the specifically named SecretProviderClass(es) are updated. | + +Reloader monitors changes at the **per-secret level** by watching the `SecretProviderClassPodStatus`. Make sure each secret you want to monitor is properly defined with a `secretKey` in your `SecretProviderClass`: + +```yaml +apiVersion: secrets-store.csi.x-k8s.io/v1 +kind: SecretProviderClass +metadata: + name: vault-reloader-demo + namespace: test +spec: + provider: vault + parameters: + vaultAddress: "http://vault.vault.svc:8200" + vaultSkipTLSVerify: "true" + roleName: "demo-role" + objects: | + - objectName: "password" + secretPath: "secret/data/reloader-demo" + secretKey: "password" +``` +***Important***: Reloader tracks changes to individual secrets (identified by secretKey). If your SecretProviderClass doesn't specify secretKey for each object, Reloader may not detect updates correctly. + #### Notes & Limitations diff --git a/docs/How-it-works.md b/docs/How-it-works.md index c0ae964..6a946f9 100644 --- a/docs/How-it-works.md +++ b/docs/How-it-works.md @@ -76,7 +76,7 @@ Note: Rolling upgrade also works in the same way for secrets. ### Hash Value Computation -Reloader uses SHA1 to compute hash value. SHA1 is used because it is efficient and less prone to collision. +Reloader uses SHA512 to compute hash value. SHA1 is used because it is efficient and less prone to collision. ## Monitor All Namespaces @@ -90,4 +90,4 @@ The output file can then be used to deploy Reloader in specific namespace. ## Compatibility With Helm Install and Upgrade -Reloader has no impact on helm deployment cycle. 
Reloader only injects an environment variable in `deployment`, `daemonset` or `statefulset`. The environment variable contains the SHA1 value of `ConfigMaps` or `Secrets` data. So if a deployment is created using Helm and Reloader updates the deployment, then next time you upgrade the helm release, Reloader will do nothing except changing that environment variable value in `deployment` , `daemonset` or `statefulset`. +Reloader has no impact on helm deployment cycle. Reloader only injects an environment variable in `deployment`, `daemonset` or `statefulset`. The environment variable contains the SHA512 value of `ConfigMaps` or `Secrets` data. So if a deployment is created using Helm and Reloader updates the deployment, then next time you upgrade the helm release, Reloader will do nothing except changing that environment variable value in `deployment` , `daemonset` or `statefulset`. diff --git a/docs/Reloader-vs-ConfigmapController.md b/docs/Reloader-vs-ConfigmapController.md index f866f89..c8bcfc8 100644 --- a/docs/Reloader-vs-ConfigmapController.md +++ b/docs/Reloader-vs-ConfigmapController.md @@ -2,10 +2,10 @@ Reloader is inspired from [`configmapcontroller`](https://github.com/fabric8io/configmapcontroller) but there are many ways in which it differs from `configmapcontroller`. Below is the small comparison between these two controllers. -| Reloader | ConfigMap | -|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Reloader can watch both `Secrets` and `ConfigMaps`. | `configmapcontroller` can only watch changes in `ConfigMaps`. It cannot detect changes in other resources like `Secrets`. | -| Reloader can perform rolling upgrades on `deployments` as well as on `statefulsets` and `daemonsets` | `configmapcontroller` can only perform rolling upgrades on `deployments`. It currently does not support rolling upgrades on `statefulsets` and `daemonsets` | -| Reloader provides both unit test cases and end to end integration test cases for future updates. So one can make sure that new changes do not break any old functionality. | Currently there are not any unit test cases or end to end integration test cases in `configmap-controller`. It add difficulties for any additional updates in `configmap-controller` and one can not know for sure whether new changes breaks any old functionality or not. | -| Reloader uses SHA1 to encode the change in `ConfigMap` or `Secret`. It then saves the SHA1 value in `STAKATER_FOO_CONFIGMAP` or `STAKATER_FOO_SECRET` environment variable depending upon where the change has happened. The use of SHA1 provides a concise 40 characters encoded value that is very less prone to collision. | `configmap-controller` uses `FABRICB_FOO_REVISION` environment variable to store any change in `ConfigMap` controller. It does not encode it or convert it in suitable hash value to avoid data pollution in deployment. 
| -| Reloader allows you to customize your own annotation (for both `Secrets` and `ConfigMaps`) using command line flags | `configmap-controller` restricts you to only their provided annotation | +| Reloader | ConfigMap | +|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Reloader can watch both `Secrets` and `ConfigMaps`. | `configmapcontroller` can only watch changes in `ConfigMaps`. It cannot detect changes in other resources like `Secrets`. | +| Reloader can perform rolling upgrades on `deployments` as well as on `statefulsets` and `daemonsets` | `configmapcontroller` can only perform rolling upgrades on `deployments`. It currently does not support rolling upgrades on `statefulsets` and `daemonsets` | +| Reloader provides both unit test cases and end to end integration test cases for future updates. So one can make sure that new changes do not break any old functionality. | Currently there are not any unit test cases or end to end integration test cases in `configmap-controller`. It add difficulties for any additional updates in `configmap-controller` and one can not know for sure whether new changes breaks any old functionality or not. | +| Reloader uses SHA512 to encode the change in `ConfigMap` or `Secret`. It then saves the SHA1 value in `STAKATER_FOO_CONFIGMAP` or `STAKATER_FOO_SECRET` environment variable depending upon where the change has happened. The use of SHA1 provides a concise 40 characters encoded value that is very less prone to collision. | `configmap-controller` uses `FABRICB_FOO_REVISION` environment variable to store any change in `ConfigMap` controller. It does not encode it or convert it in suitable hash value to avoid data pollution in deployment. | +| Reloader allows you to customize your own annotation (for both `Secrets` and `ConfigMaps`) using command line flags | `configmap-controller` restricts you to only their provided annotation | diff --git a/docs/Reloader-vs-k8s-trigger-controller.md b/docs/Reloader-vs-k8s-trigger-controller.md index 811987a..fe0f6d9 100644 --- a/docs/Reloader-vs-k8s-trigger-controller.md +++ b/docs/Reloader-vs-k8s-trigger-controller.md @@ -6,7 +6,7 @@ Reloader and k8s-trigger-controller are both built for same purpose. So there ar - Both controllers support change detection in `ConfigMaps` and `Secrets` - Both controllers support deployment `rollout` -- Both controllers use SHA1 for hashing +- Reloader controller use SHA512 for hashing - Both controllers have end to end as well as unit test cases. 
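To make the hashing bullet above concrete, here is a minimal, self-contained sketch of the SHA-512/256 scheme this patch series moves to (see the `crypto/sha.go` change below): the digest is truncated to 32 bytes and hex-encoded, giving a stable 64-character value suitable for an environment variable or annotation. The function name and sample input here are illustrative only, not the project's exact code.

```go
package main

import (
	"crypto/sha512"
	"encoding/hex"
	"fmt"
)

// hashConfigData returns the hex-encoded SHA-512/256 digest of a
// configuration payload. Sum512_256 produces a 32-byte digest, so the
// returned string is always 64 hex characters long.
func hashConfigData(data string) string {
	sum := sha512.Sum512_256([]byte(data))
	return hex.EncodeToString(sum[:])
}

func main() {
	// Example: hashing a ConfigMap-style payload (illustrative input).
	fmt.Println(hashConfigData("username=admin\npassword=secret"))
}
```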
## Differences diff --git a/internal/pkg/crypto/sha.go b/internal/pkg/crypto/sha.go index 043fc22..9235425 100644 --- a/internal/pkg/crypto/sha.go +++ b/internal/pkg/crypto/sha.go @@ -1,20 +1,16 @@ package crypto import ( - "crypto/sha1" - "fmt" - "io" - - "github.com/sirupsen/logrus" + "crypto/sha512" + "encoding/hex" ) // GenerateSHA generates SHA from string func GenerateSHA(data string) string { - hasher := sha1.New() - _, err := io.WriteString(hasher, data) - if err != nil { - logrus.Errorf("Unable to write data in hash writer %v", err) + if data == "" { + return "" } - sha := hasher.Sum(nil) - return fmt.Sprintf("%x", sha) + + hash := sha512.Sum512_256([]byte(data)) + return hex.EncodeToString(hash[:]) } From 4b90335362096de60340bde801b47d4006908f3b Mon Sep 17 00:00:00 2001 From: faizanahmad055 Date: Sun, 4 Jan 2026 21:52:07 +0100 Subject: [PATCH 17/24] Readme update and fix tests Signed-off-by: faizanahmad055 --- README.md | 2 +- docs/Reloader-vs-ConfigmapController.md | 14 +++++++------- internal/pkg/handler/upgrade_test.go | 12 ++++++------ 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index e7f1d4b..5c22154 100644 --- a/README.md +++ b/README.md @@ -288,8 +288,8 @@ spec: secretPath: "secret/data/reloader-demo" secretKey: "password" ``` -***Important***: Reloader tracks changes to individual secrets (identified by secretKey). If your SecretProviderClass doesn't specify secretKey for each object, Reloader may not detect updates correctly. +***Important***: Reloader tracks changes to individual secrets (identified by secretKey). If your SecretProviderClass doesn't specify secretKey for each object, Reloader may not detect updates correctly. #### Notes & Limitations diff --git a/docs/Reloader-vs-ConfigmapController.md b/docs/Reloader-vs-ConfigmapController.md index c8bcfc8..3ddab08 100644 --- a/docs/Reloader-vs-ConfigmapController.md +++ b/docs/Reloader-vs-ConfigmapController.md @@ -2,10 +2,10 @@ Reloader is inspired from [`configmapcontroller`](https://github.com/fabric8io/configmapcontroller) but there are many ways in which it differs from `configmapcontroller`. Below is the small comparison between these two controllers. -| Reloader | ConfigMap | -|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Reloader can watch both `Secrets` and `ConfigMaps`. | `configmapcontroller` can only watch changes in `ConfigMaps`. It cannot detect changes in other resources like `Secrets`. | -| Reloader can perform rolling upgrades on `deployments` as well as on `statefulsets` and `daemonsets` | `configmapcontroller` can only perform rolling upgrades on `deployments`. It currently does not support rolling upgrades on `statefulsets` and `daemonsets` | -| Reloader provides both unit test cases and end to end integration test cases for future updates. So one can make sure that new changes do not break any old functionality. | Currently there are not any unit test cases or end to end integration test cases in `configmap-controller`. 
It add difficulties for any additional updates in `configmap-controller` and one can not know for sure whether new changes breaks any old functionality or not. | -| Reloader uses SHA512 to encode the change in `ConfigMap` or `Secret`. It then saves the SHA1 value in `STAKATER_FOO_CONFIGMAP` or `STAKATER_FOO_SECRET` environment variable depending upon where the change has happened. The use of SHA1 provides a concise 40 characters encoded value that is very less prone to collision. | `configmap-controller` uses `FABRICB_FOO_REVISION` environment variable to store any change in `ConfigMap` controller. It does not encode it or convert it in suitable hash value to avoid data pollution in deployment. | -| Reloader allows you to customize your own annotation (for both `Secrets` and `ConfigMaps`) using command line flags | `configmap-controller` restricts you to only their provided annotation | +| Reloader | ConfigMap | +|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Reloader can watch both `Secrets` and `ConfigMaps`. | `configmapcontroller` can only watch changes in `ConfigMaps`. It cannot detect changes in other resources like `Secrets`. | +| Reloader can perform rolling upgrades on `deployments` as well as on `statefulsets` and `daemonsets` | `configmapcontroller` can only perform rolling upgrades on `deployments`. It currently does not support rolling upgrades on `statefulsets` and `daemonsets` | +| Reloader provides both unit test cases and end to end integration test cases for future updates. So one can make sure that new changes do not break any old functionality. | Currently there are not any unit test cases or end to end integration test cases in `configmap-controller`. It adds difficulties for any additional updates in `configmap-controller` and one can not know for sure whether new changes breaks any old functionality or not. | +| Reloader uses SHA512 to encode the change in `ConfigMap` or `Secret`. It then saves the SHA1 value in `STAKATER_FOO_CONFIGMAP` or `STAKATER_FOO_SECRET` environment variable depending upon where the change has happened. The use of SHA1 provides a concise 40 characters encoded value that is very less prone to collision. | `configmap-controller` uses `FABRICB_FOO_REVISION` environment variable to store any change in `ConfigMap` controller. It does not encode it or convert it in suitable hash value to avoid data pollution in deployment. 
| +| Reloader allows you to customize your own annotation (for both `Secrets` and `ConfigMaps`) using command line flags | `configmap-controller` restricts you to only their provided annotation | diff --git a/internal/pkg/handler/upgrade_test.go b/internal/pkg/handler/upgrade_test.go index c1897f6..a334db0 100644 --- a/internal/pkg/handler/upgrade_test.go +++ b/internal/pkg/handler/upgrade_test.go @@ -1981,7 +1981,7 @@ func TestRollingUpgradeForDeploymentWithPatchAndRetryUsingArs(t *testing.T) { assert.Equal(t, patchtypes.StrategicMergePatchType, patchType) assert.NotEmpty(t, bytes) assert.Contains(t, string(bytes), `{"spec":{"template":{"metadata":{"annotations":{"reloader.stakater.com/last-reloaded-from":`) - assert.Contains(t, string(bytes), `\"hash\":\"3c9a892aeaedc759abc3df9884a37b8be5680382\"`) + assert.Contains(t, string(bytes), `\"hash\":\"fd9e71a362056bfa864d9859e12978f893d330ce8cbf09218b25d015770ad91f\"`) return nil } @@ -2964,7 +2964,7 @@ func TestRollingUpgradeForDaemonSetWithPatchAndRetryUsingArs(t *testing.T) { assert.Equal(t, patchtypes.StrategicMergePatchType, patchType) assert.NotEmpty(t, bytes) assert.Contains(t, string(bytes), `{"spec":{"template":{"metadata":{"annotations":{"reloader.stakater.com/last-reloaded-from":`) - assert.Contains(t, string(bytes), `\"hash\":\"314a2269170750a974d79f02b5b9ee517de7f280\"`) + assert.Contains(t, string(bytes), `\"hash\":\"43bf9e30e7c4e32a8f8673c462b86d0b1ac626cf498afdc0d0108e79ebe7ee0c\"`) return nil } @@ -3227,7 +3227,7 @@ func TestRollingUpgradeForStatefulSetWithPatchAndRetryUsingArs(t *testing.T) { assert.Equal(t, patchtypes.StrategicMergePatchType, patchType) assert.NotEmpty(t, bytes) assert.Contains(t, string(bytes), `{"spec":{"template":{"metadata":{"annotations":{"reloader.stakater.com/last-reloaded-from":`) - assert.Contains(t, string(bytes), `\"hash\":\"f821414d40d8815fb330763f74a4ff7ab651d4fa\"`) + assert.Contains(t, string(bytes), `\"hash\":\"6aa837180bdf6a93306c71a0cf62b4a45c2d5b021578247b3b64d5baea2b84d9\"`) return nil } @@ -3607,7 +3607,7 @@ func TestRollingUpgradeForDeploymentWithPatchAndRetryUsingErs(t *testing.T) { assert.Equal(t, patchtypes.StrategicMergePatchType, patchType) assert.NotEmpty(t, bytes) assert.Contains(t, string(bytes), `{"spec":{"template":{"spec":{"containers":[{"name":`) - assert.Contains(t, string(bytes), `"value":"3c9a892aeaedc759abc3df9884a37b8be5680382"`) + assert.Contains(t, string(bytes), `"value":"fd9e71a362056bfa864d9859e12978f893d330ce8cbf09218b25d015770ad91f"`) return nil } @@ -4502,7 +4502,7 @@ func TestRollingUpgradeForDaemonSetWithPatchAndRetryUsingErs(t *testing.T) { assert.Equal(t, patchtypes.StrategicMergePatchType, patchType) assert.NotEmpty(t, bytes) assert.Contains(t, string(bytes), `{"spec":{"template":{"spec":{"containers":[{"name":`) - assert.Contains(t, string(bytes), `"value":"314a2269170750a974d79f02b5b9ee517de7f280"`) + assert.Contains(t, string(bytes), `"value":"43bf9e30e7c4e32a8f8673c462b86d0b1ac626cf498afdc0d0108e79ebe7ee0c"`) return nil } @@ -4737,7 +4737,7 @@ func TestRollingUpgradeForStatefulSetWithPatchAndRetryUsingErs(t *testing.T) { assert.Equal(t, patchtypes.StrategicMergePatchType, patchType) assert.NotEmpty(t, bytes) assert.Contains(t, string(bytes), `{"spec":{"template":{"spec":{"containers":[{"name":`) - assert.Contains(t, string(bytes), `"value":"f821414d40d8815fb330763f74a4ff7ab651d4fa"`) + assert.Contains(t, string(bytes), `"value":"6aa837180bdf6a93306c71a0cf62b4a45c2d5b021578247b3b64d5baea2b84d9"`) return nil } From eb38bf7470dac4764fe355104b34c690f7ec747f 
Mon Sep 17 00:00:00 2001 From: faizanahmad055 Date: Sun, 4 Jan 2026 22:01:44 +0100 Subject: [PATCH 18/24] Fix linting errors Signed-off-by: faizanahmad055 --- README.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 5c22154..57a1e62 100644 --- a/README.md +++ b/README.md @@ -171,7 +171,7 @@ This instructs Reloader to skip all reload logic for that resource across all wo ### 4. ⚙️ Workload-Specific Rollout Strategy (Argo Rollouts Only) -Note: This is only applicable when using [Argo Rollouts](https://argoproj.github.io/argo-rollouts/). It is ignored for standard Kubernetes Deployments, StatefulSets, or DaemonSets. To use this feature, Argo Rollouts support must be enabled in Reloader (for example via --is-argo-rollouts=true). +Note: This is only applicable when using [Argo Rollouts](https://argoproj.github.io/argo-rollouts/). It is ignored for standard Kubernetes `Deployments`, `StatefulSets`, or `DaemonSets`. To use this feature, Argo Rollouts support must be enabled in Reloader (for example via --is-argo-rollouts=true). By default, Reloader triggers the Argo Rollout controller to perform a standard rollout by updating the pod template. This works well in most cases, however, because this modifies the workload spec, GitOps tools like ArgoCD will detect this as "Configuration Drift" and mark your application as OutOfSync. @@ -191,8 +191,8 @@ metadata: ✅ Use `restart` if: 1. You're using GitOps and want to avoid drift -2. You want a quick restart without changing the workload spec -3. Your platform restricts metadata changes +1. You want a quick restart without changing the workload spec +1. Your platform restricts metadata changes This setting affects Argo Rollouts behavior, not Argo CD sync settings. @@ -289,13 +289,13 @@ spec: secretKey: "password" ``` -***Important***: Reloader tracks changes to individual secrets (identified by secretKey). If your SecretProviderClass doesn't specify secretKey for each object, Reloader may not detect updates correctly. +***Important***: Reloader tracks changes to individual secrets (identified by `secretKey`). If your SecretProviderClass doesn't specify `secretKey` for each object, Reloader may not detect updates correctly. #### Notes & Limitations Reloader reacts to CSI status changes, not direct updates to external secret stores Secret rotation must be enabled in the CSI driver for updates to be detected -CSI limitations (such as subPath mounts) still apply and may require pod restarts +CSI limitations (such as `subPath` mounts) still apply and may require pod restarts If secrets are synced to Kubernetes Secret objects, standard Reloader behavior applies and CSI support may not be required ## 🚀 Installation @@ -489,7 +489,7 @@ PRs are welcome. In general, we follow the "fork-and-pull" Git workflow: ## Release Processes -_Repository GitHub releases_: As requested by the community in [issue 685](https://github.com/stakater/Reloader/issues/685), Reloader is now based on a manual release process. Releases are no longer done on every merged PR to the main branch, but manually on request. +*Repository GitHub releases*: As requested by the community in [issue 685](https://github.com/stakater/Reloader/issues/685), Reloader is now based on a manual release process. Releases are no longer done on every merged PR to the main branch, but manually on request. To make a GitHub release: @@ -502,7 +502,7 @@ To make a GitHub release: 1. 
Code owners create another branch from `master` and bump the helm chart version as well as Reloader image version. - Code owners create a PR with `release/helm-chart` label, example: [PR-846](https://github.com/stakater/Reloader/pull/846) -_Repository git tagging_: Push to the main branch will create a merge-image and merge-tag named `merge-${{ github.event.number }}`, for example `merge-800` when pull request number 800 is merged. +*Repository git tagging*: Push to the main branch will create a merge-image and merge-tag named `merge-${{ github.event.number }}`, for example `merge-800` when pull request number 800 is merged. ## Changelog From e1db875efeba113b068c470b92e24173054f0e6f Mon Sep 17 00:00:00 2001 From: faizanahmad055 Date: Tue, 6 Jan 2026 22:20:20 +0100 Subject: [PATCH 19/24] Fix failing tests Signed-off-by: faizanahmad055 --- internal/pkg/crypto/sha.go | 6 ++---- internal/pkg/crypto/sha_test.go | 13 +++++++++++++ internal/pkg/handler/upgrade.go | 6 +++--- internal/pkg/handler/upgrade_test.go | 6 +++--- internal/pkg/testutil/kube.go | 6 +++++- internal/pkg/util/util.go | 2 +- 6 files changed, 27 insertions(+), 12 deletions(-) diff --git a/internal/pkg/crypto/sha.go b/internal/pkg/crypto/sha.go index 9235425..f9ae235 100644 --- a/internal/pkg/crypto/sha.go +++ b/internal/pkg/crypto/sha.go @@ -6,11 +6,9 @@ import ( ) // GenerateSHA generates SHA from string +// Always returns a hash value, even for empty strings, to ensure consistent behavior +// and avoid issues with string matching operations (e.g., strings.Contains(str, "") always returns true) func GenerateSHA(data string) string { - if data == "" { - return "" - } - hash := sha512.Sum512_256([]byte(data)) return hex.EncodeToString(hash[:]) } diff --git a/internal/pkg/crypto/sha_test.go b/internal/pkg/crypto/sha_test.go index 60d5af6..761f8d0 100644 --- a/internal/pkg/crypto/sha_test.go +++ b/internal/pkg/crypto/sha_test.go @@ -13,3 +13,16 @@ func TestGenerateSHA(t *testing.T) { t.Errorf("Failed to generate SHA") } } + +// TestGenerateSHAEmptyString verifies that empty string generates a valid hash +// This ensures consistent behavior and avoids issues with string matching operations +func TestGenerateSHAEmptyString(t *testing.T) { + result := GenerateSHA("") + expected := "c672b8d1ef56ed28ab87c3622c5114069bdd3ad7b8f9737498d0c01ecef0967a" + if result != expected { + t.Errorf("Failed to generate SHA for empty string. 
Expected: %s, Got: %s", expected, result) + } + if len(result) != 64 { + t.Errorf("SHA hash should be 64 characters long, got %d", len(result)) + } +} diff --git a/internal/pkg/handler/upgrade.go b/internal/pkg/handler/upgrade.go index b10bfbc..6d63d5c 100644 --- a/internal/pkg/handler/upgrade.go +++ b/internal/pkg/handler/upgrade.go @@ -539,8 +539,8 @@ func updatePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item runti } func secretProviderClassAnnotationReloaded(oldAnnotations map[string]string, newConfig common.Config) bool { - annotaion := oldAnnotations[getReloaderAnnotationKey()] - return strings.Contains(annotaion, newConfig.ResourceName) && strings.Contains(annotaion, newConfig.SHAValue) + annotation := oldAnnotations[getReloaderAnnotationKey()] + return strings.Contains(annotation, newConfig.ResourceName) && strings.Contains(annotation, newConfig.SHAValue) } func getReloaderAnnotationKey() string { @@ -645,7 +645,7 @@ func secretProviderClassEnvReloaded(containers []v1.Container, envVar string, sh } func populateAnnotationsFromSecretProviderClass(clients kube.Clients, config *common.Config) { - obj, err := clients.CSIClient.SecretsstoreV1().SecretProviderClasses(config.Namespace).Get(context.TODO(), config.ResourceName, metav1.GetOptions{}) + obj, err := clients.CSIClient.SecretsstoreV1().SecretProviderClasses(config.Namespace).Get(context.Background(), config.ResourceName, metav1.GetOptions{}) annotations := make(map[string]string) if err != nil { if apierrors.IsNotFound(err) { diff --git a/internal/pkg/handler/upgrade_test.go b/internal/pkg/handler/upgrade_test.go index a334db0..e905ee0 100644 --- a/internal/pkg/handler/upgrade_test.go +++ b/internal/pkg/handler/upgrade_test.go @@ -3327,7 +3327,7 @@ func TestRollingUpgradeForStatefulSetWithSecretProviderClassUsingArs(t *testing. err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) time.Sleep(5 * time.Second) if err != nil { - t.Errorf("Rolling upgrade failed for StatefulSet with SecretProviderClass") + t.Errorf("Rolling upgrade failed for StatefulSet with SecretProviderClass: %v", err) } logrus.Infof("Verifying statefulSet update") @@ -3337,11 +3337,11 @@ func TestRollingUpgradeForStatefulSetWithSecretProviderClassUsingArs(t *testing. 
} if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") + t.Errorf("Counter was not increased, expected 1 but got %f", promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded))) } if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") + t.Errorf("Counter by namespace was not increased, expected 1 but got %f", promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace}))) } testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) diff --git a/internal/pkg/testutil/kube.go b/internal/pkg/testutil/kube.go index 4901d9a..a778eb1 100644 --- a/internal/pkg/testutil/kube.go +++ b/internal/pkg/testutil/kube.go @@ -1327,7 +1327,11 @@ func VerifyResourceAnnotationUpdate(clients kube.Clients, config common.Config, } func GetSHAfromEmptyData() string { - return crypto.GenerateSHA("") + // Use a special marker that represents "deleted" or "empty" state + // This ensures we have a distinct, deterministic hash for the delete strategy + // Note: We could use GenerateSHA("") which now returns a hash, but using a marker + // makes the intent clearer and avoids potential confusion with actual empty data + return crypto.GenerateSHA("__RELOADER_EMPTY_DELETE_MARKER__") } // GetRollout provides rollout for testing diff --git a/internal/pkg/util/util.go b/internal/pkg/util/util.go index 047d068..476cdb9 100644 --- a/internal/pkg/util/util.go +++ b/internal/pkg/util/util.go @@ -106,7 +106,7 @@ func ConfigureReloaderFlags(cmd *cobra.Command) { cmd.PersistentFlags().BoolVar(&options.SyncAfterRestart, "sync-after-restart", false, "Sync add events after reloader restarts") cmd.PersistentFlags().BoolVar(&options.EnablePProf, "enable-pprof", false, "Enable pprof for profiling") cmd.PersistentFlags().StringVar(&options.PProfAddr, "pprof-addr", ":6060", "Address to start pprof server on. Default is :6060") - cmd.PersistentFlags().BoolVar(&options.EnableCSIIntegration, "enable-csi-integration", false, "Enables CSI integration. Default is :true") + cmd.PersistentFlags().BoolVar(&options.EnableCSIIntegration, "enable-csi-integration", false, "Enables CSI integration. 
Default is :false") } func GetIgnoredResourcesList() (List, error) { From 8b64c9b9cda9ef1609e3e9dc078dd8ccc5c9bf81 Mon Sep 17 00:00:00 2001 From: faizanahmad055 Date: Tue, 6 Jan 2026 22:53:24 +0100 Subject: [PATCH 20/24] Fix failing SHA test Signed-off-by: faizanahmad055 --- internal/pkg/crypto/sha_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/pkg/crypto/sha_test.go b/internal/pkg/crypto/sha_test.go index 761f8d0..ee530e3 100644 --- a/internal/pkg/crypto/sha_test.go +++ b/internal/pkg/crypto/sha_test.go @@ -7,7 +7,7 @@ import ( // TestGenerateSHA generates the sha from given data and verifies whether it is correct or not func TestGenerateSHA(t *testing.T) { data := "www.stakater.com" - sha := "abd4ed82fb04548388a6cf3c339fd9dc84d275df" + sha := "2e9aa975331b22861b4f62b7fcc69b63e001f938361fee3b4ed888adf26a10e3" result := GenerateSHA(data) if result != sha { t.Errorf("Failed to generate SHA") From b0ca635e4984dec799b4c15a9689eae26dfb630e Mon Sep 17 00:00:00 2001 From: faizanahmad055 Date: Wed, 7 Jan 2026 09:11:48 +0100 Subject: [PATCH 21/24] Add file filtering in UBI docker image Signed-off-by: faizanahmad055 --- Dockerfile.ubi | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/Dockerfile.ubi b/Dockerfile.ubi index 4359730..20e2b16 100644 --- a/Dockerfile.ubi +++ b/Dockerfile.ubi @@ -20,7 +20,25 @@ RUN mkdir /image && \ COPY ubi-build-files-${TARGETARCH}.txt /tmp # Copy all the required files from the base UBI image into the image directory # As the go binary is not statically compiled this includes everything needed for CGO to work, cacerts, tzdata and RH release files -RUN tar cf /tmp/files.tar -T /tmp/ubi-build-files-${TARGETARCH}.txt && tar xf /tmp/files.tar -C /image/ +# Filter existing files and exclude temporary entitlement files that may be removed during build +RUN set -e && \ + # Filter files that actually exist (files, directories, or symlinks) + while IFS= read -r file; do \ + [ -n "$file" ] && ([ -e "$file" ] || [ -L "$file" ]) && echo "$file"; \ + done < /tmp/ubi-build-files-${TARGETARCH}.txt > /tmp/existing-files.txt && \ + # Create tarball if we have files to archive + if [ -s /tmp/existing-files.txt ]; then \ + tar -chf /tmp/files.tar \ + --exclude='etc/pki/entitlement-host*' \ + -T /tmp/existing-files.txt 2>&1 | grep -vE "(File removed before we read it|Cannot stat)" || true; \ + # Extract only if tarball was created successfully + if [ -f /tmp/files.tar ]; then \ + tar -xf /tmp/files.tar -C /image/ && \ + rm -f /tmp/files.tar; \ + fi; \ + fi && \ + # Clean up temporary file list + rm -f /tmp/existing-files.txt # Generate a rpm database which contains all the packages that you said were needed in ubi-build-files-*.txt RUN rpm --root /image --initdb \ From 703319e732f11ac6314b265ffe59098039ac2bd7 Mon Sep 17 00:00:00 2001 From: faizanahmad055 Date: Wed, 7 Jan 2026 09:27:29 +0100 Subject: [PATCH 22/24] Improve file filtering in UBI docker image Signed-off-by: faizanahmad055 --- Dockerfile.ubi | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/Dockerfile.ubi b/Dockerfile.ubi index 20e2b16..9d8c88d 100644 --- a/Dockerfile.ubi +++ b/Dockerfile.ubi @@ -23,19 +23,29 @@ COPY ubi-build-files-${TARGETARCH}.txt /tmp # Filter existing files and exclude temporary entitlement files that may be removed during build RUN set -e && \ # Filter files that actually exist (files, directories, or symlinks) + # This ensures we only copy files that are present, avoiding "Cannot 
stat" errors while IFS= read -r file; do \ [ -n "$file" ] && ([ -e "$file" ] || [ -L "$file" ]) && echo "$file"; \ done < /tmp/ubi-build-files-${TARGETARCH}.txt > /tmp/existing-files.txt && \ - # Create tarball if we have files to archive - if [ -s /tmp/existing-files.txt ]; then \ - tar -chf /tmp/files.tar \ - --exclude='etc/pki/entitlement-host*' \ - -T /tmp/existing-files.txt 2>&1 | grep -vE "(File removed before we read it|Cannot stat)" || true; \ - # Extract only if tarball was created successfully - if [ -f /tmp/files.tar ]; then \ - tar -xf /tmp/files.tar -C /image/ && \ - rm -f /tmp/files.tar; \ - fi; \ + # Verify we have files to copy (fail if list is empty to catch configuration issues) + if [ ! -s /tmp/existing-files.txt ]; then \ + echo "ERROR: No files found to copy from ubi-build-files-${TARGETARCH}.txt" >&2; \ + echo "This indicates the base image may be missing required files or the file list is incorrect." >&2; \ + echo "Expected files from ubi-build-files-${TARGETARCH}.txt:" >&2; \ + cat /tmp/ubi-build-files-${TARGETARCH}.txt >&2; \ + exit 1; \ + fi && \ + # Create tarball, excluding only temporary entitlement files (safe to exclude) + # Note: --exclude only affects files within directories being archived, not the directories themselves + tar -chf /tmp/files.tar \ + --exclude='etc/pki/entitlement-host*' \ + -T /tmp/existing-files.txt 2>&1 | grep -vE "(File removed before we read it|Cannot stat)" || true; \ + # Extract tarball (critical files like libc.so.6, ld-linux, etc/ssl/certs are included) + if [ -f /tmp/files.tar ]; then \ + tar -xf /tmp/files.tar -C /image/ && \ + rm -f /tmp/files.tar; \ + else \ + echo "WARNING: Tarball was not created, but continuing..." >&2; \ fi && \ # Clean up temporary file list rm -f /tmp/existing-files.txt From 6fd7c8254a1644833f3194b101e3a8036191a754 Mon Sep 17 00:00:00 2001 From: faizanahmad055 Date: Wed, 7 Jan 2026 10:28:38 +0100 Subject: [PATCH 23/24] Update filtering in UBI image Signed-off-by: faizanahmad055 --- Dockerfile.ubi | 36 +++++++++++------------------------- 1 file changed, 11 insertions(+), 25 deletions(-) diff --git a/Dockerfile.ubi b/Dockerfile.ubi index 9d8c88d..2f92b38 100644 --- a/Dockerfile.ubi +++ b/Dockerfile.ubi @@ -21,33 +21,19 @@ COPY ubi-build-files-${TARGETARCH}.txt /tmp # Copy all the required files from the base UBI image into the image directory # As the go binary is not statically compiled this includes everything needed for CGO to work, cacerts, tzdata and RH release files # Filter existing files and exclude temporary entitlement files that may be removed during build -RUN set -e && \ - # Filter files that actually exist (files, directories, or symlinks) - # This ensures we only copy files that are present, avoiding "Cannot stat" errors - while IFS= read -r file; do \ - [ -n "$file" ] && ([ -e "$file" ] || [ -L "$file" ]) && echo "$file"; \ +RUN while IFS= read -r file; do \ + [ -z "$file" ] && continue; \ + if [ -e "$file" ] || [ -L "$file" ]; then \ + echo "$file"; \ + fi; \ done < /tmp/ubi-build-files-${TARGETARCH}.txt > /tmp/existing-files.txt && \ - # Verify we have files to copy (fail if list is empty to catch configuration issues) - if [ ! -s /tmp/existing-files.txt ]; then \ - echo "ERROR: No files found to copy from ubi-build-files-${TARGETARCH}.txt" >&2; \ - echo "This indicates the base image may be missing required files or the file list is incorrect." 
>&2; \ - echo "Expected files from ubi-build-files-${TARGETARCH}.txt:" >&2; \ - cat /tmp/ubi-build-files-${TARGETARCH}.txt >&2; \ - exit 1; \ + if [ -s /tmp/existing-files.txt ]; then \ + tar -chf /tmp/files.tar --exclude='etc/pki/entitlement-host*' -T /tmp/existing-files.txt 2>&1 | grep -vE "(File removed before we read it|Cannot stat)" || true; \ + if [ -f /tmp/files.tar ]; then \ + tar xf /tmp/files.tar -C /image/ 2>/dev/null || true; \ + rm -f /tmp/files.tar; \ + fi; \ fi && \ - # Create tarball, excluding only temporary entitlement files (safe to exclude) - # Note: --exclude only affects files within directories being archived, not the directories themselves - tar -chf /tmp/files.tar \ - --exclude='etc/pki/entitlement-host*' \ - -T /tmp/existing-files.txt 2>&1 | grep -vE "(File removed before we read it|Cannot stat)" || true; \ - # Extract tarball (critical files like libc.so.6, ld-linux, etc/ssl/certs are included) - if [ -f /tmp/files.tar ]; then \ - tar -xf /tmp/files.tar -C /image/ && \ - rm -f /tmp/files.tar; \ - else \ - echo "WARNING: Tarball was not created, but continuing..." >&2; \ - fi && \ - # Clean up temporary file list rm -f /tmp/existing-files.txt # Generate a rpm database which contains all the packages that you said were needed in ubi-build-files-*.txt From 157cf0f2e4ae367f4b0f78f578ce40fd6dc3f3ad Mon Sep 17 00:00:00 2001 From: faizanahmad055 Date: Wed, 7 Jan 2026 12:13:04 +0100 Subject: [PATCH 24/24] Remove SHA1 changes Signed-off-by: faizanahmad055 --- docs/How-it-works.md | 4 ++-- docs/Reloader-vs-ConfigmapController.md | 14 +++++++------- docs/Reloader-vs-k8s-trigger-controller.md | 2 +- internal/pkg/crypto/sha.go | 18 ++++++++++++------ internal/pkg/crypto/sha_test.go | 8 ++++---- internal/pkg/handler/upgrade_test.go | 12 ++++++------ 6 files changed, 32 insertions(+), 26 deletions(-) diff --git a/docs/How-it-works.md b/docs/How-it-works.md index 6a946f9..c0ae964 100644 --- a/docs/How-it-works.md +++ b/docs/How-it-works.md @@ -76,7 +76,7 @@ Note: Rolling upgrade also works in the same way for secrets. ### Hash Value Computation -Reloader uses SHA512 to compute hash value. SHA1 is used because it is efficient and less prone to collision. +Reloader uses SHA1 to compute hash value. SHA1 is used because it is efficient and less prone to collision. ## Monitor All Namespaces @@ -90,4 +90,4 @@ The output file can then be used to deploy Reloader in specific namespace. ## Compatibility With Helm Install and Upgrade -Reloader has no impact on helm deployment cycle. Reloader only injects an environment variable in `deployment`, `daemonset` or `statefulset`. The environment variable contains the SHA512 value of `ConfigMaps` or `Secrets` data. So if a deployment is created using Helm and Reloader updates the deployment, then next time you upgrade the helm release, Reloader will do nothing except changing that environment variable value in `deployment` , `daemonset` or `statefulset`. +Reloader has no impact on helm deployment cycle. Reloader only injects an environment variable in `deployment`, `daemonset` or `statefulset`. The environment variable contains the SHA1 value of `ConfigMaps` or `Secrets` data. So if a deployment is created using Helm and Reloader updates the deployment, then next time you upgrade the helm release, Reloader will do nothing except changing that environment variable value in `deployment` , `daemonset` or `statefulset`. 
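The `How-it-works.md` hunk above describes the core mechanism: Reloader hashes the data of a `ConfigMap` or `Secret` and stores the digest in a `STAKATER_<NAME>_CONFIGMAP` / `STAKATER_<NAME>_SECRET` environment variable (or in the reload annotation). Below is a minimal Go sketch of that idea built around the same SHA-1 helper this patch restores in `internal/pkg/crypto`; the sorted `key=value` flattening and the `hashConfigMapData` helper are illustrative assumptions, not Reloader's exact encoding.

```go
package main

import (
	"crypto/sha1"
	"fmt"
	"sort"
)

// generateSHA mirrors internal/pkg/crypto.GenerateSHA as restored by this
// patch: SHA-1 the input string and return the hex-encoded digest.
func generateSHA(data string) string {
	hasher := sha1.New()
	hasher.Write([]byte(data)) // hash.Hash.Write never returns an error
	return fmt.Sprintf("%x", hasher.Sum(nil))
}

// hashConfigMapData flattens a ConfigMap's data deterministically and hashes
// the result. NOTE: the sorted "key=value" join is an assumption made for
// illustration; Reloader's util package defines the real encoding.
func hashConfigMapData(data map[string]string) string {
	keys := make([]string, 0, len(data))
	for k := range data {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	flattened := ""
	for _, k := range keys {
		flattened += k + "=" + data[k] + ";"
	}
	return generateSHA(flattened)
}

func main() {
	cm := map[string]string{"app.properties": "log.level=debug"}
	// This 40-character digest is the kind of value that ends up in
	// STAKATER_<NAME>_CONFIGMAP or in the reload annotation.
	fmt.Println(hashConfigMapData(cm))
}
```

Because SHA-1 digests are 40 hexadecimal characters, this lines up with the 40-character hash values reinstated in the test expectations further down.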
diff --git a/docs/Reloader-vs-ConfigmapController.md b/docs/Reloader-vs-ConfigmapController.md index 3ddab08..1433daa 100644 --- a/docs/Reloader-vs-ConfigmapController.md +++ b/docs/Reloader-vs-ConfigmapController.md @@ -2,10 +2,10 @@ Reloader is inspired from [`configmapcontroller`](https://github.com/fabric8io/configmapcontroller) but there are many ways in which it differs from `configmapcontroller`. Below is the small comparison between these two controllers. -| Reloader | ConfigMap | -|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Reloader can watch both `Secrets` and `ConfigMaps`. | `configmapcontroller` can only watch changes in `ConfigMaps`. It cannot detect changes in other resources like `Secrets`. | -| Reloader can perform rolling upgrades on `deployments` as well as on `statefulsets` and `daemonsets` | `configmapcontroller` can only perform rolling upgrades on `deployments`. It currently does not support rolling upgrades on `statefulsets` and `daemonsets` | -| Reloader provides both unit test cases and end to end integration test cases for future updates. So one can make sure that new changes do not break any old functionality. | Currently there are not any unit test cases or end to end integration test cases in `configmap-controller`. It adds difficulties for any additional updates in `configmap-controller` and one can not know for sure whether new changes breaks any old functionality or not. | -| Reloader uses SHA512 to encode the change in `ConfigMap` or `Secret`. It then saves the SHA1 value in `STAKATER_FOO_CONFIGMAP` or `STAKATER_FOO_SECRET` environment variable depending upon where the change has happened. The use of SHA1 provides a concise 40 characters encoded value that is very less prone to collision. | `configmap-controller` uses `FABRICB_FOO_REVISION` environment variable to store any change in `ConfigMap` controller. It does not encode it or convert it in suitable hash value to avoid data pollution in deployment. | -| Reloader allows you to customize your own annotation (for both `Secrets` and `ConfigMaps`) using command line flags | `configmap-controller` restricts you to only their provided annotation | +| Reloader | ConfigMap | +|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Reloader can watch both `Secrets` and `ConfigMaps`. | `configmapcontroller` can only watch changes in `ConfigMaps`. It cannot detect changes in other resources like `Secrets`. 
| +| Reloader can perform rolling upgrades on `deployments` as well as on `statefulsets` and `daemonsets` | `configmapcontroller` can only perform rolling upgrades on `deployments`. It currently does not support rolling upgrades on `statefulsets` and `daemonsets` | +| Reloader provides both unit test cases and end to end integration test cases for future updates. So one can make sure that new changes do not break any old functionality. | Currently there are not any unit test cases or end to end integration test cases in `configmap-controller`. It adds difficulties for any additional updates in `configmap-controller` and one can not know for sure whether new changes breaks any old functionality or not. | +| Reloader uses SHA1 to encode the change in `ConfigMap` or `Secret`. It then saves the SHA1 value in `STAKATER_FOO_CONFIGMAP` or `STAKATER_FOO_SECRET` environment variable depending upon where the change has happened. The use of SHA1 provides a concise 40 characters encoded value that is very less prone to collision. | `configmap-controller` uses `FABRICB_FOO_REVISION` environment variable to store any change in `ConfigMap` controller. It does not encode it or convert it in suitable hash value to avoid data pollution in deployment. | +| Reloader allows you to customize your own annotation (for both `Secrets` and `ConfigMaps`) using command line flags | `configmap-controller` restricts you to only their provided annotation | diff --git a/docs/Reloader-vs-k8s-trigger-controller.md b/docs/Reloader-vs-k8s-trigger-controller.md index fe0f6d9..561dca5 100644 --- a/docs/Reloader-vs-k8s-trigger-controller.md +++ b/docs/Reloader-vs-k8s-trigger-controller.md @@ -6,7 +6,7 @@ Reloader and k8s-trigger-controller are both built for same purpose. So there ar - Both controllers support change detection in `ConfigMaps` and `Secrets` - Both controllers support deployment `rollout` -- Reloader controller use SHA512 for hashing +- Reloader controller use SHA1 for hashing - Both controllers have end to end as well as unit test cases. 
## Differences
diff --git a/internal/pkg/crypto/sha.go b/internal/pkg/crypto/sha.go
index f9ae235..043fc22 100644
--- a/internal/pkg/crypto/sha.go
+++ b/internal/pkg/crypto/sha.go
@@ -1,14 +1,20 @@
 package crypto
 
 import (
-	"crypto/sha512"
-	"encoding/hex"
+	"crypto/sha1"
+	"fmt"
+	"io"
+
+	"github.com/sirupsen/logrus"
 )
 
 // GenerateSHA generates SHA from string
-// Always returns a hash value, even for empty strings, to ensure consistent behavior
-// and avoid issues with string matching operations (e.g., strings.Contains(str, "") always returns true)
 func GenerateSHA(data string) string {
-	hash := sha512.Sum512_256([]byte(data))
-	return hex.EncodeToString(hash[:])
+	hasher := sha1.New()
+	_, err := io.WriteString(hasher, data)
+	if err != nil {
+		logrus.Errorf("Unable to write data in hash writer %v", err)
+	}
+	sha := hasher.Sum(nil)
+	return fmt.Sprintf("%x", sha)
 }
diff --git a/internal/pkg/crypto/sha_test.go b/internal/pkg/crypto/sha_test.go
index ee530e3..5cb0afc 100644
--- a/internal/pkg/crypto/sha_test.go
+++ b/internal/pkg/crypto/sha_test.go
@@ -7,7 +7,7 @@ import (
 // TestGenerateSHA generates the sha from given data and verifies whether it is correct or not
 func TestGenerateSHA(t *testing.T) {
 	data := "www.stakater.com"
-	sha := "2e9aa975331b22861b4f62b7fcc69b63e001f938361fee3b4ed888adf26a10e3"
+	sha := "abd4ed82fb04548388a6cf3c339fd9dc84d275df"
 	result := GenerateSHA(data)
 	if result != sha {
 		t.Errorf("Failed to generate SHA")
@@ -18,11 +18,11 @@ func TestGenerateSHA(t *testing.T) {
 // This ensures consistent behavior and avoids issues with string matching operations
 func TestGenerateSHAEmptyString(t *testing.T) {
 	result := GenerateSHA("")
-	expected := "c672b8d1ef56ed28ab87c3622c5114069bdd3ad7b8f9737498d0c01ecef0967a"
+	expected := "da39a3ee5e6b4b0d3255bfef95601890afd80709"
 	if result != expected {
 		t.Errorf("Failed to generate SHA for empty string.
Expected: %s, Got: %s", expected, result) } - if len(result) != 64 { - t.Errorf("SHA hash should be 64 characters long, got %d", len(result)) + if len(result) != 40 { + t.Errorf("SHA hash should be 40 characters long, got %d", len(result)) } } diff --git a/internal/pkg/handler/upgrade_test.go b/internal/pkg/handler/upgrade_test.go index e905ee0..68ba94d 100644 --- a/internal/pkg/handler/upgrade_test.go +++ b/internal/pkg/handler/upgrade_test.go @@ -1981,7 +1981,7 @@ func TestRollingUpgradeForDeploymentWithPatchAndRetryUsingArs(t *testing.T) { assert.Equal(t, patchtypes.StrategicMergePatchType, patchType) assert.NotEmpty(t, bytes) assert.Contains(t, string(bytes), `{"spec":{"template":{"metadata":{"annotations":{"reloader.stakater.com/last-reloaded-from":`) - assert.Contains(t, string(bytes), `\"hash\":\"fd9e71a362056bfa864d9859e12978f893d330ce8cbf09218b25d015770ad91f\"`) + assert.Contains(t, string(bytes), `\"hash\":\"3c9a892aeaedc759abc3df9884a37b8be5680382\"`) return nil } @@ -2964,7 +2964,7 @@ func TestRollingUpgradeForDaemonSetWithPatchAndRetryUsingArs(t *testing.T) { assert.Equal(t, patchtypes.StrategicMergePatchType, patchType) assert.NotEmpty(t, bytes) assert.Contains(t, string(bytes), `{"spec":{"template":{"metadata":{"annotations":{"reloader.stakater.com/last-reloaded-from":`) - assert.Contains(t, string(bytes), `\"hash\":\"43bf9e30e7c4e32a8f8673c462b86d0b1ac626cf498afdc0d0108e79ebe7ee0c\"`) + assert.Contains(t, string(bytes), `\"hash\":\"314a2269170750a974d79f02b5b9ee517de7f280\"`) return nil } @@ -3227,7 +3227,7 @@ func TestRollingUpgradeForStatefulSetWithPatchAndRetryUsingArs(t *testing.T) { assert.Equal(t, patchtypes.StrategicMergePatchType, patchType) assert.NotEmpty(t, bytes) assert.Contains(t, string(bytes), `{"spec":{"template":{"metadata":{"annotations":{"reloader.stakater.com/last-reloaded-from":`) - assert.Contains(t, string(bytes), `\"hash\":\"6aa837180bdf6a93306c71a0cf62b4a45c2d5b021578247b3b64d5baea2b84d9\"`) + assert.Contains(t, string(bytes), `\"hash\":\"f821414d40d8815fb330763f74a4ff7ab651d4fa\"`) return nil } @@ -3607,7 +3607,7 @@ func TestRollingUpgradeForDeploymentWithPatchAndRetryUsingErs(t *testing.T) { assert.Equal(t, patchtypes.StrategicMergePatchType, patchType) assert.NotEmpty(t, bytes) assert.Contains(t, string(bytes), `{"spec":{"template":{"spec":{"containers":[{"name":`) - assert.Contains(t, string(bytes), `"value":"fd9e71a362056bfa864d9859e12978f893d330ce8cbf09218b25d015770ad91f"`) + assert.Contains(t, string(bytes), `"value":"3c9a892aeaedc759abc3df9884a37b8be5680382"`) return nil } @@ -4502,7 +4502,7 @@ func TestRollingUpgradeForDaemonSetWithPatchAndRetryUsingErs(t *testing.T) { assert.Equal(t, patchtypes.StrategicMergePatchType, patchType) assert.NotEmpty(t, bytes) assert.Contains(t, string(bytes), `{"spec":{"template":{"spec":{"containers":[{"name":`) - assert.Contains(t, string(bytes), `"value":"43bf9e30e7c4e32a8f8673c462b86d0b1ac626cf498afdc0d0108e79ebe7ee0c"`) + assert.Contains(t, string(bytes), `"value":"314a2269170750a974d79f02b5b9ee517de7f280"`) return nil } @@ -4737,7 +4737,7 @@ func TestRollingUpgradeForStatefulSetWithPatchAndRetryUsingErs(t *testing.T) { assert.Equal(t, patchtypes.StrategicMergePatchType, patchType) assert.NotEmpty(t, bytes) assert.Contains(t, string(bytes), `{"spec":{"template":{"spec":{"containers":[{"name":`) - assert.Contains(t, string(bytes), `"value":"6aa837180bdf6a93306c71a0cf62b4a45c2d5b021578247b3b64d5baea2b84d9"`) + assert.Contains(t, string(bytes), `"value":"f821414d40d8815fb330763f74a4ff7ab651d4fa"`) return nil 
}
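Patch 19 changed `GenerateSHA` so that empty input still produces a digest (its comment points at `strings.Contains(str, "")` always returning true, which would let substring checks such as `secretProviderClassAnnotationReloaded` match spuriously on an empty SHA), and patch 24 keeps that behavior while reverting the algorithm to SHA-1. The following self-contained sketch demonstrates both points; it uses only the standard library and is not Reloader code.

```go
package main

import (
	"crypto/sha1"
	"fmt"
	"strings"
)

func main() {
	// strings.Contains always reports true for an empty substring, so an
	// empty SHA value would make any "was this hash already reloaded?"
	// substring check succeed unconditionally.
	fmt.Println(strings.Contains("reloader.stakater.com/last-reloaded-from: ...", "")) // true

	// SHA-1 of the empty string is the well-known 40-character digest
	// asserted by TestGenerateSHAEmptyString above.
	sum := sha1.Sum([]byte(""))
	fmt.Printf("%x\n", sum) // da39a3ee5e6b4b0d3255bfef95601890afd80709
}
```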