diff --git a/README.md b/README.md
index ae0a00a..4fe53f3 100644
--- a/README.md
+++ b/README.md
@@ -13,7 +13,7 @@

 ## 🔁 What is Reloader?

-Reloader is a Kubernetes controller that automatically triggers rollouts of workloads (like Deployments, StatefulSets, and more) whenever referenced `Secrets` or `ConfigMaps` are updated.
+Reloader is a Kubernetes controller that automatically triggers rollouts of workloads (like Deployments, StatefulSets, and more) whenever referenced `Secrets`, `ConfigMaps`, or (optionally) **CSI-mounted secrets** are updated.

 In a traditional Kubernetes setup, updating a `Secret` or `ConfigMap` does not automatically restart or redeploy your workloads. This can lead to stale configurations running in production, especially when dealing with dynamic values like credentials, feature flags, or environment configs.

@@ -169,9 +169,11 @@ metadata:

 This instructs Reloader to skip all reload logic for that resource across all workloads.

-### 4. ⚙️ Workload-Specific Rollout Strategy
+### 4. ⚙️ Workload-Specific Rollout Strategy (Argo Rollouts Only)

-By default, Reloader uses the **rollout** strategy — it updates the pod template to trigger a new rollout. This works well in most cases, but it can cause problems if you're using GitOps tools like ArgoCD, which detect this as configuration drift.
+Note: This feature only applies when using [Argo Rollouts](https://argoproj.github.io/argo-rollouts/). It is ignored for standard Kubernetes Deployments, StatefulSets, and DaemonSets. To use it, Argo Rollouts support must be enabled in Reloader (for example via `--is-argo-rollouts=true`).
+
+By default, Reloader triggers the Argo Rollouts controller to perform a standard rollout by updating the pod template. This works well in most cases; however, because it modifies the workload spec, GitOps tools like Argo CD will detect the change as configuration drift and mark your application as `OutOfSync`.

 To avoid that, you can switch to the **restart** strategy, which simply restarts the pod without changing the pod template.

@@ -189,8 +191,10 @@ metadata:
 ✅ Use `restart` if:

 1. You're using GitOps and want to avoid drift
-1. You want a quick restart without changing the workload spec
-1. Your platform restricts metadata changes
+2. You want a quick restart without changing the workload spec
+3. Your platform restricts metadata changes
+
+This setting affects Argo Rollouts behavior, not Argo CD sync settings.

 ### 5. ❗ Annotation Behavior Rules & Compatibility

@@ -239,6 +243,38 @@ This feature allows you to pause rollouts for a deployment for a specified durat
 1. ✅ Your deployment references multiple ConfigMaps or Secrets that may be updated at the same time.
 1. ✅ You want to minimize unnecessary rollouts and reduce downtime caused by back-to-back configuration changes.

+### 8. 🔐 CSI Secret Provider Support
+
+Reloader supports the [Secrets Store CSI Driver](https://secrets-store-csi-driver.sigs.k8s.io/), which allows mounting secrets from external secret stores (such as AWS Secrets Manager, Azure Key Vault, or HashiCorp Vault) directly into pods.
+Unlike Kubernetes Secret objects, CSI-mounted secrets do not always trigger native Kubernetes update events. Reloader solves this by watching CSI status resources and restarting affected workloads when mounted secret versions change.
+
+#### How it works
+
+When secret rotation is enabled, the Secrets Store CSI Driver updates a Kubernetes resource called `SecretProviderClassPodStatus`.
+
+This resource reflects the currently mounted secret versions for a pod.
+Reloader watches these updates and triggers a rollout when a change is detected.
+
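+A trimmed, illustrative example of such a status object is shown below (names and versions are placeholders; the exact fields depend on your Secrets Store CSI Driver version and provider):
+
+```yaml
+apiVersion: secrets-store.csi.x-k8s.io/v1
+kind: SecretProviderClassPodStatus
+metadata:
+  name: my-app-7d4b9c-default-my-spc
+  namespace: default
+status:
+  mounted: true
+  podName: my-app-7d4b9c
+  secretProviderClassName: my-spc
+  objects:
+    - id: my-secret
+      version: "2"
+```
+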
+#### Prerequisites
+
+- The Secrets Store CSI Driver must be installed in your cluster
+- Secret rotation must be enabled in the CSI driver
+- CSI integration must be enabled in Reloader: `--enable-csi-integration=true`
+
+#### Annotations for CSI-mounted Secrets
+
+| Annotation | Description |
+|--------------------------------------------|----------------------------------------------------------------------|
+| `reloader.stakater.com/auto: "true"` | Reloads the workload when CSI-mounted secrets change |
+| `secretproviderclass.reloader.stakater.com/reload: "my-spc"` | Reloads the workload when the named SecretProviderClass changes |
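+
+For example, a Deployment that mounts secrets through a SecretProviderClass named `my-spc` could opt in to reloads as follows (illustrative manifest; names, image, and provider-specific fields are placeholders):
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: my-app
+  annotations:
+    secretproviderclass.reloader.stakater.com/reload: "my-spc"
+spec:
+  selector:
+    matchLabels:
+      app: my-app
+  template:
+    metadata:
+      labels:
+        app: my-app
+    spec:
+      containers:
+        - name: my-app
+          image: my-app:latest
+          volumeMounts:
+            - name: secrets-store
+              mountPath: /mnt/secrets
+              readOnly: true
+      volumes:
+        - name: secrets-store
+          csi:
+            driver: secrets-store.csi.k8s.io
+            readOnly: true
+            volumeAttributes:
+              secretProviderClass: my-spc
+```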
+
+#### Notes & Limitations
+
+- Reloader reacts to CSI status changes, not direct updates to external secret stores
+- Secret rotation must be enabled in the CSI driver for updates to be detected
+- CSI limitations (such as `subPath` mounts) still apply and may require pod restarts
+- If secrets are synced to Kubernetes Secret objects, standard Reloader behavior applies and CSI support may not be required
+
 ## 🚀 Installation

 ### 1. 📦 Helm
diff --git a/internal/pkg/callbacks/rolling_upgrade_test.go b/internal/pkg/callbacks/rolling_upgrade_test.go
index 452867f..75583de 100644
--- a/internal/pkg/callbacks/rolling_upgrade_test.go
+++ b/internal/pkg/callbacks/rolling_upgrade_test.go
@@ -49,7 +49,7 @@ func newTestFixtures() testFixtures {

 func setupTestClients() kube.Clients {
     return kube.Clients{
-        KubernetesClient:  fake.NewSimpleClientset(),
+        KubernetesClient:  fake.NewClientset(),
         ArgoRolloutClient: fakeargoclientset.NewSimpleClientset(),
     }
 }
diff --git a/internal/pkg/cmd/reloader.go b/internal/pkg/cmd/reloader.go
index 6bdb339..771e2df 100644
--- a/internal/pkg/cmd/reloader.go
+++ b/internal/pkg/cmd/reloader.go
@@ -160,15 +160,8 @@ func startReloader(cmd *cobra.Command, args []string) {

     var controllers []*controller.Controller
     for k := range kube.ResourceMap {
-        if k == "secretproviderclasspodstatuses" {
-            if !options.EnableCSIIntegration {
-                logrus.Infof("EnableCSIIntegration is set to false, won't run secretproviderclasspodstatuses controller")
-                continue
-            }
-            if !kube.IsCSIInstalled {
-                logrus.Infof("Can't run secretproviderclasspodstatuses controller as CSI CRDs are not installed")
-                continue
-            }
+        if k == constants.SecretProviderClassController && !shouldRunCSIController() {
+            continue
         }

         if ignoredResourcesList.Contains(k) || (len(namespaceLabelSelector) == 0 && k == "namespaces") {
@@ -218,3 +211,15 @@ func startPProfServer() {
         logrus.Errorf("Failed to start pprof server: %v", err)
     }
 }
+
+func shouldRunCSIController() bool {
+    if !options.EnableCSIIntegration {
+        logrus.Info("Skipping secretproviderclasspodstatuses controller: EnableCSIIntegration is disabled")
+        return false
+    }
+    if !kube.IsCSIInstalled {
+        logrus.Info("Skipping secretproviderclasspodstatuses controller: CSI CRDs not installed")
+        return false
+    }
+    return true
+}
diff --git a/internal/pkg/constants/constants.go b/internal/pkg/constants/constants.go
index 0d1f1c7..8025a29 100644
--- a/internal/pkg/constants/constants.go
+++ b/internal/pkg/constants/constants.go
@@ -24,6 +24,8 @@ const (
     EnvVarsReloadStrategy = "env-vars"
     // AnnotationsReloadStrategy instructs Reloader to add pod template annotations to facilitate a restart
     AnnotationsReloadStrategy = "annotations"
+    // SecretProviderClassController is the resource name for the SecretProviderClassPodStatus controller
+    SecretProviderClassController = "secretproviderclasspodstatuses"
 )

 // Leadership election related consts
diff --git a/internal/pkg/controller/controller.go b/internal/pkg/controller/controller.go
index a670d81..519923e 100644
--- a/internal/pkg/controller/controller.go
+++ b/internal/pkg/controller/controller.go
@@ -2,9 +2,11 @@ package controller

 import (
     "fmt"
+    "slices"
     "time"

     "github.com/sirupsen/logrus"
+    "github.com/stakater/Reloader/internal/pkg/constants"
     "github.com/stakater/Reloader/internal/pkg/handler"
     "github.com/stakater/Reloader/internal/pkg/metrics"
     "github.com/stakater/Reloader/internal/pkg/options"
@@ -21,7 +23,6 @@ import (
     "k8s.io/client-go/tools/record"
     "k8s.io/client-go/util/workqueue"
     "k8s.io/kubectl/pkg/scheme"
-    "k8s.io/utils/strings/slices"
     csiv1 "sigs.k8s.io/secrets-store-csi-driver/apis/v1"
 )

@@ -80,13 +81,9 @@ func NewController(
         }
     }

-    getterRESTClient := client.CoreV1().RESTClient()
-    if resource == "secretproviderclasspodstatuses" {
-        csiClient, err := kube.GetCSIClient()
-        if err != nil {
-            logrus.Fatal(err)
-        }
-        getterRESTClient = csiClient.SecretsstoreV1().RESTClient()
+    getterRESTClient, err := getClientForResource(resource, client)
+    if err != nil {
+        return nil, fmt.Errorf("failed to initialize REST client for %s: %w", resource, err)
     }

     listWatcher := cache.NewFilteredListWatchFromClient(getterRESTClient, resource, namespace, optionsModifier)
@@ -301,3 +298,14 @@ func (c *Controller) handleErr(err error, key interface{}) {
     logrus.Errorf("Dropping key out of the queue: %v", err)
     logrus.Debugf("Dropping the key %q out of the queue: %v", key, err)
 }
+
+func getClientForResource(resource string, coreClient kubernetes.Interface) (cache.Getter, error) {
+    if resource == constants.SecretProviderClassController {
+        csiClient, err := kube.GetCSIClient()
+        if err != nil {
+            return nil, fmt.Errorf("failed to get CSI client: %w", err)
+        }
+        return csiClient.SecretsstoreV1().RESTClient(), nil
+    }
+    return coreClient.CoreV1().RESTClient(), nil
+}
diff --git a/internal/pkg/controller/controller_test.go b/internal/pkg/controller/controller_test.go
index 0399933..778b38d 100644
--- a/internal/pkg/controller/controller_test.go
+++ b/internal/pkg/controller/controller_test.go
@@ -2757,7 +2757,7 @@ func TestController_resourceInNamespaceSelector(t *testing.T) {

     for _, tt := range tests {
         t.Run(tt.name, func(t *testing.T) {
-            fakeClient := fake.NewSimpleClientset()
+            fakeClient := fake.NewClientset()
             namespace, _ := fakeClient.CoreV1().Namespaces().Create(context.Background(), &tt.fields.namespace, metav1.CreateOptions{})
             logrus.Infof("created fakeClient namespace for testing = %s", namespace.Name)

diff --git a/internal/pkg/handler/pause_deployment_test.go b/internal/pkg/handler/pause_deployment_test.go
index c14cbfc..19e7ac6 100644
--- a/internal/pkg/handler/pause_deployment_test.go
+++ b/internal/pkg/handler/pause_deployment_test.go
@@ -244,7 +244,7 @@ func TestHandleMissingTimerSimple(t *testing.T) {
         }()

         t.Run(test.name, func(t *testing.T) {
-            fakeClient := testclient.NewSimpleClientset()
+            fakeClient := testclient.NewClientset()
             clients := kube.Clients{
                 KubernetesClient: fakeClient,
             }
@@ -337,7 +337,7 @@ func TestPauseDeployment(t *testing.T) {

     for _, test := range tests {
         t.Run(test.name, func(t *testing.T) {
-            fakeClient := testclient.NewSimpleClientset()
+            fakeClient := testclient.NewClientset()
             clients := kube.Clients{
                 KubernetesClient: fakeClient,
             }
diff --git a/internal/pkg/handler/update.go b/internal/pkg/handler/update.go
index cc1e16b..25a4380 100644
--- a/internal/pkg/handler/update.go
+++ b/internal/pkg/handler/update.go
@@ -39,20 +39,31 @@ func (r ResourceUpdatedHandler) Handle() error {

 // GetConfig gets configurations containing SHA, annotations, namespace and resource name
 func (r ResourceUpdatedHandler) GetConfig() (common.Config, string) {
-    var oldSHAData string
-    var config common.Config
+    var (
+        oldSHAData string
+        config     common.Config
+    )
+
     switch res := r.Resource.(type) {
     case *v1.ConfigMap:
-        oldSHAData = util.GetSHAfromConfigmap(r.OldResource.(*v1.ConfigMap))
+        if old, ok := r.OldResource.(*v1.ConfigMap); ok && old != nil {
+            oldSHAData = util.GetSHAfromConfigmap(old)
+        }
         config = common.GetConfigmapConfig(res)
+
     case *v1.Secret:
-        oldSHAData = util.GetSHAfromSecret(r.OldResource.(*v1.Secret).Data)
+        if old, ok := r.OldResource.(*v1.Secret); ok && old != nil {
+            oldSHAData = util.GetSHAfromSecret(old.Data)
+        }
         config = common.GetSecretConfig(res)
+
     case *csiv1.SecretProviderClassPodStatus:
-        oldSHAData = util.GetSHAfromSecretProviderClassPodStatus(r.OldResource.(*csiv1.SecretProviderClassPodStatus).Status)
+        if old, ok := r.OldResource.(*csiv1.SecretProviderClassPodStatus); ok && old != nil && old.Status.Objects != nil {
+            oldSHAData = util.GetSHAfromSecretProviderClassPodStatus(old.Status)
+        }
         config = common.GetSecretProviderClassPodStatusConfig(res)
     default:
-        logrus.Warnf("Invalid resource: Resource should be 'Secret', 'Configmap' or 'SecretProviderClassPodStatus' but found, %v", r.Resource)
+        logrus.Warnf("Invalid resource: expected 'Secret', 'ConfigMap' or 'SecretProviderClassPodStatus' but found %T", r.Resource)
     }
     return config, oldSHAData
 }
diff --git a/internal/pkg/handler/upgrade.go b/internal/pkg/handler/upgrade.go
index f5b7ead..b10bfbc 100644
--- a/internal/pkg/handler/upgrade.go
+++ b/internal/pkg/handler/upgrade.go
@@ -634,11 +634,10 @@ func updateEnvVar(container *v1.Container, envVar string, shaData string) consta
 }

 func secretProviderClassEnvReloaded(containers []v1.Container, envVar string, shaData string) bool {
-    for i := range containers {
-        envs := containers[i].Env
-        for j := range envs {
-            if envs[j].Name == envVar {
-                return envs[j].Value == shaData
+    for _, container := range containers {
+        for _, env := range container.Env {
+            if env.Name == envVar {
+                return env.Value == shaData
             }
         }
     }
@@ -649,7 +648,11 @@ func populateAnnotationsFromSecretProviderClass(clients kube.Clients, config *co
     obj, err := clients.CSIClient.SecretsstoreV1().SecretProviderClasses(config.Namespace).Get(context.TODO(), config.ResourceName, metav1.GetOptions{})
     annotations := make(map[string]string)
     if err != nil {
-        logrus.Infof("Couldn't find secretproviderclass '%s' in '%s' namespace for typed annotation", config.ResourceName, config.Namespace)
+        if apierrors.IsNotFound(err) {
+            logrus.Warnf("SecretProviderClass '%s' not found in namespace '%s'", config.ResourceName, config.Namespace)
+        } else {
+            logrus.Errorf("Failed to get SecretProviderClass '%s' in namespace '%s': %v", config.ResourceName, config.Namespace, err)
+        }
     } else if obj.Annotations != nil {
         annotations = obj.Annotations
     }
diff --git a/internal/pkg/handler/upgrade_test.go b/internal/pkg/handler/upgrade_test.go
index 5bf490f..c1897f6 100644
--- a/internal/pkg/handler/upgrade_test.go
+++ b/internal/pkg/handler/upgrade_test.go
@@ -32,7 +32,7 @@ import (

 var (
     clients = kube.Clients{
-        KubernetesClient: testclient.NewSimpleClientset(),
+        KubernetesClient: testclient.NewClientset(),
         CSIClient:        csitestclient.NewSimpleClientset(),
     }