Add a disable-default-addon-namespace flag (#484)

* Add a disable-default-addon-namespace flag

If the flag is set, the default addon namespace will not be created
by the operator.

Signed-off-by: Jian Qiu <jqiu@redhat.com>

* Update with comments

Signed-off-by: Jian Qiu <jqiu@redhat.com>

---------

Signed-off-by: Jian Qiu <jqiu@redhat.com>
This commit is contained in:
Jian Qiu
2024-06-03 14:54:15 +08:00
committed by GitHub
parent a115625fab
commit c056181096
7 changed files with 111 additions and 32 deletions

View File

@@ -31,14 +31,20 @@ func NewKlusterletOperatorCmd() *cobra.Command {
// add disable leader election flag
flags := cmd.Flags()
cmd.Flags().BoolVar(&klOptions.SkipPlaceholderHubSecret, "skip-placeholder-hub-secret", false,
flags.BoolVar(&klOptions.SkipPlaceholderHubSecret, "skip-placeholder-hub-secret", false,
"If set, will skip ensuring a placeholder hub secret which is originally intended for pulling "+
"work image before approved")
cmd.Flags().StringVar(&klOptions.ControlPlaneNodeLabelSelector, "control-plane-node-label-selector",
if err := flags.MarkDeprecated("skip-placeholder-hub-secret", "flag is not used in the operator."); err != nil {
utilruntime.Must(err)
}
flags.StringVar(&klOptions.ControlPlaneNodeLabelSelector, "control-plane-node-label-selector",
"node-role.kubernetes.io/master=", "control plane node labels, "+
"e.g. 'environment=production', 'tier notin (frontend,backend)'")
cmd.Flags().Int32Var(&klOptions.DeploymentReplicas, "deployment-replicas", 0,
flags.Int32Var(&klOptions.DeploymentReplicas, "deployment-replicas", 0,
"Number of deployment replicas, operator will automatically determine replicas if not set")
flags.BoolVar(&klOptions.DisableAddonNamespace, "disable-default-addon-namespace", false,
"If set, will not create default open-cluster-management-agent-addon ns")
opts.AddFlags(flags)
return cmd

View File

@@ -40,6 +40,7 @@ type klusterletCleanupController struct {
managedClusterClientsBuilder managedClusterClientsBuilderInterface
controlPlaneNodeLabelSelector string
deploymentReplicas int32
disableAddonNamespace bool
}
// NewKlusterletCleanupController construct klusterlet cleanup controller
@@ -55,6 +56,7 @@ func NewKlusterletCleanupController(
operatorNamespace string,
controlPlaneNodeLabelSelector string,
deploymentReplicas int32,
disableAddonNamespace bool,
recorder events.Recorder) factory.Controller {
controller := &klusterletCleanupController{
kubeClient: kubeClient,
@@ -67,6 +69,7 @@ func NewKlusterletCleanupController(
managedClusterClientsBuilder: newManagedClusterClientsBuilder(kubeClient, apiExtensionClient, appliedManifestWorkClient, recorder),
controlPlaneNodeLabelSelector: controlPlaneNodeLabelSelector,
deploymentReplicas: deploymentReplicas,
disableAddonNamespace: disableAddonNamespace,
}
return factory.New().WithSync(controller.sync).
@@ -122,6 +125,7 @@ func (n *klusterletCleanupController) sync(ctx context.Context, controllerContex
ExternalManagedKubeConfigWorkSecret: helpers.ExternalManagedKubeConfigWork,
InstallMode: klusterlet.Spec.DeployOption.Mode,
HubApiServerHostAlias: klusterlet.Spec.HubApiServerHostAlias,
DisableAddonNamespace: n.disableAddonNamespace,
RegistrationServiceAccount: serviceAccountName("registration-sa", klusterlet),
WorkServiceAccount: serviceAccountName("work-sa", klusterlet),

View File

@@ -41,7 +41,6 @@ const (
klusterletFinalizer = "operator.open-cluster-management.io/klusterlet-cleanup"
managedResourcesEvictionTimestampAnno = "operator.open-cluster-management.io/managed-resources-eviction-timestamp"
klusterletNamespaceLabelKey = "operator.open-cluster-management.io/klusterlet"
hostedKlusterletLabelKey = "operator.open-cluster-management.io/hosted-klusterlet"
)
type klusterletController struct {
@@ -50,11 +49,11 @@ type klusterletController struct {
kubeClient kubernetes.Interface
kubeVersion *version.Version
operatorNamespace string
skipHubSecretPlaceholder bool
cache resourceapply.ResourceCache
managedClusterClientsBuilder managedClusterClientsBuilderInterface
controlPlaneNodeLabelSelector string
deploymentReplicas int32
disableAddonNamespace bool
}
type klusterletReconcile interface {
@@ -82,8 +81,8 @@ func NewKlusterletController(
operatorNamespace string,
controlPlaneNodeLabelSelector string,
deploymentReplicas int32,
recorder events.Recorder,
skipHubSecretPlaceholder bool) factory.Controller {
disableAddonNamespace bool,
recorder events.Recorder) factory.Controller {
controller := &klusterletController{
kubeClient: kubeClient,
patcher: patcher.NewPatcher[
@@ -91,11 +90,11 @@ func NewKlusterletController(
klusterletLister: klusterletInformer.Lister(),
kubeVersion: kubeVersion,
operatorNamespace: operatorNamespace,
skipHubSecretPlaceholder: skipHubSecretPlaceholder,
cache: resourceapply.NewResourceCache(),
managedClusterClientsBuilder: newManagedClusterClientsBuilder(kubeClient, apiExtensionClient, appliedManifestWorkClient, recorder),
controlPlaneNodeLabelSelector: controlPlaneNodeLabelSelector,
deploymentReplicas: deploymentReplicas,
disableAddonNamespace: disableAddonNamespace,
}
return factory.New().WithSync(controller.sync).
@@ -164,6 +163,9 @@ type klusterletConfig struct {
// ResourceRequirements is the resource requirements for the klusterlet managed containers.
// The type has to be []byte to use "indent" template function.
ResourceRequirements []byte
// DisableAddonNamespace is the flag to disable the creation of the default addon namespace.
DisableAddonNamespace bool
}
func (n *klusterletController) sync(ctx context.Context, controllerContext factory.SyncContext) error {
@@ -218,6 +220,7 @@ func (n *klusterletController) sync(ctx context.Context, controllerContext facto
WorkServiceAccount: serviceAccountName("work-sa", klusterlet),
ResourceRequirementResourceType: helpers.ResourceType(klusterlet),
ResourceRequirements: resourceRequirements,
DisableAddonNamespace: n.disableAddonNamespace,
}
managedClusterClients, err := n.managedClusterClientsBuilder.

View File

@@ -782,7 +782,9 @@ func TestRemoveOldNamespace(t *testing.T) {
Type: operatorapiv1.ConditionKlusterletApplied,
Status: metav1.ConditionTrue,
})
controller.operatorStore.Update(klusterlet)
if err := controller.operatorStore.Update(klusterlet); err != nil {
t.Fatal(err)
}
err = controller.controller.sync(context.TODO(), syncContext)
if err != nil {
t.Errorf("Expected non error when sync, %v", err)
@@ -803,6 +805,53 @@ func TestRemoveOldNamespace(t *testing.T) {
}
}
// TestSyncDeploy tests deployment of klusterlet components
// TestSyncDisableAddonNamespace verifies that when disableAddonNamespace is set
// on the klusterlet controller, a sync creates no Namespace objects at all,
// while the klusterlet still ends up with the Applied/FeatureGatesValid
// conditions patched to True.
func TestSyncDisableAddonNamespace(t *testing.T) {
klusterlet := newKlusterlet("klusterlet", "testns", "cluster1")
bootStrapSecret := newSecret(helpers.BootstrapHubKubeConfig, "testns")
hubKubeConfigSecret := newSecret(helpers.HubKubeConfig, "testns")
// Any non-empty payload works here; the sync only needs the secret to exist.
hubKubeConfigSecret.Data["kubeconfig"] = []byte("dummuykubeconnfig")
namespace := newNamespace("testns")
syncContext := testingcommon.NewFakeSyncContext(t, "klusterlet")
controller := newTestController(t, klusterlet, syncContext.Recorder(), nil, bootStrapSecret, hubKubeConfigSecret, namespace)
// The flag under test: suppress creation of the default addon namespace.
controller.controller.disableAddonNamespace = true
err := controller.controller.sync(context.TODO(), syncContext)
if err != nil {
t.Errorf("Expected non error when sync, %v", err)
}
// Collect every Namespace the sync attempted to create on the fake client.
var ns []runtime.Object
kubeActions := controller.kubeClient.Actions()
for _, action := range kubeActions {
if action.GetVerb() == createVerb {
object := action.(clienttesting.CreateActionImpl).Object
if object.GetObjectKind().GroupVersionKind().Kind == "Namespace" {
ns = append(ns, object)
}
}
}
// With the flag set, no namespace at all should be created by the sync loop.
if len(ns) != 0 {
t.Errorf("Expect 0 ns created in the sync loop, actual %d", len(ns))
}
// The sync should still patch the klusterlet status exactly once.
operatorAction := controller.operatorClient.Actions()
testingcommon.AssertActions(t, operatorAction, "patch")
// Decode the patch payload to inspect the resulting conditions.
klusterlet = &operatorapiv1.Klusterlet{}
patchData := operatorAction[0].(clienttesting.PatchActionImpl).Patch
err = json.Unmarshal(patchData, klusterlet)
if err != nil {
t.Fatal(err)
}
testinghelper.AssertOnlyConditions(
t, klusterlet,
testinghelper.NamedCondition(operatorapiv1.ConditionKlusterletApplied, "KlusterletApplied", metav1.ConditionTrue),
testinghelper.NamedCondition(helpers.FeatureGatesTypeValid, helpers.FeatureGatesReasonAllValid, metav1.ConditionTrue),
)
}
// TestGetServersFromKlusterlet tests getServersFromKlusterlet func
func TestGetServersFromKlusterlet(t *testing.T) {
cases := []struct {

View File

@@ -63,29 +63,36 @@ type managedReconcile struct {
func (r *managedReconcile) reconcile(ctx context.Context, klusterlet *operatorapiv1.Klusterlet,
config klusterletConfig) (*operatorapiv1.Klusterlet, reconcileState, error) {
// For now, whether in Default or Hosted mode, the addons will be deployed on the managed cluster.
// sync image pull secret from management cluster to managed cluster for addon namespace
// TODO(zhujian7): In the future, we may consider deploy addons on the management cluster in Hosted mode.
// Ensure the addon namespace on the managed cluster
err := ensureNamespace(ctx, r.managedClusterClients.kubeClient, klusterlet, helpers.DefaultAddonNamespace, nil, r.recorder)
if err != nil {
return klusterlet, reconcileStop, err
}
// Sync pull secret to the klusterlet addon namespace
// The reason we keep syncing secret instead of adding a label to trigger addonsecretcontroller to sync is:
// addonsecretcontroller only watch namespaces in the same cluster klusterlet is running on.
// And if addons are deployed in default mode on the managed cluster, but klusterlet is deployed in hosted
// on management cluster, then we still need to sync the secret here in klusterlet-controller using `managedClusterClients.kubeClient`.
err = syncPullSecret(ctx, r.kubeClient, r.managedClusterClients.kubeClient, klusterlet, r.operatorNamespace, helpers.DefaultAddonNamespace, r.recorder)
if err != nil {
return klusterlet, reconcileStop, err
if !config.DisableAddonNamespace {
// For now, whether in Default or Hosted mode, the addons will be deployed on the managed cluster.
// sync image pull secret from management cluster to managed cluster for addon namespace
// TODO(zhujian7): In the future, we may consider deploy addons on the management cluster in Hosted mode.
// Ensure the addon namespace on the managed cluster
if err := ensureNamespace(
ctx,
r.managedClusterClients.kubeClient,
klusterlet, helpers.DefaultAddonNamespace, nil, r.recorder); err != nil {
return klusterlet, reconcileStop, err
}
// Sync pull secret to the klusterlet addon namespace
// The reason we keep syncing secret instead of adding a label to trigger addonsecretcontroller to sync is:
// addonsecretcontroller only watch namespaces in the same cluster klusterlet is running on.
// And if addons are deployed in default mode on the managed cluster, but klusterlet is deployed in hosted
// on management cluster, then we still need to sync the secret here in klusterlet-controller using `managedClusterClients.kubeClient`.
if err := syncPullSecret(
ctx,
r.kubeClient,
r.managedClusterClients.kubeClient,
klusterlet, r.operatorNamespace, helpers.DefaultAddonNamespace, r.recorder); err != nil {
return klusterlet, reconcileStop, err
}
}
err = ensureNamespace(
if err := ensureNamespace(
ctx, r.managedClusterClients.kubeClient, klusterlet, config.KlusterletNamespace, map[string]string{
klusterletNamespaceLabelKey: klusterlet.Name,
}, r.recorder)
if err != nil {
}, r.recorder); err != nil {
return klusterlet, reconcileStop, err
}
@@ -197,7 +204,10 @@ func (r *managedReconcile) clean(ctx context.Context, klusterlet *operatorapiv1.
// remove the klusterlet namespace and klusterlet addon namespace on the managed cluster
// For now, whether in Default or Hosted mode, the addons could be deployed on the managed cluster.
namespaces := []string{config.KlusterletNamespace, fmt.Sprintf("%s-addon", config.KlusterletNamespace)}
namespaces := []string{config.KlusterletNamespace}
if !config.DisableAddonNamespace {
namespaces = append(namespaces, helpers.DefaultAddonNamespace)
}
for _, namespace := range namespaces {
if err := r.managedClusterClients.kubeClient.CoreV1().Namespaces().Delete(
ctx, namespace, metav1.DeleteOptions{}); err != nil && !errors.IsNotFound(err) {

View File

@@ -7,6 +7,7 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
operatorapiv1 "open-cluster-management.io/api/operator/v1"
@@ -38,8 +39,12 @@ func (r *namespaceReconcile) reconcile(
if err != nil {
return klusterlet, reconcileStop, err
}
skippedNamespaces := sets.New[string](config.KlusterletNamespace)
if !config.DisableAddonNamespace {
skippedNamespaces.Insert(helpers.DefaultAddonNamespace)
}
for _, ns := range namespaces.Items {
if ns.Name == config.KlusterletNamespace || ns.Name == helpers.DefaultAddonNamespace {
if skippedNamespaces.Has(ns.Name) {
continue
}
if err := r.managedClusterClients.kubeClient.CoreV1().Namespaces().Delete(

View File

@@ -28,6 +28,7 @@ type Options struct {
SkipPlaceholderHubSecret bool
ControlPlaneNodeLabelSelector string
DeploymentReplicas int32
DisableAddonNamespace bool
}
// RunKlusterletOperator starts a new klusterlet operator
@@ -100,8 +101,8 @@ func (o *Options) RunKlusterletOperator(ctx context.Context, controllerContext *
helpers.GetOperatorNamespace(),
o.ControlPlaneNodeLabelSelector,
o.DeploymentReplicas,
controllerContext.EventRecorder,
o.SkipPlaceholderHubSecret)
o.DisableAddonNamespace,
controllerContext.EventRecorder)
klusterletCleanupController := klusterletcontroller.NewKlusterletCleanupController(
kubeClient,
@@ -115,6 +116,7 @@ func (o *Options) RunKlusterletOperator(ctx context.Context, controllerContext *
helpers.GetOperatorNamespace(),
o.ControlPlaneNodeLabelSelector,
o.DeploymentReplicas,
o.DisableAddonNamespace,
controllerContext.EventRecorder)
ssarController := ssarcontroller.NewKlusterletSSARController(