Mirror of https://github.com/open-cluster-management-io/ocm.git
Fix: reduce cluster RBAC when accept is set to false. (#675)
Signed-off-by: xuezhaojun <zxue@redhat.com>
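The commit renders the cluster-scoped RBAC manifests from a template that branches on whether the hub still accepts the cluster, so a denied cluster keeps only read access to its own ManagedCluster. Below is a minimal, self-contained sketch of that branching; it uses the standard library's text/template directly (the commit itself goes through library-go's assets helper), and the template string is a trimmed stand-in for rbac/managedcluster-clusterrole.yaml, not the real manifest:

package main

import (
	"fmt"
	"os"
	"text/template"
)

// Trimmed stand-in for rbac/managedcluster-clusterrole.yaml: when Accepted is
// false, the rendered role only allows get/list/watch on the owner ManagedCluster.
const clusterRoleTemplate = `rules:
{{- if .Accepted }}
- apiGroups: ["certificates.k8s.io"]
  resources: ["certificatesigningrequests"]
  verbs: ["create", "get", "list", "watch"]
{{- else }}
- apiGroups: ["cluster.open-cluster-management.io"]
  resources: ["managedclusters"]
  resourceNames: ["{{ .ManagedClusterName }}"]
  verbs: ["get", "list", "watch"]
{{- end }}
`

func main() {
	tmpl := template.Must(template.New("clusterrole").Parse(clusterRoleTemplate))
	for _, accepted := range []bool{true, false} {
		// Same config shape as the ManagedClusterAssetFnWithAccepted helper added below.
		config := struct {
			ManagedClusterName string
			Accepted           bool
		}{ManagedClusterName: "cluster1", Accepted: accepted}

		fmt.Printf("--- accepted=%v ---\n", accepted)
		if err := tmpl.Execute(os.Stdout, config); err != nil {
			panic(err)
		}
	}
}

Running the sketch prints the broader rule set for accepted=true and only the get/list/watch rule for accepted=false, which matches what the integration test added in this commit asserts.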
@@ -76,6 +76,24 @@ func ManagedClusterAssetFn(fs embed.FS, managedClusterName string) resourceapply
     }
 }
 
+func ManagedClusterAssetFnWithAccepted(fs embed.FS, managedClusterName string, accepted bool) resourceapply.AssetFunc {
+    return func(name string) ([]byte, error) {
+        config := struct {
+            ManagedClusterName string
+            Accepted           bool
+        }{
+            ManagedClusterName: managedClusterName,
+            Accepted:           accepted,
+        }
+
+        template, err := fs.ReadFile(name)
+        if err != nil {
+            return nil, err
+        }
+        return assets.MustCreateAssetFromTemplate(name, template, config).Data, nil
+    }
+}
+
 // FindTaintByKey returns a taint if the managed cluster has a taint with the given key.
 func FindTaintByKey(managedCluster *clusterv1.ManagedCluster, key string) *clusterv1.Taint {
     if managedCluster == nil {
@@ -91,7 +91,7 @@ func (r *gcClusterRbacController) reconcile(ctx context.Context, cluster *cluste
         return gcReconcileStop, err
     }
 
-    if err := r.removeClusterRbac(ctx, cluster.Name); err != nil {
+    if err := r.removeClusterRbac(ctx, cluster.Name, cluster.Spec.HubAcceptsClient); err != nil {
         return gcReconcileContinue, err
     }
 
@@ -124,12 +124,12 @@ func (r *gcClusterRbacController) reconcile(ctx context.Context, cluster *cluste
     return gcReconcileContinue, r.clusterPatcher.RemoveFinalizer(ctx, cluster, clusterv1.ManagedClusterFinalizer)
 }
 
-func (r *gcClusterRbacController) removeClusterRbac(ctx context.Context, clusterName string) error {
+func (r *gcClusterRbacController) removeClusterRbac(ctx context.Context, clusterName string, accepted bool) error {
     var errs []error
     // Clean up managed cluster manifests
-    assetFn := helpers.ManagedClusterAssetFn(manifests.RBACManifests, clusterName)
+    assetFn := helpers.ManagedClusterAssetFnWithAccepted(manifests.RBACManifests, clusterName, accepted)
     resourceResults := resourceapply.DeleteAll(ctx, resourceapply.NewKubeClientHolder(r.kubeClient),
-        r.eventRecorder, assetFn, manifests.ClusterSpecificRBACFiles...)
+        r.eventRecorder, assetFn, append(manifests.ClusterSpecificRBACFiles, manifests.ClusterSpecificRoleBindings...)...)
     for _, result := range resourceResults {
         if result.Error != nil {
             errs = append(errs, fmt.Errorf("%q (%T): %v", result.File, result.Type, result.Error))
@@ -126,8 +126,33 @@ func (c *managedClusterController) sync(ctx context.Context, syncCtx factory.Syn
         // Hub cluster-admin denies the current spoke cluster, we remove its related resources and update its condition.
         c.eventRecorder.Eventf("ManagedClusterDenied", "managed cluster %s is denied by hub cluster admin", managedClusterName)
 
-        if err := c.removeManagedClusterResources(ctx, managedClusterName); err != nil {
-            return err
-        }
+        // Apply(Update) the cluster specific rbac resources for this spoke cluster with hubAcceptsClient=false.
+        var errs []error
+        applyResults := c.applier.Apply(ctx, syncCtx.Recorder(),
+            helpers.ManagedClusterAssetFnWithAccepted(manifests.RBACManifests, managedClusterName, managedCluster.Spec.HubAcceptsClient),
+            manifests.ClusterSpecificRBACFiles...)
+        for _, result := range applyResults {
+            if result.Error != nil {
+                errs = append(errs, result.Error)
+            }
+        }
+        if aggErr := operatorhelpers.NewMultiLineAggregate(errs); aggErr != nil {
+            return aggErr
+        }
+
+        // Remove the cluster role binding files for registration-agent and work-agent.
+        removeResults := resourceapply.DeleteAll(ctx,
+            resourceapply.NewKubeClientHolder(c.kubeClient),
+            c.eventRecorder,
+            helpers.ManagedClusterAssetFn(manifests.RBACManifests, managedClusterName),
+            manifests.ClusterSpecificRoleBindings...)
+        for _, result := range removeResults {
+            if result.Error != nil {
+                errs = append(errs, fmt.Errorf("%q (%T): %v", result.File, result.Type, result.Error))
+            }
+        }
+        if aggErr := operatorhelpers.NewMultiLineAggregate(errs); aggErr != nil {
+            return aggErr
+        }
 
         if err = c.approver.Cleanup(ctx, managedCluster); err != nil {
@@ -159,25 +184,22 @@ func (c *managedClusterController) sync(ctx context.Context, syncCtx factory.Syn
         },
     }
 
+    // Hub cluster-admin accepts the spoke cluster, we apply
+    // 1. namespace for this spoke cluster.
+    // 2. cluster specific rbac resources for this spoke cluster.(hubAcceptsClient=true)
+    // 3. cluster specific rolebinding(registration-agent and work-agent) for this spoke cluster.
     var errs []error
     _, _, err = resourceapply.ApplyNamespace(ctx, c.kubeClient.CoreV1(), syncCtx.Recorder(), namespace)
     if err != nil {
         errs = append(errs, err)
     }
 
-    // Hub cluster-admin accepts the spoke cluster, we apply
-    // 1. clusterrole and clusterrolebinding for this spoke cluster.
-    // 2. namespace for this spoke cluster.
-    // 3. role and rolebinding for this spoke cluster on its namespace.
-    resourceResults := c.applier.Apply(
-        ctx,
-        syncCtx.Recorder(),
-        helpers.ManagedClusterAssetFn(manifests.RBACManifests, managedClusterName),
-        manifests.ClusterSpecificRBACFiles...,
-    )
+    resourceResults := c.applier.Apply(ctx, syncCtx.Recorder(),
+        helpers.ManagedClusterAssetFnWithAccepted(manifests.RBACManifests, managedClusterName, managedCluster.Spec.HubAcceptsClient),
+        append(manifests.ClusterSpecificRBACFiles, manifests.ClusterSpecificRoleBindings...)...)
     for _, result := range resourceResults {
         if result.Error != nil {
-            errs = append(errs, fmt.Errorf("%q (%T): %v", result.File, result.Type, result.Error))
+            errs = append(errs, result.Error)
         }
     }
 
@@ -207,23 +229,6 @@ func (c *managedClusterController) sync(ctx context.Context, syncCtx factory.Syn
     return operatorhelpers.NewMultiLineAggregate(errs)
 }
 
-func (c *managedClusterController) removeManagedClusterResources(ctx context.Context, managedClusterName string) error {
-    var errs []error
-    // Clean up managed cluster manifests
-    assetFn := helpers.ManagedClusterAssetFn(manifests.RBACManifests, managedClusterName)
-    resourceResults := resourceapply.DeleteAll(ctx,
-        resourceapply.NewKubeClientHolder(c.kubeClient),
-        c.eventRecorder,
-        assetFn,
-        manifests.ClusterSpecificRBACFiles...)
-    for _, result := range resourceResults {
-        if result.Error != nil {
-            errs = append(errs, fmt.Errorf("%q (%T): %v", result.File, result.Type, result.Error))
-        }
-    }
-    return operatorhelpers.NewMultiLineAggregate(errs)
-}
-
 func (c *managedClusterController) acceptCluster(ctx context.Context, managedCluster *v1.ManagedCluster) error {
     acceptedTime := time.Now()
 
@@ -29,29 +29,36 @@ import (
 
 func TestSyncManagedCluster(t *testing.T) {
     cases := []struct {
-        name                string
-        autoApprovalEnabled bool
-        startingObjects     []runtime.Object
-        validateActions     func(t *testing.T, actions []clienttesting.Action)
+        name                   string
+        autoApprovalEnabled    bool
+        startingObjects        []runtime.Object
+        validateClusterActions func(t *testing.T, actions []clienttesting.Action)
+        validateKubeActions    func(t *testing.T, actions []clienttesting.Action)
     }{
         {
             name:            "sync a deleted spoke cluster",
             startingObjects: []runtime.Object{},
-            validateActions: func(t *testing.T, actions []clienttesting.Action) {
+            validateClusterActions: func(t *testing.T, actions []clienttesting.Action) {
                 testingcommon.AssertNoActions(t, actions)
             },
+            validateKubeActions: func(t *testing.T, actions []clienttesting.Action) {
+                testingcommon.AssertNoActions(t, actions)
+            },
         },
         {
-            name:            "create a new spoke cluster",
+            name:            "create a new spoke cluster(not accepted before, no accept condition)",
             startingObjects: []runtime.Object{testinghelpers.NewManagedCluster()},
-            validateActions: func(t *testing.T, actions []clienttesting.Action) {
+            validateClusterActions: func(t *testing.T, actions []clienttesting.Action) {
                 testingcommon.AssertNoActions(t, actions)
             },
+            validateKubeActions: func(t *testing.T, actions []clienttesting.Action) {
+                testingcommon.AssertNoActions(t, actions)
+            },
         },
         {
             name:            "accept a spoke cluster",
             startingObjects: []runtime.Object{testinghelpers.NewAcceptingManagedCluster()},
-            validateActions: func(t *testing.T, actions []clienttesting.Action) {
+            validateClusterActions: func(t *testing.T, actions []clienttesting.Action) {
                 expectedCondition := metav1.Condition{
                     Type:   v1.ManagedClusterConditionHubAccepted,
                     Status: metav1.ConditionTrue,
@@ -67,18 +74,34 @@ func TestSyncManagedCluster(t *testing.T) {
                 }
                 testingcommon.AssertCondition(t, managedCluster.Status.Conditions, expectedCondition)
             },
+            validateKubeActions: func(t *testing.T, actions []clienttesting.Action) {
+                testingcommon.AssertActions(t, actions,
+                    "get", "create", // namespace
+                    "create", // clusterrole
+                    "create", // clusterrolebinding
+                    "create", // registration rolebinding
+                    "create") // work rolebinding
+            },
         },
         {
             name:            "sync an accepted spoke cluster",
             startingObjects: []runtime.Object{testinghelpers.NewAcceptedManagedCluster()},
-            validateActions: func(t *testing.T, actions []clienttesting.Action) {
+            validateClusterActions: func(t *testing.T, actions []clienttesting.Action) {
                 testingcommon.AssertNoActions(t, actions)
             },
+            validateKubeActions: func(t *testing.T, actions []clienttesting.Action) {
+                testingcommon.AssertActions(t, actions,
+                    "get", "create", // namespace
+                    "create", // clusterrole
+                    "create", // clusterrolebinding
+                    "create", // registration rolebinding
+                    "create") // work rolebinding
+            },
         },
         {
             name:            "deny an accepted spoke cluster",
             startingObjects: []runtime.Object{testinghelpers.NewDeniedManagedCluster("True")},
-            validateActions: func(t *testing.T, actions []clienttesting.Action) {
+            validateClusterActions: func(t *testing.T, actions []clienttesting.Action) {
                 expectedCondition := metav1.Condition{
                     Type:   v1.ManagedClusterConditionHubAccepted,
                     Status: metav1.ConditionFalse,
@@ -94,11 +117,21 @@ func TestSyncManagedCluster(t *testing.T) {
                 }
                 testingcommon.AssertCondition(t, managedCluster.Status.Conditions, expectedCondition)
             },
+            validateKubeActions: func(t *testing.T, actions []clienttesting.Action) {
+                testingcommon.AssertActions(t, actions,
+                    "create", // clusterrole
+                    "create", // clusterrolebinding
+                    "delete", // registration rolebinding
+                    "delete") // work rolebinding
+            },
         },
         {
             name:            "delete a spoke cluster",
             startingObjects: []runtime.Object{testinghelpers.NewDeletingManagedCluster()},
-            validateActions: func(t *testing.T, actions []clienttesting.Action) {
+            validateClusterActions: func(t *testing.T, actions []clienttesting.Action) {
                 testingcommon.AssertNoActions(t, actions)
             },
+            validateKubeActions: func(t *testing.T, actions []clienttesting.Action) {
+                testingcommon.AssertNoActions(t, actions)
+            },
         },
@@ -106,7 +139,7 @@ func TestSyncManagedCluster(t *testing.T) {
             name:                "should accept the clusters when auto approval is enabled",
             autoApprovalEnabled: true,
             startingObjects:     []runtime.Object{testinghelpers.NewManagedCluster()},
-            validateActions: func(t *testing.T, actions []clienttesting.Action) {
+            validateClusterActions: func(t *testing.T, actions []clienttesting.Action) {
                 testingcommon.AssertActions(t, actions, "patch")
             },
         },
@@ -114,7 +147,7 @@ func TestSyncManagedCluster(t *testing.T) {
             name:                "should add the auto approval annotation to an accepted cluster when auto approval is enabled",
             autoApprovalEnabled: true,
             startingObjects:     []runtime.Object{testinghelpers.NewAcceptedManagedCluster()},
-            validateActions: func(t *testing.T, actions []clienttesting.Action) {
+            validateClusterActions: func(t *testing.T, actions []clienttesting.Action) {
                 testingcommon.AssertActions(t, actions, "patch")
                 patch := actions[0].(clienttesting.PatchAction).GetPatch()
                 managedCluster := &v1.ManagedCluster{}
@@ -165,7 +198,10 @@ func TestSyncManagedCluster(t *testing.T) {
                 t.Errorf("unexpected err: %v", syncErr)
             }
 
-            c.validateActions(t, clusterClient.Actions())
+            c.validateClusterActions(t, clusterClient.Actions())
+            if c.validateKubeActions != nil {
+                c.validateKubeActions(t, kubeClient.Actions())
+            }
         })
     }
 }
@@ -6,10 +6,17 @@ import "embed"
 var RBACManifests embed.FS
 
 // ClusterSpecificRBACFiles are cluster-specific RBAC manifests.
-// Created when a managed cluster is accepted and removed when a managed cluster is removed or not accepted.
+// Created when a managed cluster is created.
+// Updated according to the managed cluster's spec.hubAcceptsClient.
+// Removed when a managed cluster is removed.
 var ClusterSpecificRBACFiles = []string{
     "rbac/managedcluster-clusterrole.yaml",
     "rbac/managedcluster-clusterrolebinding.yaml",
+}
+
+// ClusterSpecificRoleBindings are also cluster-specific rolebindings.
+// Created when a managed cluster is accepted and removed when a managed cluster is removed or not accepted.
+var ClusterSpecificRoleBindings = []string{
     "rbac/managedcluster-registration-rolebinding.yaml",
     "rbac/managedcluster-work-rolebinding.yaml",
 }
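With the manifests split into two lists, the clusterrole/clusterrolebinding pair is kept for the lifetime of the cluster (its content follows spec.hubAcceptsClient), while the registration-agent and work-agent rolebindings exist only while the cluster is accepted. A small illustrative sketch of that selection, with plain string slices standing in for the two exported lists (the controllers themselves drive this through library-go's apply/delete helpers):

package main

import "fmt"

// Stand-ins for manifests.ClusterSpecificRBACFiles and manifests.ClusterSpecificRoleBindings.
var (
	clusterSpecificRBACFiles = []string{
		"rbac/managedcluster-clusterrole.yaml",
		"rbac/managedcluster-clusterrolebinding.yaml",
	}
	clusterSpecificRoleBindings = []string{
		"rbac/managedcluster-registration-rolebinding.yaml",
		"rbac/managedcluster-work-rolebinding.yaml",
	}
)

// filesToApply mirrors the controller behavior in this commit: the clusterrole and
// clusterrolebinding are always applied (their content varies with accepted), while
// the agent rolebindings are applied only when the hub accepts the cluster and are
// deleted otherwise. The helper name is illustrative, not part of the commit.
func filesToApply(accepted bool) (apply, remove []string) {
	if accepted {
		// Same shape as append(manifests.ClusterSpecificRBACFiles, manifests.ClusterSpecificRoleBindings...)... above.
		return append(clusterSpecificRBACFiles, clusterSpecificRoleBindings...), nil
	}
	return clusterSpecificRBACFiles, clusterSpecificRoleBindings
}

func main() {
	for _, accepted := range []bool{true, false} {
		apply, remove := filesToApply(accepted)
		fmt.Printf("accepted=%v apply=%v remove=%v\n", accepted, apply, remove)
	}
}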
@@ -5,6 +5,7 @@ metadata:
   labels:
     open-cluster-management.io/cluster-name: "{{ .ManagedClusterName }}"
 rules:
+{{- if .Accepted }}
 # Allow agent to rotate its certificate
 - apiGroups: ["certificates.k8s.io"]
   resources: ["certificatesigningrequests"]
@@ -22,3 +23,10 @@ rules:
   resources: ["managedclusters/status"]
   resourceNames: ["{{ .ManagedClusterName }}"]
   verbs: ["patch", "update"]
+{{- else }}
+# Only allow agent to get/list/watch its owner managed cluster
+- apiGroups: ["cluster.open-cluster-management.io"]
+  resources: ["managedclusters"]
+  resourceNames: ["{{ .ManagedClusterName }}"]
+  verbs: ["get", "list", "watch"]
+{{- end }}
test/integration/registration/managedcluster_accept_test.go (new file, 148 lines)
@@ -0,0 +1,148 @@
+package registration_test
+
+import (
+    "context"
+    "fmt"
+    "reflect"
+
+    . "github.com/onsi/ginkgo/v2"
+    . "github.com/onsi/gomega"
+    rbacv1 "k8s.io/api/rbac/v1"
+    "k8s.io/apimachinery/pkg/api/errors"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/util/rand"
+
+    clusterv1 "open-cluster-management.io/api/cluster/v1"
+)
+
+var _ = Describe("ManagedCluster set hubAcceptsClient from true to false", Label("managedcluster-accept"), func() {
+    var managedCluster *clusterv1.ManagedCluster
+    BeforeEach(func() {
+        managedClusterName := fmt.Sprintf("managedcluster-%s", rand.String(6))
+        managedCluster = &clusterv1.ManagedCluster{
+            ObjectMeta: metav1.ObjectMeta{
+                Name: managedClusterName,
+            },
+            Spec: clusterv1.ManagedClusterSpec{
+                HubAcceptsClient: true,
+            },
+        }
+        _, err := clusterClient.ClusterV1().ManagedClusters().Create(context.Background(), managedCluster, metav1.CreateOptions{})
+        Expect(err).NotTo(HaveOccurred())
+
+        // Check rbac files should be created
+        Eventually(func() error {
+            _, err := kubeClient.RbacV1().ClusterRoles().Get(context.Background(), mclClusterRoleName(managedCluster.Name), metav1.GetOptions{})
+            if err != nil {
+                return err
+            }
+
+            // check clusterrole is correct
+            clusterRole, err := kubeClient.RbacV1().ClusterRoles().Get(context.Background(), mclClusterRoleName(managedCluster.Name), metav1.GetOptions{})
+            if err != nil {
+                return err
+            }
+            if len(clusterRole.Rules) != 4 {
+                return fmt.Errorf("expected 4 rules, got %d rules", len(clusterRole.Rules))
+            }
+
+            return nil
+        }, eventuallyTimeout, eventuallyInterval).Should(Succeed())
+
+        Eventually(func() error {
+            _, err := kubeClient.RbacV1().ClusterRoleBindings().Get(context.Background(), mclClusterRoleBindingName(managedCluster.Name), metav1.GetOptions{})
+            return err
+        }, eventuallyTimeout, eventuallyInterval).Should(Succeed())
+
+        Eventually(func() error {
+            _, err := kubeClient.RbacV1().RoleBindings(managedCluster.Name).
+                Get(context.Background(), registrationRoleBindingName(managedCluster.Name), metav1.GetOptions{})
+            return err
+        }, eventuallyTimeout, eventuallyInterval).Should(Succeed())
+
+        Eventually(func() error {
+            _, err := kubeClient.RbacV1().RoleBindings(managedCluster.Name).Get(context.Background(), workRoleBindingName(managedCluster.Name), metav1.GetOptions{})
+            return err
+        }, eventuallyTimeout, eventuallyInterval).Should(Succeed())
+    })
+
+    It("should set hubAcceptsClient to false", func() {
+        // Set hubAcceptsClient to false
+        Eventually(func() error {
+            mc, err := clusterClient.ClusterV1().ManagedClusters().Get(context.Background(), managedCluster.Name, metav1.GetOptions{})
+            if err != nil {
+                return err
+            }
+            mc.Spec.HubAcceptsClient = false
+            _, err = clusterClient.ClusterV1().ManagedClusters().Update(context.Background(), mc, metav1.UpdateOptions{})
+            return err
+        }, eventuallyTimeout, eventuallyInterval).Should(Succeed())
+
+        // Check rbac files:
+        // * clusterrole should remain, but with a different scope
+        // * clusterrolebinding should remain
+        // * registrationRolebinding should be deleted
+        // * workRolebinding should be deleted
+        Eventually(func() error {
+            // Check clusterrole is updated with reduced permissions
+            clusterRole, err := kubeClient.RbacV1().ClusterRoles().Get(context.Background(), mclClusterRoleName(managedCluster.Name), metav1.GetOptions{})
+            if err != nil {
+                return err
+            }
+
+            // Should only have rule for get/list/watch managedcluster
+            if len(clusterRole.Rules) != 1 {
+                return fmt.Errorf("expected 1 rule, got %d rules", len(clusterRole.Rules))
+            }
+
+            rule := clusterRole.Rules[0]
+            expectedRule := rbacv1.PolicyRule{
+                APIGroups:     []string{"cluster.open-cluster-management.io"},
+                Resources:     []string{"managedclusters"},
+                ResourceNames: []string{managedCluster.Name},
+                Verbs:         []string{"get", "list", "watch"},
+            }
+            if !reflect.DeepEqual(rule, expectedRule) {
+                return fmt.Errorf("cluster role %s does not have expected rule", mclClusterRoleName(managedCluster.Name))
+            }
+
+            return nil
+        }, eventuallyTimeout, eventuallyInterval).Should(Succeed())
+
+        Eventually(func() error {
+            // ClusterRoleBinding should still exist
+            _, err := kubeClient.RbacV1().ClusterRoleBindings().Get(context.Background(), mclClusterRoleBindingName(managedCluster.Name), metav1.GetOptions{})
+            return err
+        }, eventuallyTimeout, eventuallyInterval).Should(Succeed())
+
+        Eventually(func() error {
+            // Registration rolebinding should be deleted
+            _, err := kubeClient.RbacV1().RoleBindings(managedCluster.Name).
+                Get(context.Background(), registrationRoleBindingName(managedCluster.Name), metav1.GetOptions{})
+            if err == nil {
+                return fmt.Errorf("registration rolebinding should be deleted")
+            }
+            if !errors.IsNotFound(err) {
+                return err
+            }
+            return nil
+        }, eventuallyTimeout, eventuallyInterval).Should(Succeed())
+
+        Eventually(func() error {
+            // Work rolebinding should be deleted
+            wrb, err := kubeClient.RbacV1().RoleBindings(managedCluster.Name).Get(context.Background(), workRoleBindingName(managedCluster.Name), metav1.GetOptions{})
+            if err == nil {
+                // Here we check DeletionTimestamp because there is finalizer "cluster.open-cluster-management.io/manifest-work-cleanup" on the rolebinding.
+                if wrb.DeletionTimestamp.IsZero() {
+                    return fmt.Errorf("work rolebinding should be deleted")
+                }
+                return nil
+            }
+            if !errors.IsNotFound(err) {
+                return err
+            }
+            return nil
+        }, eventuallyTimeout, eventuallyInterval).Should(Succeed())
+
+    })
+})