deleting managed cluster
go.mod
@@ -7,6 +7,7 @@ require (
	github.com/onsi/ginkgo v1.11.0
	github.com/onsi/gomega v1.8.1
	github.com/open-cluster-management/api v0.0.0-20200602195039-a516cac2e038
	github.com/openshift/api v0.0.0-20200326160804-ecb9283fe820
	github.com/openshift/build-machinery-go v0.0.0-20200424080330-082bf86082cc
	github.com/openshift/generic-admission-server v1.14.1-0.20200514123932-ccc9079d8bdb
	github.com/openshift/library-go v0.0.0-20200401114229-ffab8c6e83a9
@@ -2,17 +2,45 @@ package helpers

import (
	"context"
	"fmt"
	"net/url"
	"path/filepath"
	"time"

	clusterclientset "github.com/open-cluster-management/api/client/cluster/clientset/versioned"
	clusterv1 "github.com/open-cluster-management/api/cluster/v1"
	"github.com/open-cluster-management/registration/pkg/hub/managedcluster/bindata"

	"github.com/openshift/api"
	"github.com/openshift/library-go/pkg/assets"
	"github.com/openshift/library-go/pkg/operator/events"
	"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
	"github.com/openshift/library-go/pkg/operator/resource/resourcehelper"
	errorhelpers "github.com/openshift/library-go/pkg/operator/v1helpers"

	certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
	corev1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	"k8s.io/apimachinery/pkg/api/equality"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
)

var (
	genericScheme = runtime.NewScheme()
	genericCodecs = serializer.NewCodecFactory(genericScheme)
	genericCodec  = genericCodecs.UniversalDeserializer()
)

func init() {
	utilruntime.Must(api.InstallKube(genericScheme))
}

func IsConditionTrue(condition *clusterv1.StatusCondition) bool {
	if condition == nil {
		return false
@@ -128,3 +156,149 @@ func IsValidHTTPSURL(serverURL string) bool {

	return true
}

// CleanUpManagedClusterManifests cleans up managed cluster resources from its manifest files
func CleanUpManagedClusterManifests(
	ctx context.Context,
	client kubernetes.Interface,
	recorder events.Recorder,
	assetFunc resourceapply.AssetFunc,
	files ...string) error {
	errs := []error{}
	for _, file := range files {
		objectRaw, err := assetFunc(file)
		if err != nil {
			errs = append(errs, err)
			continue
		}
		object, _, err := genericCodec.Decode(objectRaw, nil, nil)
		if err != nil {
			errs = append(errs, err)
			continue
		}
		switch t := object.(type) {
		case *corev1.Namespace:
			err = client.CoreV1().Namespaces().Delete(ctx, t.Name, metav1.DeleteOptions{})
		case *rbacv1.Role:
			err = client.RbacV1().Roles(t.Namespace).Delete(ctx, t.Name, metav1.DeleteOptions{})
		case *rbacv1.RoleBinding:
			err = client.RbacV1().RoleBindings(t.Namespace).Delete(ctx, t.Name, metav1.DeleteOptions{})
		case *rbacv1.ClusterRole:
			err = client.RbacV1().ClusterRoles().Delete(ctx, t.Name, metav1.DeleteOptions{})
		case *rbacv1.ClusterRoleBinding:
			err = client.RbacV1().ClusterRoleBindings().Delete(ctx, t.Name, metav1.DeleteOptions{})
		default:
			err = fmt.Errorf("unhandled type %T", object)
		}
		if errors.IsNotFound(err) {
			continue
		}
		if err != nil {
			errs = append(errs, err)
			continue
		}
		gvk := resourcehelper.GuessObjectGroupVersionKind(object)
		recorder.Eventf(fmt.Sprintf("ManagedCluster%sDeleted", gvk.Kind), "Deleted %s", resourcehelper.FormatResourceForCLIWithNamespace(object))
	}
	return errorhelpers.NewMultiLineAggregate(errs)
}
// CleanUpGroupFromClusterRoleBindings searches all clusterrolebindings for the managed cluster group and removes the subject entry,
// or deletes the clusterrolebinding if the group is its only subject.
func CleanUpGroupFromClusterRoleBindings(
	ctx context.Context,
	client kubernetes.Interface,
	recorder events.Recorder,
	managedClusterGroup string) error {
	clusterRoleBindings, err := client.RbacV1().ClusterRoleBindings().List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	for i := range clusterRoleBindings.Items {
		clusterRoleBinding := clusterRoleBindings.Items[i]
		subjects := clusterRoleBinding.Subjects
		newSubjects := []rbacv1.Subject{}
		for _, subject := range subjects {
			if subject.Kind == "Group" && subject.Name == managedClusterGroup {
				continue
			}
			newSubjects = append(newSubjects, subject)
		}
		// no other subjects, remove this clusterrolebinding
		if len(newSubjects) == 0 {
			err := client.RbacV1().ClusterRoleBindings().Delete(ctx, clusterRoleBinding.Name, metav1.DeleteOptions{})
			if err != nil {
				return err
			}
			recorder.Eventf("ClusterRoleBindingDeleted", fmt.Sprintf("Deleted ClusterRoleBinding %q", clusterRoleBinding.Name))
			continue
		}
		// there are other subjects, only remove the managed cluster group
		if len(newSubjects) != len(subjects) {
			clusterRoleBinding.Subjects = newSubjects
			_, err := client.RbacV1().ClusterRoleBindings().Update(ctx, &clusterRoleBinding, metav1.UpdateOptions{})
			if err != nil {
				return err
			}
			recorder.Eventf("ClusterRoleBindingUpdated", fmt.Sprintf("Updated ClusterRoleBinding %q", clusterRoleBinding.Name))
			continue
		}
	}

	return nil
}

// CleanUpGroupFromRoleBindings searches all rolebindings for the managed cluster group and removes the subject entry,
// or deletes the rolebinding if the group is its only subject.
func CleanUpGroupFromRoleBindings(
	ctx context.Context,
	client kubernetes.Interface,
	recorder events.Recorder,
	managedClusterGroup string) error {
	roleBindings, err := client.RbacV1().RoleBindings(metav1.NamespaceAll).List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	for i := range roleBindings.Items {
		roleBinding := roleBindings.Items[i]
		subjects := roleBinding.Subjects
		newSubjects := []rbacv1.Subject{}
		for _, subject := range subjects {
			if subject.Kind == "Group" && subject.Name == managedClusterGroup {
				continue
			}
			newSubjects = append(newSubjects, subject)
		}
		// no other subjects, remove this rolebinding
		if len(newSubjects) == 0 {
			err := client.RbacV1().RoleBindings(roleBinding.Namespace).Delete(ctx, roleBinding.Name, metav1.DeleteOptions{})
			if err != nil {
				return err
			}
			recorder.Eventf("RoleBindingDeleted", fmt.Sprintf("Deleted RoleBinding %q/%q", roleBinding.Namespace, roleBinding.Name))
			continue
		}
		// there are other subjects, only remove the managed cluster group
		if len(newSubjects) != len(subjects) {
			roleBinding.Subjects = newSubjects
			_, err := client.RbacV1().RoleBindings(roleBinding.Namespace).Update(ctx, &roleBinding, metav1.UpdateOptions{})
			if err != nil {
				return err
			}
			recorder.Eventf("RoleBindingUpdated", fmt.Sprintf("Updated RoleBinding %q/%q", roleBinding.Namespace, roleBinding.Name))
			continue
		}
	}
	return nil
}

// ManagedClusterAssetFn returns an AssetFunc that renders the managed cluster manifest
// templates under manifestDir with the given managed cluster name.
func ManagedClusterAssetFn(manifestDir, managedClusterName string) resourceapply.AssetFunc {
	return func(name string) ([]byte, error) {
		config := struct {
			ManagedClusterName string
		}{
			ManagedClusterName: managedClusterName,
		}
		return assets.MustCreateAssetFromTemplate(name, bindata.MustAsset(filepath.Join(manifestDir, name)), config).Data, nil
	}
}
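For reference, a minimal sketch (not part of this commit) of how these three helpers could be wired together when a managed cluster is removed; the manifest directory, file name, and cluster name below are assumed placeholder values, and the real wiring lives in the hub controller's removeManagedClusterResources further down in this diff.

// exampleCleanup is a hypothetical illustration only; "manifests",
// "managedcluster-namespace.yaml", and "cluster1" are placeholder values.
func exampleCleanup(ctx context.Context, kubeClient kubernetes.Interface, recorder events.Recorder) error {
	errs := []error{}

	// Render the per-cluster manifests and delete whatever objects they describe.
	assetFn := ManagedClusterAssetFn("manifests", "cluster1")
	if err := CleanUpManagedClusterManifests(ctx, kubeClient, recorder, assetFn,
		"managedcluster-namespace.yaml"); err != nil {
		errs = append(errs, err)
	}

	// Strip the cluster's RBAC group ("system:open-cluster-management:<cluster name>")
	// from any remaining clusterrolebindings and rolebindings.
	group := fmt.Sprintf("system:open-cluster-management:%s", "cluster1")
	if err := CleanUpGroupFromClusterRoleBindings(ctx, kubeClient, recorder, group); err != nil {
		errs = append(errs, err)
	}
	if err := CleanUpGroupFromRoleBindings(ctx, kubeClient, recorder, group); err != nil {
		errs = append(errs, err)
	}
	return errorhelpers.NewMultiLineAggregate(errs)
}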
@@ -2,16 +2,29 @@ package helpers

import (
	"context"
	"encoding/json"
	"fmt"
	"reflect"
	"testing"
	"time"

	clusterfake "github.com/open-cluster-management/api/client/cluster/clientset/versioned/fake"
	clusterv1 "github.com/open-cluster-management/api/cluster/v1"
	"github.com/openshift/library-go/pkg/operator/events/eventstesting"

	corev1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	"k8s.io/apimachinery/pkg/api/equality"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/diff"
	fakekube "k8s.io/client-go/kubernetes/fake"
	clienttesting "k8s.io/client-go/testing"
)

const testManagedClusterGroup = "system:open-cluster-management:testgroup"

func TestUpdateStatusCondition(t *testing.T) {
	nowish := metav1.Now()
	beforeish := metav1.Time{Time: nowish.Add(-10 * time.Second)}
@@ -144,6 +157,226 @@ func TestIsValidHTTPSURL(t *testing.T) {
	}
}

func TestCleanUpManagedClusterManifests(t *testing.T) {
	applyFiles := map[string]runtime.Object{
		"namespace":          newUnstructured("v1", "Namespace", "", "n1"),
		"clusterrole":        newUnstructured("rbac.authorization.k8s.io/v1", "ClusterRole", "", "cr1"),
		"clusterrolebinding": newUnstructured("rbac.authorization.k8s.io/v1", "ClusterRoleBinding", "", "crb1"),
		"role":               newUnstructured("rbac.authorization.k8s.io/v1", "Role", "n1", "r1"),
		"rolebinding":        newUnstructured("rbac.authorization.k8s.io/v1", "RoleBinding", "n1", "rb1"),
	}
	cases := []struct {
		name            string
		applyObject     []runtime.Object
		applyFiles      map[string]runtime.Object
		validateActions func(t *testing.T, actions []clienttesting.Action)
		expectedErr     string
	}{
		{
			name: "delete applied objects",
			applyObject: []runtime.Object{
				&corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "n1"}},
				&rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: "cr1"}},
				&rbacv1.ClusterRoleBinding{ObjectMeta: metav1.ObjectMeta{Name: "crb1"}},
				&rbacv1.Role{ObjectMeta: metav1.ObjectMeta{Name: "r1", Namespace: "n1"}},
				&rbacv1.RoleBinding{ObjectMeta: metav1.ObjectMeta{Name: "rb1", Namespace: "n1"}},
			},
			applyFiles: applyFiles,
			validateActions: func(t *testing.T, actions []clienttesting.Action) {
				assertDeleteActions(t, len(applyFiles), actions)
			},
		},
		{
			name:        "there are no applied objects",
			applyObject: []runtime.Object{},
			applyFiles:  applyFiles,
			validateActions: func(t *testing.T, actions []clienttesting.Action) {
				assertDeleteActions(t, len(applyFiles), actions)
			},
		},
		{
			name:        "unhandled types",
			applyObject: []runtime.Object{},
			applyFiles:  map[string]runtime.Object{"secret": newUnstructured("v1", "Secret", "n1", "s1")},
			expectedErr: "unhandled type *v1.Secret",
			validateActions: func(t *testing.T, actions []clienttesting.Action) {
				if len(actions) != 0 {
					t.Errorf("expected no actions, but %v", actions)
				}
			},
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			kubeClient := fakekube.NewSimpleClientset(c.applyObject...)
			cleanUpErr := CleanUpManagedClusterManifests(
				context.TODO(),
				kubeClient,
				eventstesting.NewTestingEventRecorder(t),
				func(name string) ([]byte, error) {
					if c.applyFiles[name] == nil {
						return nil, fmt.Errorf("failed to find file")
					}
					return json.Marshal(c.applyFiles[name])
				},
				getApplyFileNames(c.applyFiles)...,
			)
			if len(c.expectedErr) > 0 && cleanUpErr == nil {
				t.Errorf("expected %q error", c.expectedErr)
				return
			}
			if len(c.expectedErr) > 0 && cleanUpErr != nil && cleanUpErr.Error() != c.expectedErr {
				t.Errorf("expected %q error, got %q", c.expectedErr, cleanUpErr.Error())
				return
			}
			if len(c.expectedErr) == 0 && cleanUpErr != nil {
				t.Errorf("unexpected err: %v", cleanUpErr)
				return
			}

			c.validateActions(t, kubeClient.Actions())
		})
	}
}

func TestCleanUpGroupFromClusterRoleBindings(t *testing.T) {
	cases := []struct {
		name            string
		object          []runtime.Object
		validateActions func(t *testing.T, actions []clienttesting.Action)
	}{
		{
			name: "clean up group from clusterrolebindings",
			object: []runtime.Object{
				&rbacv1.ClusterRoleBinding{
					ObjectMeta: metav1.ObjectMeta{Name: "crb1"},
					Subjects: []rbacv1.Subject{
						{Kind: "Group", Name: testManagedClusterGroup},
					},
				},
				&rbacv1.ClusterRoleBinding{
					ObjectMeta: metav1.ObjectMeta{Name: "crb2"},
					Subjects: []rbacv1.Subject{
						{Kind: "Group", Name: testManagedClusterGroup},
						{Kind: "Group", Name: "test"},
						{Kind: "User", Name: testManagedClusterGroup},
					},
				},
				&rbacv1.ClusterRoleBinding{
					ObjectMeta: metav1.ObjectMeta{Name: "crb3"},
					Subjects: []rbacv1.Subject{
						{Kind: "Group", Name: "test"},
					},
				},
			},
			validateActions: func(t *testing.T, actions []clienttesting.Action) {
				if len(actions) != 3 {
					t.Errorf("expected 3 actions, but %v", actions)
				}
				if actions[1].(clienttesting.DeleteActionImpl).Name != "crb1" {
					t.Errorf("expected to delete crb1, but %v", actions[1])
				}
				actual := (actions[2].(clienttesting.UpdateActionImpl).Object).(*rbacv1.ClusterRoleBinding)
				expected := []rbacv1.Subject{{Kind: "Group", Name: "test"}, {Kind: "User", Name: testManagedClusterGroup}}
				if !reflect.DeepEqual(actual.Subjects, expected) {
					t.Errorf("expected %v, but %v", expected, actual.Subjects)
				}
			},
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			kubeClient := fakekube.NewSimpleClientset(c.object...)
			err := CleanUpGroupFromClusterRoleBindings(
				context.TODO(),
				kubeClient,
				eventstesting.NewTestingEventRecorder(t),
				testManagedClusterGroup,
			)
			if err != nil {
				t.Errorf("unexpected err: %v", err)
				return
			}
			c.validateActions(t, kubeClient.Actions())
		})
	}
}
func TestCleanUpGroupFromRoleBindings(t *testing.T) {
	cases := []struct {
		name            string
		object          []runtime.Object
		validateActions func(t *testing.T, actions []clienttesting.Action)
	}{
		{
			name: "clean up group from rolebindings",
			object: []runtime.Object{
				&rbacv1.RoleBinding{
					ObjectMeta: metav1.ObjectMeta{Name: "rb1", Namespace: "n1"},
					Subjects: []rbacv1.Subject{
						{Kind: "Group", Name: testManagedClusterGroup},
					},
				},
				&rbacv1.RoleBinding{
					ObjectMeta: metav1.ObjectMeta{Name: "rb2", Namespace: "n1"},
					Subjects: []rbacv1.Subject{
						{Kind: "Group", Name: testManagedClusterGroup},
						{Kind: "Group", Name: "test"},
						{Kind: "User", Name: testManagedClusterGroup},
					},
				},
				&rbacv1.RoleBinding{
					ObjectMeta: metav1.ObjectMeta{Name: "rb3", Namespace: "n2"},
					Subjects: []rbacv1.Subject{
						{Kind: "Group", Name: "test"},
					},
				},
			},
			validateActions: func(t *testing.T, actions []clienttesting.Action) {
				if len(actions) != 3 {
					t.Errorf("expected 3 actions, but %v", actions)
				}
				if actions[1].(clienttesting.DeleteActionImpl).Name != "rb1" ||
					actions[1].(clienttesting.DeleteActionImpl).Namespace != "n1" {
					t.Errorf("expected to delete rb1, but %v", actions[1])
				}
				actual := (actions[2].(clienttesting.UpdateActionImpl).Object).(*rbacv1.RoleBinding)
				expected := []rbacv1.Subject{{Kind: "Group", Name: "test"}, {Kind: "User", Name: testManagedClusterGroup}}
				if !reflect.DeepEqual(actual.Subjects, expected) {
					t.Errorf("expected %v, but %v", expected, actual.Subjects)
				}
			},
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			kubeClient := fakekube.NewSimpleClientset(c.object...)
			err := CleanUpGroupFromRoleBindings(
				context.TODO(),
				kubeClient,
				eventstesting.NewTestingEventRecorder(t),
				testManagedClusterGroup,
			)
			if err != nil {
				t.Errorf("unexpected err: %v", err)
				return
			}
			c.validateActions(t, kubeClient.Actions())
		})
	}
}

func assertDeleteActions(t *testing.T, actionCounts int, actions []clienttesting.Action) {
	if len(actions) != actionCounts {
		t.Errorf("expected %d actions, but %v", actionCounts, actions)
	}
	for _, action := range actions {
		if action.GetVerb() != "delete" {
			t.Errorf("expected delete actions, but %v", action)
		}
	}
}

func newCondition(name, status, reason, message string, lastTransition *metav1.Time) clusterv1.StatusCondition {
	ret := clusterv1.StatusCondition{
		Type: name,
@@ -156,3 +389,25 @@ func newCondition(name, status, reason, message string, lastTransition *metav1.T
	}
	return ret
}

func newUnstructured(apiVersion, kind, namespace, name string) *unstructured.Unstructured {
	object := &unstructured.Unstructured{
		Object: map[string]interface{}{
			"apiVersion": apiVersion,
			"kind":       kind,
			"metadata": map[string]interface{}{
				"namespace": namespace,
				"name":      name,
			},
		},
	}
	return object
}

func getApplyFileNames(applyFiles map[string]runtime.Object) []string {
	keys := []string{}
	for key := range applyFiles {
		keys = append(keys, key)
	}
	return keys
}
@@ -3,17 +3,16 @@ package managedcluster
import (
	"context"
	"fmt"
	"path/filepath"

	clientset "github.com/open-cluster-management/api/client/cluster/clientset/versioned"
	v1 "github.com/open-cluster-management/api/cluster/v1"
	"github.com/open-cluster-management/registration/pkg/helpers"
	"github.com/open-cluster-management/registration/pkg/hub/managedcluster/bindata"
	"github.com/openshift/library-go/pkg/assets"

	"github.com/openshift/library-go/pkg/controller/factory"
	"github.com/openshift/library-go/pkg/operator/events"
	"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
	operatorhelpers "github.com/openshift/library-go/pkg/operator/v1helpers"

	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -28,6 +27,16 @@ const (
	managedClusterFinalizer = "cluster.open-cluster-management.io/api-resource-cleanup"
)

var staticFiles = []string{
	"manifests/managedcluster-clusterrole.yaml",
	"manifests/managedcluster-clusterrolebinding.yaml",
	"manifests/managedcluster-namespace.yaml",
	"manifests/managedcluster-registration-role.yaml",
	"manifests/managedcluster-registration-rolebinding.yaml",
	"manifests/managedcluster-work-role.yaml",
	"manifests/managedcluster-work-rolebinding.yaml",
}

// managedClusterController reconciles instances of ManagedCluster on the hub.
type managedClusterController struct {
	kubeClient kubernetes.Interface
@@ -125,21 +134,8 @@ func (c *managedClusterController) sync(ctx context.Context, syncCtx factory.Syn
	resourceResults := resourceapply.ApplyDirectly(
		resourceapply.NewKubeClientHolder(c.kubeClient),
		syncCtx.Recorder(),
		func(name string) ([]byte, error) {
			config := struct {
				ManagedClusterName string
			}{
				ManagedClusterName: managedClusterName,
			}
			return assets.MustCreateAssetFromTemplate(name, bindata.MustAsset(filepath.Join(manifestDir, name)), config).Data, nil
		},
		"manifests/managedcluster-clusterrole.yaml",
		"manifests/managedcluster-clusterrolebinding.yaml",
		"manifests/managedcluster-namespace.yaml",
		"manifests/managedcluster-registration-role.yaml",
		"manifests/managedcluster-registration-rolebinding.yaml",
		"manifests/managedcluster-work-role.yaml",
		"manifests/managedcluster-work-rolebinding.yaml",
		helpers.ManagedClusterAssetFn(manifestDir, managedClusterName),
		staticFiles...,
	)
	errs := []error{}
	for _, result := range resourceResults {
@@ -178,28 +174,21 @@ func (c *managedClusterController) sync(ctx context.Context, syncCtx factory.Syn
}

func (c *managedClusterController) removeManagedClusterResources(ctx context.Context, managedClusterName string) error {
	err := c.kubeClient.CoreV1().Namespaces().Delete(ctx, managedClusterName, metav1.DeleteOptions{})
	if err != nil && !errors.IsNotFound(err) {
		return err
	errs := []error{}
	// Clean up managed cluster manifests
	assetFn := helpers.ManagedClusterAssetFn(manifestDir, managedClusterName)
	if err := helpers.CleanUpManagedClusterManifests(ctx, c.kubeClient, c.eventRecorder, assetFn, staticFiles...); err != nil {
		errs = append(errs, err)
	}
	c.eventRecorder.Eventf("ManagedClusterNamespaceDeleted", "namespace %s is deleted", managedClusterName)

	clusterRoleName := fmt.Sprintf("%s:%s", clusterRolePrefix, managedClusterName)
	err = c.kubeClient.RbacV1().ClusterRoles().Delete(ctx, clusterRoleName, metav1.DeleteOptions{})
	if err != nil && !errors.IsNotFound(err) {
		return err
	// Clean up managed cluster group from clusterrolebindings and rolebindings.
	managedClusterGroup := fmt.Sprintf("system:open-cluster-management:%s", managedClusterName)
	if err := helpers.CleanUpGroupFromClusterRoleBindings(ctx, c.kubeClient, c.eventRecorder, managedClusterGroup); err != nil {
		errs = append(errs, err)
	}
	c.eventRecorder.Eventf("ManagedClusterClusterRoleDeleted", "clusterrole %s is deleted", clusterRoleName)

	//TODO search all clusterroles and roles for this group and remove the entry or delete the clusterrolebinding if it's the only subject.
	clusterRoleBindingName := fmt.Sprintf("%s:%s", clusterRolePrefix, managedClusterName)
	err = c.kubeClient.RbacV1().ClusterRoleBindings().Delete(ctx, clusterRoleBindingName, metav1.DeleteOptions{})
	if err != nil && !errors.IsNotFound(err) {
		return err
	if err := helpers.CleanUpGroupFromRoleBindings(ctx, c.kubeClient, c.eventRecorder, managedClusterGroup); err != nil {
		errs = append(errs, err)
	}
	c.eventRecorder.Eventf("ManagedClusterClusterRoleBindingDeleted", "clusterrolebinding %s is deleted", clusterRoleBindingName)

	return nil
	return operatorhelpers.NewMultiLineAggregate(errs)
}

func (c *managedClusterController) removeManagedClusterFinalizer(ctx context.Context, managedCluster *v1.ManagedCluster) error {