From ecfb6c08880ee53144e35d79ae6a83c579c57ac2 Mon Sep 17 00:00:00 2001 From: Wei Liu Date: Wed, 14 Jun 2023 23:03:43 +0800 Subject: [PATCH] merge the cluster capacity (#184) Signed-off-by: Wei Liu --- Makefile | 2 +- pkg/registration/helpers/testing/assertion.go | 29 +++++------- .../helpers/testing/testinghelpers.go | 16 +++---- .../managedcluster/resource_reconcile.go | 8 ++++ .../managedcluster/resource_reconcile_test.go | 44 ++++++++++++------- 5 files changed, 53 insertions(+), 46 deletions(-) diff --git a/Makefile b/Makefile index 3bf21d258..6521d74e0 100644 --- a/Makefile +++ b/Makefile @@ -66,7 +66,7 @@ verify-crds: patch-crd verify-gocilint: go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.53.2 - golangci-lint run --timeout=3m --modules-download-mode vendor ./... + golangci-lint run --timeout=5m --modules-download-mode vendor ./... install-golang-gci: go install github.com/daixiang0/gci@v0.10.1 diff --git a/pkg/registration/helpers/testing/assertion.go b/pkg/registration/helpers/testing/assertion.go index bdcfa3c18..503d479f3 100644 --- a/pkg/registration/helpers/testing/assertion.go +++ b/pkg/registration/helpers/testing/assertion.go @@ -2,15 +2,14 @@ package testing import ( "bytes" - "io/ioutil" "os" - "reflect" "testing" authorizationv1 "k8s.io/api/authorization/v1" certv1 "k8s.io/api/certificates/v1" certv1beta1 "k8s.io/api/certificates/v1beta1" coordinationv1 "k8s.io/api/coordination/v1" + "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -26,7 +25,7 @@ func AssertFinalizers(t *testing.T, obj runtime.Object, finalizers []string) { if len(actual) == 0 && len(finalizers) == 0 { return } - if !reflect.DeepEqual(actual, finalizers) { + if !equality.Semantic.DeepEqual(actual, finalizers) { t.Fatal(diff.ObjectDiff(actual, finalizers)) } } @@ -37,28 +36,24 @@ func AssertManagedClusterClientConfigs(t *testing.T, actual, expected 
[]clusterv if len(actual) == 0 && len(expected) == 0 { return } - if !reflect.DeepEqual(actual, expected) { + if !equality.Semantic.DeepEqual(actual, expected) { t.Errorf("expected client configs %#v but got: %#v", expected, actual) } } -// AssertManagedClusterStatus sserts the actual managed cluster status is the same +// AssertManagedClusterStatus asserts the actual managed cluster status is the same // with the expected func AssertManagedClusterStatus(t *testing.T, actual, expected clusterv1.ManagedClusterStatus) { - if !reflect.DeepEqual(actual.Version, expected.Version) { + if !equality.Semantic.DeepEqual(actual.Version, expected.Version) { t.Errorf("expected version %#v but got: %#v", expected.Version, actual.Version) } - if !actual.Capacity["cpu"].Equal(expected.Capacity["cpu"]) { - t.Errorf("expected cpu capacity %#v but got: %#v", expected.Capacity["cpu"], actual.Capacity["cpu"]) + + if !equality.Semantic.DeepEqual(actual.Capacity, expected.Capacity) { + t.Errorf("expected cluster capacity %#v but got: %#v", expected.Capacity, actual.Capacity) } - if !actual.Capacity["memory"].Equal(expected.Capacity["memory"]) { - t.Errorf("expected memory capacity %#v but got: %#v", expected.Capacity["memory"], actual.Capacity["memory"]) - } - if !actual.Allocatable["cpu"].Equal(expected.Allocatable["cpu"]) { - t.Errorf("expected cpu allocatable %#v but got: %#v", expected.Allocatable["cpu"], actual.Allocatable["cpu"]) - } - if !actual.Allocatable["memory"].Equal(expected.Allocatable["memory"]) { - t.Errorf("expected memory alocatabel %#v but got: %#v", expected.Allocatable["memory"], actual.Allocatable["memory"]) + + if !equality.Semantic.DeepEqual(actual.Allocatable, expected.Allocatable) { + t.Errorf("expected cluster allocatable %#v but got: %#v", expected.Allocatable, actual.Allocatable) } } @@ -142,7 +137,7 @@ func AssertFileExist(t *testing.T, filePath string) { // AssertFileContent asserts a given file content func AssertFileContent(t *testing.T, filePath string, 
expectedContent []byte) { - content, _ := ioutil.ReadFile(filePath) + content, _ := os.ReadFile(filePath) if !bytes.Equal(content, expectedContent) { t.Errorf("expect %v, but got %v", expectedContent, content) } diff --git a/pkg/registration/helpers/testing/testinghelpers.go b/pkg/registration/helpers/testing/testinghelpers.go index 00c26a416..5b53f2bb2 100644 --- a/pkg/registration/helpers/testing/testinghelpers.go +++ b/pkg/registration/helpers/testing/testinghelpers.go @@ -8,10 +8,10 @@ import ( "crypto/x509" "crypto/x509/pkix" "encoding/pem" - "io/ioutil" "math/big" "math/rand" "net" + "os" "time" certv1 "k8s.io/api/certificates/v1" @@ -118,16 +118,10 @@ func NewJoinedManagedCluster() *clusterv1.ManagedCluster { return managedCluster } -func NewManagedClusterWithStatus(capacity, allocatable corev1.ResourceList) *clusterv1.ManagedCluster { +func NewManagedClusterWithStatus(capacity, allocatable clusterv1.ResourceList) *clusterv1.ManagedCluster { managedCluster := NewJoinedManagedCluster() - managedCluster.Status.Capacity = clusterv1.ResourceList{ - "cpu": capacity.Cpu().DeepCopy(), - "memory": capacity.Memory().DeepCopy(), - } - managedCluster.Status.Allocatable = clusterv1.ResourceList{ - "cpu": allocatable.Cpu().DeepCopy(), - "memory": allocatable.Memory().DeepCopy(), - } + managedCluster.Status.Capacity = capacity + managedCluster.Status.Allocatable = allocatable managedCluster.Status.Version = clusterv1.ManagedClusterVersion{ Kubernetes: kubeversion.Get().GitVersion, } @@ -499,7 +493,7 @@ func NewTestCert(commonName string, duration time.Duration) *TestCert { } func WriteFile(filename string, data []byte) { - if err := ioutil.WriteFile(filename, data, 0600); err != nil { + if err := os.WriteFile(filename, data, 0600); err != nil { panic(err) } } diff --git a/pkg/registration/spoke/managedcluster/resource_reconcile.go b/pkg/registration/spoke/managedcluster/resource_reconcile.go index 942ae6093..c43722607 100644 --- 
a/pkg/registration/spoke/managedcluster/resource_reconcile.go +++ b/pkg/registration/spoke/managedcluster/resource_reconcile.go @@ -36,6 +36,14 @@ func (r *resoureReconcile) reconcile(ctx context.Context, cluster *clusterv1.Man return cluster, reconcileStop, fmt.Errorf("unable to get capacity and allocatable of managed cluster %q: %w", cluster.Name, err) } + // we allow other components to update the cluster capacity, so we merge the current capacity into the + // newly collected capacity: any current entry missing from the collected capacity is added back. + for key, val := range cluster.Status.Capacity { + if _, ok := capacity[key]; !ok { + capacity[key] = val + } + } + cluster.Status.Capacity = capacity cluster.Status.Allocatable = allocatable cluster.Status.Version = *clusterVersion diff --git a/pkg/registration/spoke/managedcluster/resource_reconcile_test.go b/pkg/registration/spoke/managedcluster/resource_reconcile_test.go index 7355ba8a2..f4cc2f6d1 100644 --- a/pkg/registration/spoke/managedcluster/resource_reconcile_test.go +++ b/pkg/registration/spoke/managedcluster/resource_reconcile_test.go @@ -9,7 +9,6 @@ import ( "time" "github.com/openshift/library-go/pkg/operator/events/eventstesting" - corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -82,27 +81,30 @@ func TestHealthCheck(t *testing.T) { nodes []runtime.Object httpStatus int responseMsg string - validateActions func(t *testing.T, actions []clienttesting.Action) + validateActions func(t *testing.T, clusterClient *clusterfake.Clientset) expectedErr string }{ { - name: "there are no managed clusters", - clusters: []runtime.Object{}, - validateActions: testingcommon.AssertNoActions, - expectedErr: "unable to get managed cluster \"testmanagedcluster\" from hub: managedcluster.cluster.open-cluster-management.io \"testmanagedcluster\" not found", + name: "there are no managed clusters", + clusters: 
[]runtime.Object{}, + validateActions: func(t *testing.T, clusterClient *clusterfake.Clientset) { + testingcommon.AssertNoActions(t, clusterClient.Actions()) + }, + expectedErr: "unable to get managed cluster \"testmanagedcluster\" from hub: managedcluster.cluster.open-cluster-management.io \"testmanagedcluster\" not found", }, { name: "kube-apiserver is not health", clusters: []runtime.Object{testinghelpers.NewAcceptedManagedCluster()}, httpStatus: http.StatusInternalServerError, responseMsg: "internal server error", - validateActions: func(t *testing.T, actions []clienttesting.Action) { + validateActions: func(t *testing.T, clusterClient *clusterfake.Clientset) { expectedCondition := metav1.Condition{ Type: clusterv1.ManagedClusterConditionAvailable, Status: metav1.ConditionFalse, Reason: "ManagedClusterKubeAPIServerUnavailable", Message: "The kube-apiserver is not ok, status code: 500, an error on the server (\"internal server error\") has prevented the request from succeeding", } + actions := clusterClient.Actions() testingcommon.AssertActions(t, actions, "patch") patch := actions[0].(clienttesting.PatchAction).GetPatch() managedCluster := &clusterv1.ManagedCluster{} @@ -120,7 +122,7 @@ func TestHealthCheck(t *testing.T) { testinghelpers.NewNode("testnode1", testinghelpers.NewResourceList(32, 64), testinghelpers.NewResourceList(16, 32)), }, httpStatus: http.StatusOK, - validateActions: func(t *testing.T, actions []clienttesting.Action) { + validateActions: func(t *testing.T, clusterClient *clusterfake.Clientset) { expectedCondition := metav1.Condition{ Type: clusterv1.ManagedClusterConditionAvailable, Status: metav1.ConditionTrue, @@ -140,6 +142,7 @@ func TestHealthCheck(t *testing.T) { clusterv1.ResourceMemory: *resource.NewQuantity(int64(1024*1024*32), resource.BinarySI), }, } + actions := clusterClient.Actions() testingcommon.AssertActions(t, actions, "patch") patch := actions[0].(clienttesting.PatchAction).GetPatch() managedCluster := 
&clusterv1.ManagedCluster{} @@ -156,13 +159,14 @@ func TestHealthCheck(t *testing.T) { clusters: []runtime.Object{testinghelpers.NewAcceptedManagedCluster()}, nodes: []runtime.Object{}, httpStatus: http.StatusNotFound, - validateActions: func(t *testing.T, actions []clienttesting.Action) { + validateActions: func(t *testing.T, clusterClient *clusterfake.Clientset) { expectedCondition := metav1.Condition{ Type: clusterv1.ManagedClusterConditionAvailable, Status: metav1.ConditionTrue, Reason: "ManagedClusterAvailable", Message: "Managed cluster is available", } + actions := clusterClient.Actions() testingcommon.AssertActions(t, actions, "patch") patch := actions[0].(clienttesting.PatchAction).GetPatch() managedCluster := &clusterv1.ManagedCluster{} @@ -178,13 +182,14 @@ func TestHealthCheck(t *testing.T) { clusters: []runtime.Object{testinghelpers.NewAcceptedManagedCluster()}, nodes: []runtime.Object{}, httpStatus: http.StatusForbidden, - validateActions: func(t *testing.T, actions []clienttesting.Action) { + validateActions: func(t *testing.T, clusterClient *clusterfake.Clientset) { expectedCondition := metav1.Condition{ Type: clusterv1.ManagedClusterConditionAvailable, Status: metav1.ConditionTrue, Reason: "ManagedClusterAvailable", Message: "Managed cluster is available", } + actions := clusterClient.Actions() testingcommon.AssertActions(t, actions, "patch") patch := actions[0].(clienttesting.PatchAction).GetPatch() managedCluster := &clusterv1.ManagedCluster{} @@ -199,18 +204,22 @@ func TestHealthCheck(t *testing.T) { name: "merge managed cluster status", clusters: []runtime.Object{ testinghelpers.NewManagedClusterWithStatus( - corev1.ResourceList{ + clusterv1.ResourceList{ "sockets": *resource.NewQuantity(int64(1200), resource.DecimalExponent), "cores": *resource.NewQuantity(int64(128), resource.DecimalExponent), }, - testinghelpers.NewResourceList(16, 32)), + clusterv1.ResourceList{ + clusterv1.ResourceCPU: *resource.NewQuantity(int64(16), 
resource.DecimalExponent), + clusterv1.ResourceMemory: *resource.NewQuantity(int64(1024*1024*32), resource.BinarySI), + }, + ), }, nodes: []runtime.Object{ testinghelpers.NewNode("testnode1", testinghelpers.NewResourceList(32, 64), testinghelpers.NewResourceList(16, 32)), testinghelpers.NewNode("testnode2", testinghelpers.NewResourceList(32, 64), testinghelpers.NewResourceList(16, 32)), }, httpStatus: http.StatusOK, - validateActions: func(t *testing.T, actions []clienttesting.Action) { + validateActions: func(t *testing.T, clusterClient *clusterfake.Clientset) { expectedCondition := metav1.Condition{ Type: clusterv1.ManagedClusterConditionJoined, Status: metav1.ConditionTrue, @@ -232,10 +241,11 @@ func TestHealthCheck(t *testing.T) { clusterv1.ResourceMemory: *resource.NewQuantity(int64(1024*1024*64), resource.BinarySI), }, } + actions := clusterClient.Actions() testingcommon.AssertActions(t, actions, "patch") - patch := actions[0].(clienttesting.PatchAction).GetPatch() - managedCluster := &clusterv1.ManagedCluster{} - err := json.Unmarshal(patch, managedCluster) + + managedCluster, err := clusterClient.ClusterV1().ManagedClusters().Get( + context.TODO(), testinghelpers.TestManagedClusterName, metav1.GetOptions{}) if err != nil { t.Fatal(err) } @@ -279,7 +289,7 @@ func TestHealthCheck(t *testing.T) { syncErr := ctrl.sync(context.TODO(), testingcommon.NewFakeSyncContext(t, "")) testingcommon.AssertError(t, syncErr, c.expectedErr) - c.validateActions(t, clusterClient.Actions()) + c.validateActions(t, clusterClient) }) } }