From 2139c813adbe9b78425690d038859d7f23f70c5f Mon Sep 17 00:00:00 2001
From: AshvinBambhaniya2003 <156189340+AshvinBambhaniya2003@users.noreply.github.com>
Date: Mon, 15 Sep 2025 21:07:55 +0530
Subject: [PATCH] Feat(multicluster): Enhance Unit Test Coverage for Multicluster Packages (#6892)

* feat(multicluster): Enhance unit test coverage for multicluster utilities

This commit introduces a comprehensive suite of unit tests for the multicluster management functions in pkg/multicluster.

Key changes include:

- `cluster_management_test.go`: Improves the structure of TestDetachCluster and TestRenameCluster by organizing test cases into a table-driven collection, which enhances clarity and simplifies adding new scenarios.
- `utils_test.go` and `virtual_cluster_test.go`: Adds new test cases to validate additional utility and virtual cluster helper functions, increasing overall test coverage.

These additions improve the overall test coverage and ensure the correctness and reliability of multicluster operations.

Signed-off-by: Ashvin Bambhaniya

* feat(multicluster): Add unit tests for multicluster workflow provider

This commit introduces new unit tests for the multicluster workflow provider located in pkg/workflow/providers/multicluster.

Key additions include:

- Comprehensive tests for the Deploy workflow step, covering parameter validation, error handling, and successful deployment scenarios.
- New tests for GetPlacementsFromTopologyPolicies to ensure correct placement resolution from topology policies, including error cases and default behaviors.

These additions improve the test coverage and ensure the robustness of the multicluster workflow provider.

Signed-off-by: Ashvin Bambhaniya

* fix(multicluster): Correct duplicate import in utils_test.go

This commit resolves a linting error (ST1019) in pkg/multicluster/utils_test.go caused by the k8s.io/api/core/v1 package being imported twice with different aliases (v1 and corev1). The redundant alias v1 has been removed, and the corresponding type reference []v1.Secret has been updated to []corev1.Secret for consistency.

Signed-off-by: Ashvin Bambhaniya

* test(multicluster): fix cross-test side effects

The TestListExistingClusterSecrets function mutates the global variable ClusterGatewaySecretNamespace without restoring its original value, which can lead to unpredictable behavior in other tests that rely on this variable. This commit fixes the issue by saving the value of ClusterGatewaySecretNamespace before the test runs and restoring it afterward with a defer statement, as sketched below.

Signed-off-by: Ashvin Bambhaniya

* test(multicluster): remove redundant test case in TestContext

The `TestContextWithClusterName` sub-test in `TestContext` is redundant, as its functionality is already covered by the more comprehensive `TestClusterNameInContext` sub-test. This commit removes the unnecessary test to improve the clarity and maintainability of the test suite without sacrificing coverage.
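For illustration only (not part of the diff below), here is a condensed, hypothetical sketch of the two patterns these changes rely on: table-driven test cases and defer-based restoration of the package-level ClusterGatewaySecretNamespace variable. TestExampleValidate is an invented name; KubeClusterConfig.Validate, ClusterLocalName, ClusterGatewaySecretNamespace, and testify's require are taken from the code in this patch, and the sketch assumes it lives in package multicluster with the "testing" and "github.com/stretchr/testify/require" imports.

func TestExampleValidate(t *testing.T) {
	// Save and restore the package-level global so other tests are unaffected.
	oldNS := ClusterGatewaySecretNamespace
	ClusterGatewaySecretNamespace = "vela-system"
	defer func() { ClusterGatewaySecretNamespace = oldNS }()

	// Table-driven cases: adding a new scenario is a one-line change.
	testCases := []struct {
		name        string
		clusterName string
		expectErr   bool
	}{
		{name: "valid name", clusterName: "prod"},
		{name: "reserved local name", clusterName: ClusterLocalName, expectErr: true},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			err := (&KubeClusterConfig{ClusterName: tc.clusterName}).Validate()
			if tc.expectErr {
				require.Error(t, err)
			} else {
				require.NoError(t, err)
			}
		})
	}
}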
Signed-off-by: Ashvin Bambhaniya --------- Signed-off-by: Ashvin Bambhaniya --- pkg/multicluster/cluster_management_test.go | 1088 +++++++++++++++++ pkg/multicluster/utils_test.go | 276 ++++- pkg/multicluster/virtual_cluster_test.go | 104 ++ .../multicluster/multicluster_test.go | 239 +++- 4 files changed, 1682 insertions(+), 25 deletions(-) create mode 100644 pkg/multicluster/cluster_management_test.go diff --git a/pkg/multicluster/cluster_management_test.go b/pkg/multicluster/cluster_management_test.go new file mode 100644 index 000000000..4a7199830 --- /dev/null +++ b/pkg/multicluster/cluster_management_test.go @@ -0,0 +1,1088 @@ +/* +Copyright 2021 The KubeVela Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package multicluster + +import ( + "context" + "os" + "path/filepath" + "testing" + + clusterv1alpha1 "github.com/oam-dev/cluster-gateway/pkg/apis/cluster/v1alpha1" + clustercommon "github.com/oam-dev/cluster-gateway/pkg/common" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + ocmclusterv1 "open-cluster-management.io/api/cluster/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/oam-dev/kubevela/apis/core.oam.dev/common" + + "github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1" +) + +func TestKubeClusterConfig_SetClusterName(t *testing.T) { + testCases := []struct { + name string + initialName string + newName string + expectedName string + }{ + { + name: "Non-empty name", + initialName: "old", + newName: "new", + expectedName: "new", + }, + { + name: "Empty name", + initialName: "old", + newName: "", + expectedName: "old", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + cfg := &KubeClusterConfig{ClusterName: tc.initialName} + out := cfg.SetClusterName(tc.newName) + require.Equal(t, cfg, out) + require.Equal(t, tc.expectedName, cfg.ClusterName) + }) + } +} + +func TestKubeClusterConfig_SetCreateNamespace(t *testing.T) { + cfg := &KubeClusterConfig{} + + out := cfg.SetCreateNamespace("ns-1") + require.Equal(t, cfg, out) + require.Equal(t, "ns-1", cfg.CreateNamespace) + + out = cfg.SetCreateNamespace("") + require.Equal(t, cfg, out) + require.Equal(t, "", cfg.CreateNamespace) +} + +func TestKubeClusterConfig_Validate(t *testing.T) { + testCases := []struct { + name string + clusterName string + expectErr bool + }{ + { + name: "Empty name", + clusterName: "", + expectErr: true, + }, + { + name: "Local name", + clusterName: ClusterLocalName, + expectErr: true, + }, + { + name: "Valid name", + clusterName: "prod", + expectErr: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + cfg := &KubeClusterConfig{ClusterName: tc.clusterName} + err := cfg.Validate() + if tc.expectErr { + require.Error(t, err) + } else { + 
require.NoError(t, err) + } + }) + } +} + +func newTestScheme() *runtime.Scheme { + s := runtime.NewScheme() + _ = corev1.AddToScheme(s) + _ = v1beta1.AddToScheme(s) + _ = ocmclusterv1.AddToScheme(s) + return s +} + +// mockClient is a mock implementation of client.Client for testing. +// It allows injecting errors for different client operations. +type mockClient struct { + client.Client + listErr error + deleteErr error + createErr error + getErr error + updateErr error +} + +func (m *mockClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + if m.getErr != nil { + return m.getErr + } + return m.Client.Get(ctx, key, obj, opts...) +} + +func (m *mockClient) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + if m.listErr != nil { + if _, ok := list.(*v1beta1.ResourceTrackerList); ok { + return m.listErr + } + } + return m.Client.List(ctx, list, opts...) +} + +func (m *mockClient) Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error { + if m.deleteErr != nil { + if _, ok := obj.(*ocmclusterv1.ManagedCluster); ok { + return m.deleteErr + } + if _, ok := obj.(*corev1.Secret); ok { + return m.deleteErr + } + } + return m.Client.Delete(ctx, obj, opts...) +} + +func (m *mockClient) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { + if m.createErr != nil { + return m.createErr + } + return m.Client.Create(ctx, obj, opts...) +} + +func (m *mockClient) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + if m.updateErr != nil { + return m.updateErr + } + return m.Client.Update(ctx, obj, opts...) +} + +func makeBaseClusterConfig(clusterName string) *KubeClusterConfig { + return &KubeClusterConfig{ + FilePath: "", + ClusterName: clusterName, + CreateNamespace: "", // avoid PostRegistration side effects + Config: &clientcmdapi.Config{}, + Cluster: &clientcmdapi.Cluster{ + Server: "https://example:6443", + CertificateAuthorityData: []byte("ca-bytes"), + InsecureSkipTLSVerify: false, + }, + AuthInfo: &clientcmdapi.AuthInfo{}, + } +} + +func TestRegisterByVelaSecret(t *testing.T) { + ctx := context.Background() + scheme := newTestScheme() + oldNS := ClusterGatewaySecretNamespace + ClusterGatewaySecretNamespace = "vela-system" + t.Cleanup(func() { ClusterGatewaySecretNamespace = oldNS }) + + testCases := []struct { + name string + cfg *KubeClusterConfig + cli client.Client + expectErr bool + verify func(t *testing.T, cli client.Client, cfg *KubeClusterConfig) + }{ + { + name: "Token and endpoint", + cfg: func() *KubeClusterConfig { + cfg := makeBaseClusterConfig("c-token") + cfg.AuthInfo.Token = "my-token" + return cfg + }(), + cli: fake.NewClientBuilder().WithScheme(scheme).Build(), + verify: func(t *testing.T, cli client.Client, cfg *KubeClusterConfig) { + var sec corev1.Secret + require.NoError(t, cli.Get(ctx, client.ObjectKey{Name: cfg.ClusterName, Namespace: ClusterGatewaySecretNamespace}, &sec)) + require.Equal(t, []byte("my-token"), sec.Data["token"]) + require.Equal(t, []byte("https://example:6443"), sec.Data["endpoint"]) + require.Equal(t, []byte("ca-bytes"), sec.Data["ca.crt"]) + require.Equal(t, string(clusterv1alpha1.CredentialTypeServiceAccountToken), sec.Labels[clustercommon.LabelKeyClusterCredentialType]) + }, + }, + { + name: "Token no CA when insecure", + cfg: func() *KubeClusterConfig { + cfg := makeBaseClusterConfig("c-token-insecure") + cfg.Cluster.InsecureSkipTLSVerify = true + cfg.AuthInfo.Token = 
"tok" + return cfg + }(), + cli: fake.NewClientBuilder().WithScheme(scheme).Build(), + verify: func(t *testing.T, cli client.Client, cfg *KubeClusterConfig) { + var sec corev1.Secret + require.NoError(t, cli.Get(ctx, client.ObjectKey{Name: cfg.ClusterName, Namespace: ClusterGatewaySecretNamespace}, &sec)) + require.Nil(t, sec.Data["ca.crt"]) + }, + }, + { + name: "Exec success", + cfg: func() *KubeClusterConfig { + dir := t.TempDir() + script := filepath.Join(dir, "print-token.sh") + require.NoError(t, os.WriteFile(script, []byte("#!/usr/bin/env bash\necho '{\"status\":{\"token\":\"exec-token\"}}'\n"), 0755)) + cfg := makeBaseClusterConfig("c-exec") + cfg.AuthInfo.Exec = &clientcmdapi.ExecConfig{Command: script} + return cfg + }(), + cli: fake.NewClientBuilder().WithScheme(scheme).Build(), + verify: func(t *testing.T, cli client.Client, cfg *KubeClusterConfig) { + var sec corev1.Secret + require.NoError(t, cli.Get(ctx, client.ObjectKey{Name: cfg.ClusterName, Namespace: ClusterGatewaySecretNamespace}, &sec)) + require.Equal(t, []byte("exec-token"), sec.Data["token"]) + require.Equal(t, string(clusterv1alpha1.CredentialTypeServiceAccountToken), sec.Labels[clustercommon.LabelKeyClusterCredentialType]) + }, + }, + { + name: "Exec failure", + cfg: func() *KubeClusterConfig { + dir := t.TempDir() + cfg := makeBaseClusterConfig("c-exec-fail") + cfg.AuthInfo.Exec = &clientcmdapi.ExecConfig{Command: filepath.Join(dir, "fail.sh")} + require.NoError(t, os.WriteFile(cfg.AuthInfo.Exec.Command, []byte("#!/usr/bin/env bash\nexit 1\n"), 0755)) + return cfg + }(), + cli: fake.NewClientBuilder().WithScheme(scheme).Build(), + expectErr: true, + }, + { + name: "X509 and proxy", + cfg: func() *KubeClusterConfig { + cfg := makeBaseClusterConfig("c-x509") + cfg.AuthInfo.ClientCertificateData = []byte("crt") + cfg.AuthInfo.ClientKeyData = []byte("key") + cfg.Cluster.ProxyURL = "http://proxy.example:8080" + return cfg + }(), + cli: fake.NewClientBuilder().WithScheme(scheme).Build(), + verify: func(t *testing.T, cli client.Client, cfg *KubeClusterConfig) { + var sec corev1.Secret + require.NoError(t, cli.Get(ctx, client.ObjectKey{Name: cfg.ClusterName, Namespace: ClusterGatewaySecretNamespace}, &sec)) + require.Equal(t, []byte("crt"), sec.Data["tls.crt"]) + require.Equal(t, []byte("key"), sec.Data["tls.key"]) + require.Equal(t, []byte("http://proxy.example:8080"), sec.Data["proxy-url"]) + require.Equal(t, string(clusterv1alpha1.CredentialTypeX509Certificate), sec.Labels[clustercommon.LabelKeyClusterCredentialType]) + }, + }, + { + name: "Get error from createOrUpdate", + cfg: func() *KubeClusterConfig { + cfg := makeBaseClusterConfig("c-get-err") + cfg.AuthInfo.Token = "tok" + cfg.ClusterAlreadyExistCallback = func(string) bool { return true } + return cfg + }(), + cli: func() client.Client { + clusterName := "c-get-err" + pre := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Namespace: ClusterGatewaySecretNamespace, + Labels: map[string]string{clustercommon.LabelKeyClusterCredentialType: string(clusterv1alpha1.CredentialTypeServiceAccountToken)}, + ResourceVersion: "1", + }, + } + base := fake.NewClientBuilder().WithScheme(scheme).WithObjects(pre).Build() + return &getErrorClient{Client: base, name: clusterName, namespace: ClusterGatewaySecretNamespace} + }(), + expectErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + err := tc.cfg.RegisterByVelaSecret(ctx, tc.cli) + if tc.expectErr { + require.Error(t, err) + } else { + require.NoError(t, err) + 
} + if tc.verify != nil { + tc.verify(t, tc.cli, tc.cfg) + } + }) + } +} + +func TestLoadKubeClusterConfigFromFile(t *testing.T) { + testCases := []struct { + name string + content string + expectErr bool + verify func(t *testing.T, cfg *KubeClusterConfig) + }{ + { + name: "Valid kubeconfig", + content: ` +apiVersion: v1 +clusters: +- cluster: + server: https://example.com + name: test-cluster +contexts: +- context: + cluster: test-cluster + user: test-user + name: test-context +current-context: test-context +kind: Config +users: +- name: test-user + user: + token: test-token +`, + verify: func(t *testing.T, cfg *KubeClusterConfig) { + require.NotNil(t, cfg) + require.Equal(t, "test-cluster", cfg.ClusterName) + require.Equal(t, "https://example.com:443", cfg.Cluster.Server) + require.Equal(t, "test-token", cfg.AuthInfo.Token) + }, + }, + { + name: "File does not exist", + content: "", + expectErr: true, + }, + { + name: "Invalid kubeconfig", + content: "invalid-yaml", + expectErr: true, + }, + { + name: "No current context", + content: ` +apiVersion: v1 +clusters: +- cluster: + server: https://example.com + name: test-cluster +`, + expectErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + var path string + if tc.content != "" { + tmpfile, err := os.CreateTemp("", "kubeconfig") + require.NoError(t, err) + defer os.Remove(tmpfile.Name()) + _, err = tmpfile.Write([]byte(tc.content)) + require.NoError(t, err) + err = tmpfile.Close() + require.NoError(t, err) + path = tmpfile.Name() + } else { + path = "/non-existent-file" + } + + cfg, err := LoadKubeClusterConfigFromFile(path) + if tc.expectErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + if tc.verify != nil { + tc.verify(t, cfg) + } + }) + } +} + +func TestDetachCluster(t *testing.T) { + ctx := context.Background() + scheme := newTestScheme() + ClusterGatewaySecretNamespace = "vela-system" + + testCases := []struct { + name string + clusterName string + options []DetachClusterOption + cli client.Client + wantErr bool + wantErrMsg string + }{ + { + name: "removeClusterFromResourceTrackers returns error", + cli: &mockClient{ + Client: fake.NewClientBuilder().WithScheme(scheme).Build(), + listErr: errors.New("list error"), + }, + clusterName: "any-cluster", + wantErr: true, + wantErrMsg: "list error", + }, + { + name: "Detach local returns ErrReservedLocalClusterName", + cli: fake.NewClientBuilder().WithScheme(scheme).Build(), + clusterName: ClusterLocalName, + wantErr: true, + wantErrMsg: ErrReservedLocalClusterName.Error(), + }, + { + name: "OCM Loading kubeconfig fails", + cli: fake.NewClientBuilder().WithScheme(scheme).WithObjects(&corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ocm-load-cfg-fail", + Namespace: ClusterGatewaySecretNamespace, + Labels: map[string]string{clustercommon.LabelKeyClusterCredentialType: string(clusterv1alpha1.CredentialTypeOCMManagedCluster)}, + }, + }).Build(), + clusterName: "ocm-load-cfg-fail", + options: []DetachClusterOption{DetachClusterManagedClusterKubeConfigPathOption("non-existent-path")}, + wantErr: true, + }, + { + name: "OCM BuildConfig fails", + cli: func() client.Client { + tmpfile, err := os.CreateTemp("", "kubeconfig") + require.NoError(t, err) + defer os.Remove(tmpfile.Name()) + _, err = tmpfile.Write([]byte("invalid kubeconfig")) + require.NoError(t, err) + err = tmpfile.Close() + require.NoError(t, err) + return fake.NewClientBuilder().WithScheme(scheme).WithObjects(&corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + 
Name: "ocm-build-cfg-fail", + Namespace: ClusterGatewaySecretNamespace, + Labels: map[string]string{clustercommon.LabelKeyClusterCredentialType: string(clusterv1alpha1.CredentialTypeOCMManagedCluster)}, + }, + }).Build() + }(), + clusterName: "ocm-build-cfg-fail", + options: func() []DetachClusterOption { + tmpfile, err := os.CreateTemp("", "kubeconfig") + require.NoError(t, err) + t.Cleanup(func() { os.Remove(tmpfile.Name()) }) + _, err = tmpfile.Write([]byte("invalid kubeconfig")) + require.NoError(t, err) + err = tmpfile.Close() + require.NoError(t, err) + return []DetachClusterOption{DetachClusterManagedClusterKubeConfigPathOption(tmpfile.Name())} + }(), + wantErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + err := DetachCluster(ctx, tc.cli, tc.clusterName, tc.options...) + if tc.wantErr { + require.Error(t, err) + if tc.wantErrMsg != "" { + require.Contains(t, err.Error(), tc.wantErrMsg) + } + } else { + require.NoError(t, err) + } + }) + } +} + +func TestRenameCluster(t *testing.T) { + ctx := context.Background() + scheme := newTestScheme() + ClusterGatewaySecretNamespace = "vela-system" + + testCases := []struct { + name string + oldClusterName string + newClusterName string + cli client.Client + wantErr bool + wantErrMsg string + postCheck func(t *testing.T, cli client.Client) + }{ + { + name: "New name is local: returns ErrReservedLocalClusterName", + oldClusterName: "old-cluster", + newClusterName: ClusterLocalName, + cli: fake.NewClientBuilder().WithScheme(scheme).Build(), + wantErr: true, + wantErrMsg: ErrReservedLocalClusterName.Error(), + }, + { + name: "getMutableClusterSecret error: wraps with 'is not mutable now'", + oldClusterName: "non-existent-cluster", + newClusterName: "new-cluster", + cli: fake.NewClientBuilder().WithScheme(scheme).Build(), + wantErr: true, + wantErrMsg: "is not mutable now", + }, + { + name: "ensureClusterNotExists returns ErrClusterExists: error returned", + oldClusterName: "old-cluster", + newClusterName: "existing-cluster", + cli: fake.NewClientBuilder().WithScheme(scheme).WithObjects( + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "old-cluster", + Namespace: ClusterGatewaySecretNamespace, + Labels: map[string]string{clustercommon.LabelKeyClusterCredentialType: string(clusterv1alpha1.CredentialTypeX509Certificate)}, + }, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-cluster", + Namespace: ClusterGatewaySecretNamespace, + Labels: map[string]string{clustercommon.LabelKeyClusterCredentialType: string(clusterv1alpha1.CredentialTypeX509Certificate)}, + }, + Data: map[string][]byte{"endpoint": []byte("https://example.com")}, + }, + ).Build(), + wantErr: true, + wantErrMsg: ErrClusterExists.Error(), + }, + { + name: "Delete old secret fails: error", + oldClusterName: "old-cluster", + newClusterName: "new-cluster", + cli: &mockClient{ + Client: fake.NewClientBuilder().WithScheme(scheme).WithObjects(&corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "old-cluster", + Namespace: ClusterGatewaySecretNamespace, + Labels: map[string]string{clustercommon.LabelKeyClusterCredentialType: string(clusterv1alpha1.CredentialTypeX509Certificate)}, + }, + }).Build(), + deleteErr: errors.New("delete failed"), + }, + wantErr: true, + wantErrMsg: "delete failed", + }, + { + name: "Create new secret fails: error", + oldClusterName: "old-cluster", + newClusterName: "new-cluster", + cli: &mockClient{ + Client: fake.NewClientBuilder().WithScheme(scheme).WithObjects(&corev1.Secret{ + 
ObjectMeta: metav1.ObjectMeta{ + Name: "old-cluster", + Namespace: ClusterGatewaySecretNamespace, + Labels: map[string]string{clustercommon.LabelKeyClusterCredentialType: string(clusterv1alpha1.CredentialTypeX509Certificate)}, + }, + }).Build(), + createErr: errors.New("create failed"), + }, + wantErr: true, + wantErrMsg: "create failed", + }, + { + name: "Success: Old deleted, new created with same labels/annotations", + oldClusterName: "old-cluster", + newClusterName: "new-cluster", + cli: fake.NewClientBuilder().WithScheme(scheme).WithObjects(&corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "old-cluster", + Namespace: ClusterGatewaySecretNamespace, + Labels: map[string]string{clustercommon.LabelKeyClusterCredentialType: string(clusterv1alpha1.CredentialTypeX509Certificate), "label-key": "label-value"}, + Annotations: map[string]string{"anno-key": "anno-value"}, + }, + Data: map[string][]byte{"key": []byte("value")}, + }).Build(), + postCheck: func(t *testing.T, cli client.Client) { + err := cli.Get(ctx, client.ObjectKey{Name: "old-cluster", Namespace: ClusterGatewaySecretNamespace}, &corev1.Secret{}) + require.True(t, apierrors.IsNotFound(err)) + newSecret := &corev1.Secret{} + err = cli.Get(ctx, client.ObjectKey{Name: "new-cluster", Namespace: ClusterGatewaySecretNamespace}, newSecret) + require.NoError(t, err) + require.Equal(t, "new-cluster", newSecret.Name) + require.Equal(t, map[string]string{clustercommon.LabelKeyClusterCredentialType: string(clusterv1alpha1.CredentialTypeX509Certificate), "label-key": "label-value"}, newSecret.Labels) + require.Equal(t, map[string]string{"anno-key": "anno-value"}, newSecret.Annotations) + require.Equal(t, map[string][]byte{"key": []byte("value")}, newSecret.Data) + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + err := RenameCluster(ctx, tc.cli, tc.oldClusterName, tc.newClusterName) + if tc.wantErr { + require.Error(t, err) + if tc.wantErrMsg != "" { + require.Contains(t, err.Error(), tc.wantErrMsg) + } + } else { + require.NoError(t, err) + } + if tc.postCheck != nil { + tc.postCheck(t, tc.cli) + } + }) + } +} + +// mock client to inject Get error on second secret fetch (createOrUpdate path) +type getErrorClient struct { + client.Client + name string + namespace string + count int +} + +func (g *getErrorClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + if key.Name == g.name && key.Namespace == g.namespace { + g.count++ + if g.count >= 2 { // first call used by existence check, second by createOrUpdate + return errors.New("injected get error") + } + } + return g.Client.Get(ctx, key, obj, opts...) 
+} + +func TestEnsureClusterNotExists(t *testing.T) { + ctx := context.Background() + scheme := newTestScheme() + + testCases := []struct { + name string + cli client.Client + cluster string + expectErr bool + }{ + { + name: "Cluster does not exist", + cli: fake.NewClientBuilder().WithScheme(scheme).Build(), + cluster: "non-existent", + expectErr: false, + }, + { + name: "Cluster exists", + cli: fake.NewClientBuilder().WithScheme(scheme).WithObjects(&corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-cluster", + Namespace: ClusterGatewaySecretNamespace, + Labels: map[string]string{clustercommon.LabelKeyClusterCredentialType: string(clusterv1alpha1.CredentialTypeX509Certificate)}, + }, + Data: map[string][]byte{"endpoint": []byte("https://example.com")}, + }).Build(), + cluster: "existing-cluster", + expectErr: true, + }, + { + name: "Client error", + cli: &mockClient{ + Client: fake.NewClientBuilder().WithScheme(scheme).Build(), + getErr: errors.New("client error"), + }, + cluster: "any-cluster", + expectErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + err := ensureClusterNotExists(ctx, tc.cli, tc.cluster) + if tc.expectErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestEnsureNamespaceExists(t *testing.T) { + ctx := context.Background() + scheme := newTestScheme() + + testCases := []struct { + name string + cli client.Client + cluster string + namespace string + expectErr bool + }{ + { + name: "Namespace already exists", + cli: fake.NewClientBuilder().WithScheme(scheme).WithObjects(&corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: "existing-ns"}, + }).Build(), + cluster: "any-cluster", + namespace: "existing-ns", + expectErr: false, + }, + { + name: "Namespace does not exist", + cli: fake.NewClientBuilder().WithScheme(scheme).Build(), + cluster: "any-cluster", + namespace: "new-ns", + expectErr: false, + }, + { + name: "Client Get error", + cli: &mockClient{ + Client: fake.NewClientBuilder().WithScheme(scheme).Build(), + getErr: errors.New("client error"), + }, + cluster: "any-cluster", + namespace: "any-ns", + expectErr: true, + }, + { + name: "Client Create error", + cli: &mockClient{ + Client: fake.NewClientBuilder().WithScheme(scheme).Build(), + createErr: errors.New("client error"), + }, + cluster: "any-cluster", + namespace: "new-ns", + expectErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + err := ensureNamespaceExists(ctx, tc.cli, tc.cluster, tc.namespace) + if tc.expectErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestGetMutableClusterSecret(t *testing.T) { + ctx := context.Background() + scheme := newTestScheme() + ClusterGatewaySecretNamespace = "vela-system" + + testCases := []struct { + name string + cli client.Client + cluster string + expectErr bool + }{ + { + name: "Secret does not exist", + cli: fake.NewClientBuilder().WithScheme(scheme).Build(), + cluster: "non-existent", + expectErr: true, + }, + { + name: "Secret exists but no credential type label", + cli: fake.NewClientBuilder().WithScheme(scheme).WithObjects(&corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "no-label", + Namespace: ClusterGatewaySecretNamespace, + }, + }).Build(), + cluster: "no-label", + expectErr: true, + }, + { + name: "Secret exists with credential type label", + cli: fake.NewClientBuilder().WithScheme(scheme).WithObjects(&corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "with-label", 
+ Namespace: ClusterGatewaySecretNamespace, + Labels: map[string]string{clustercommon.LabelKeyClusterCredentialType: string(clusterv1alpha1.CredentialTypeX509Certificate)}, + }, + }).Build(), + cluster: "with-label", + expectErr: false, + }, + { + name: "Client Get error", + cli: &mockClient{ + Client: fake.NewClientBuilder().WithScheme(scheme).Build(), + getErr: errors.New("client error"), + }, + cluster: "any-cluster", + expectErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + _, err := getMutableClusterSecret(ctx, tc.cli, tc.cluster) + if tc.expectErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestRemoveClusterFromResourceTrackers(t *testing.T) { + ctx := context.Background() + scheme := newTestScheme() + + testCases := []struct { + name string + cli client.Client + cluster string + expectErr bool + verify func(t *testing.T, cli client.Client) + }{ + { + name: "No resource trackers", + cli: fake.NewClientBuilder().WithScheme(scheme).Build(), + cluster: "any-cluster", + expectErr: false, + }, + { + name: "Resource trackers exist, but none reference the cluster", + cli: fake.NewClientBuilder().WithScheme(scheme).WithObjects(&v1beta1.ResourceTracker{ + ObjectMeta: metav1.ObjectMeta{Name: "rt-1"}, + Spec: v1beta1.ResourceTrackerSpec{ + ManagedResources: []v1beta1.ManagedResource{ + {ClusterObjectReference: common.ClusterObjectReference{Cluster: "other-cluster"}}, + }, + }, + }).Build(), + cluster: "any-cluster", + expectErr: false, + }, + { + name: "Resource trackers exist and some reference the cluster", + cli: fake.NewClientBuilder().WithScheme(scheme).WithObjects(&v1beta1.ResourceTracker{ + ObjectMeta: metav1.ObjectMeta{Name: "rt-1"}, + Spec: v1beta1.ResourceTrackerSpec{ + ManagedResources: []v1beta1.ManagedResource{ + {ClusterObjectReference: common.ClusterObjectReference{Cluster: "cluster-to-remove"}}, + {ClusterObjectReference: common.ClusterObjectReference{Cluster: "other-cluster"}}, + }, + }, + }).Build(), + cluster: "cluster-to-remove", + verify: func(t *testing.T, cli client.Client) { + var rt v1beta1.ResourceTracker + require.NoError(t, cli.Get(ctx, client.ObjectKey{Name: "rt-1"}, &rt)) + require.Len(t, rt.Spec.ManagedResources, 1) + require.Equal(t, "other-cluster", rt.Spec.ManagedResources[0].Cluster) + }, + }, + { + name: "Client List error", + cli: &mockClient{ + Client: fake.NewClientBuilder().WithScheme(scheme).Build(), + listErr: errors.New("client error"), + }, + cluster: "any-cluster", + expectErr: true, + }, + { + name: "Client Update error", + cli: &mockClient{ + Client: fake.NewClientBuilder().WithScheme(scheme).WithObjects(&v1beta1.ResourceTracker{ + ObjectMeta: metav1.ObjectMeta{Name: "rt-1"}, + Spec: v1beta1.ResourceTrackerSpec{ + ManagedResources: []v1beta1.ManagedResource{ + {ClusterObjectReference: common.ClusterObjectReference{Cluster: "cluster-to-remove"}}, + }, + }, + }).Build(), + updateErr: errors.New("client error"), + }, + cluster: "cluster-to-remove", + expectErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + err := removeClusterFromResourceTrackers(ctx, tc.cli, tc.cluster) + if tc.expectErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + if tc.verify != nil { + tc.verify(t, tc.cli) + } + }) + } +} + +func TestGetTokenFromExec(t *testing.T) { + testCases := []struct { + name string + execCfg *clientcmdapi.ExecConfig + setup func(t *testing.T, cfg *clientcmdapi.ExecConfig) + expectErr bool + }{ + { + 
name: "Valid exec config", + execCfg: &clientcmdapi.ExecConfig{}, + setup: func(t *testing.T, cfg *clientcmdapi.ExecConfig) { + dir := t.TempDir() + script := filepath.Join(dir, "test.sh") + require.NoError(t, os.WriteFile(script, []byte("#!/bin/sh\necho '{\"status\":{\"token\":\"test-token\"}}'"), 0755)) + cfg.Command = script + }, + }, + { + name: "Exec command fails", + execCfg: &clientcmdapi.ExecConfig{Command: "/bin/false"}, + expectErr: true, + }, + { + name: "Invalid JSON output", + execCfg: &clientcmdapi.ExecConfig{}, + setup: func(t *testing.T, cfg *clientcmdapi.ExecConfig) { + dir := t.TempDir() + script := filepath.Join(dir, "test.sh") + require.NoError(t, os.WriteFile(script, []byte("#!/bin/sh\necho 'invalid-json'"), 0755)) + cfg.Command = script + }, + expectErr: true, + }, + { + name: "No token in JSON output", + execCfg: &clientcmdapi.ExecConfig{}, + setup: func(t *testing.T, cfg *clientcmdapi.ExecConfig) { + dir := t.TempDir() + script := filepath.Join(dir, "test.sh") + require.NoError(t, os.WriteFile(script, []byte("#!/bin/sh\necho '{\"status\":{}}'"), 0755)) + cfg.Command = script + }, + expectErr: true, + }, + { + name: "Command with invalid characters", + execCfg: &clientcmdapi.ExecConfig{Command: "/bin/echo; ls"}, + expectErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + if tc.setup != nil { + tc.setup(t, tc.execCfg) + } + _, err := getTokenFromExec(tc.execCfg) + if tc.expectErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestAliasCluster(t *testing.T) { + ctx := context.Background() + scheme := newTestScheme() + ClusterGatewaySecretNamespace = "vela-system" + + // The secret that will be used in some test cases + clusterSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: ClusterGatewaySecretNamespace, + Labels: map[string]string{clustercommon.LabelKeyClusterCredentialType: string(clusterv1alpha1.CredentialTypeX509Certificate)}, + }, + Data: map[string][]byte{ + "endpoint": []byte("https://example.com"), + }, + } + + testCases := []struct { + name string + clusterName string + aliasName string + cli client.Client + wantErr error // for specific error types + wantErrMsg string // for substring match + postCheck func(t *testing.T, cli client.Client) + }{ + { + name: "Successfully alias cluster", + clusterName: "test-cluster", + aliasName: "my-alias", + cli: fake.NewClientBuilder().WithScheme(scheme).WithObjects(clusterSecret.DeepCopy()).Build(), + postCheck: func(t *testing.T, cli client.Client) { + updatedSecret := &corev1.Secret{} + err := cli.Get(ctx, client.ObjectKey{Name: "test-cluster", Namespace: ClusterGatewaySecretNamespace}, updatedSecret) + require.NoError(t, err) + annotations := updatedSecret.GetAnnotations() + require.NotNil(t, annotations) + require.Equal(t, "my-alias", annotations[clusterv1alpha1.AnnotationClusterAlias]) + }, + }, + { + name: "Local cluster returns error", + clusterName: ClusterLocalName, + aliasName: "some-alias", + cli: fake.NewClientBuilder().WithScheme(scheme).Build(), + wantErr: ErrReservedLocalClusterName, + }, + { + name: "Cluster not found error", + clusterName: "non-existent-cluster", + aliasName: "my-alias", + cli: fake.NewClientBuilder().WithScheme(scheme).Build(), + wantErrMsg: "no such cluster", + }, + { + name: "GetVirtualCluster fails", + clusterName: "test-cluster", + aliasName: "my-alias", + cli: &mockClient{ + Client: fake.NewClientBuilder().WithScheme(scheme).Build(), + getErr: 
errors.New("get error"), + }, + wantErrMsg: "get error", + }, + { + name: "Client update fails", + clusterName: "test-cluster", + aliasName: "my-alias", + cli: &mockClient{ + Client: fake.NewClientBuilder().WithScheme(scheme).WithObjects(clusterSecret.DeepCopy()).Build(), + updateErr: errors.New("update failed"), + }, + wantErrMsg: "update failed", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + err := AliasCluster(ctx, tc.cli, tc.clusterName, tc.aliasName) + + if tc.wantErr != nil { + require.Equal(t, tc.wantErr, err) + } else if tc.wantErrMsg != "" { + require.Error(t, err) + require.Contains(t, err.Error(), tc.wantErrMsg) + } else { + require.NoError(t, err) + } + + if tc.postCheck != nil { + tc.postCheck(t, tc.cli) + } + }) + } +} diff --git a/pkg/multicluster/utils_test.go b/pkg/multicluster/utils_test.go index 0d0e47596..f0ba9046e 100644 --- a/pkg/multicluster/utils_test.go +++ b/pkg/multicluster/utils_test.go @@ -22,12 +22,18 @@ import ( "github.com/oam-dev/cluster-gateway/pkg/apis/cluster/v1alpha1" clustercommon "github.com/oam-dev/cluster-gateway/pkg/common" - v1 "k8s.io/api/core/v1" - v12 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" - "github.com/oam-dev/kubevela/pkg/utils/common" + "github.com/oam-dev/kubevela/pkg/oam" + + velacommon "github.com/oam-dev/kubevela/pkg/utils/common" ) func TestUpgradeExistingClusterSecret(t *testing.T) { @@ -37,28 +43,262 @@ func TestUpgradeExistingClusterSecret(t *testing.T) { ClusterGatewaySecretNamespace = oldClusterGatewaySecretNamespace }() ctx := context.Background() - c := fake.NewClientBuilder().WithScheme(common.Scheme).Build() - secret := &v1.Secret{ - ObjectMeta: v12.ObjectMeta{ + c := fake.NewClientBuilder().WithScheme(velacommon.Scheme).Build() + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ Name: "example-outdated-cluster-secret", Namespace: "default", Labels: map[string]string{ "cluster.core.oam.dev/cluster-credential": "tls", }, }, - Type: v1.SecretTypeTLS, + Type: corev1.SecretTypeTLS, } - if err := c.Create(ctx, secret); err != nil { - t.Fatalf("failed to create fake outdated cluster secret, err: %v", err) + require.NoError(t, c.Create(ctx, secret)) + require.NoError(t, UpgradeExistingClusterSecret(ctx, c)) + newSecret := &corev1.Secret{} + require.NoError(t, c.Get(ctx, client.ObjectKeyFromObject(secret), newSecret)) + require.Equal(t, string(v1alpha1.CredentialTypeX509Certificate), newSecret.Labels[clustercommon.LabelKeyClusterCredentialType]) +} + +func TestContext(t *testing.T) { + t.Run("TestClusterNameInContext", func(t *testing.T) { + ctx := context.Background() + require.Equal(t, "", ClusterNameInContext(ctx)) + ctx = ContextWithClusterName(ctx, "my-cluster") + require.Equal(t, "my-cluster", ClusterNameInContext(ctx)) + }) + + t.Run("TestContextInLocalCluster", func(t *testing.T) { + ctx := context.Background() + ctx = ContextInLocalCluster(ctx) + require.Equal(t, ClusterLocalName, ClusterNameInContext(ctx)) + }) +} + +func TestResourcesWithClusterName(t *testing.T) { + testCases := []struct { + name string + clusterName string + objs []*unstructured.Unstructured + expected []*unstructured.Unstructured + }{ + { + name: 
"Empty slice", + clusterName: "my-cluster", + objs: []*unstructured.Unstructured{}, + expected: nil, + }, + { + name: "Nil object", + clusterName: "my-cluster", + objs: []*unstructured.Unstructured{nil}, + expected: nil, + }, + { + name: "Object without cluster name label", + clusterName: "my-cluster", + objs: []*unstructured.Unstructured{{Object: map[string]interface{}{}}}, + expected: []*unstructured.Unstructured{{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": map[string]interface{}{ + oam.LabelAppCluster: "my-cluster", + }, + }, + }, + }}, + }, + { + name: "Object with existing cluster name label", + clusterName: "my-cluster", + objs: []*unstructured.Unstructured{{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": map[string]interface{}{ + oam.LabelAppCluster: "other-cluster", + }, + }, + }, + }}, + expected: []*unstructured.Unstructured{{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": map[string]interface{}{ + oam.LabelAppCluster: "other-cluster", + }, + }, + }, + }}, + }, } - if err := UpgradeExistingClusterSecret(ctx, c); err != nil { - t.Fatalf("expect no error while upgrading outdated cluster secret but encounter error: %v", err) - } - newSecret := &v1.Secret{} - if err := c.Get(ctx, client.ObjectKeyFromObject(secret), newSecret); err != nil { - t.Fatalf("found error while getting updated cluster secret: %v", err) - } - if newSecret.Labels[clustercommon.LabelKeyClusterCredentialType] != string(v1alpha1.CredentialTypeX509Certificate) { - t.Fatalf("updated secret label should has credential type x509") + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := ResourcesWithClusterName(tc.clusterName, tc.objs...) 
+ require.Equal(t, tc.expected, result) + }) + } +} + +func TestGetClusterGatewayService(t *testing.T) { + ctx := context.Background() + scheme := newTestScheme() + apiregistrationv1.AddToScheme(scheme) + + testCases := []struct { + name string + cli client.Client + expectErr bool + verify func(t *testing.T, svc *apiregistrationv1.ServiceReference) + }{ + { + name: "APIService not found", + cli: fake.NewClientBuilder().WithScheme(scheme).Build(), + expectErr: true, + }, + { + name: "APIService found but no service spec", + cli: fake.NewClientBuilder().WithScheme(scheme).WithObjects(&apiregistrationv1.APIService{ + ObjectMeta: metav1.ObjectMeta{Name: "v1alpha1.cluster.core.oam.dev"}, + }).Build(), + expectErr: true, + }, + { + name: "APIService found but not available", + cli: fake.NewClientBuilder().WithScheme(scheme).WithObjects(&apiregistrationv1.APIService{ + ObjectMeta: metav1.ObjectMeta{Name: "v1alpha1.cluster.core.oam.dev"}, + Spec: apiregistrationv1.APIServiceSpec{ + Service: &apiregistrationv1.ServiceReference{ + Name: "my-service", + Namespace: "my-namespace", + }, + }, + Status: apiregistrationv1.APIServiceStatus{ + Conditions: []apiregistrationv1.APIServiceCondition{ + { + Type: apiregistrationv1.Available, + Status: apiregistrationv1.ConditionFalse, + }, + }, + }, + }).Build(), + expectErr: true, + verify: func(t *testing.T, svc *apiregistrationv1.ServiceReference) { + require.NotNil(t, svc) + require.Equal(t, "my-service", svc.Name) + }, + }, + { + name: "APIService found and available", + cli: fake.NewClientBuilder().WithScheme(scheme).WithObjects(&apiregistrationv1.APIService{ + ObjectMeta: metav1.ObjectMeta{Name: "v1alpha1.cluster.core.oam.dev"}, + Spec: apiregistrationv1.APIServiceSpec{ + Service: &apiregistrationv1.ServiceReference{ + Name: "my-service", + Namespace: "my-namespace", + }, + }, + Status: apiregistrationv1.APIServiceStatus{ + Conditions: []apiregistrationv1.APIServiceCondition{ + { + Type: apiregistrationv1.Available, + Status: apiregistrationv1.ConditionTrue, + }, + }, + }, + }).Build(), + verify: func(t *testing.T, svc *apiregistrationv1.ServiceReference) { + require.NotNil(t, svc) + require.Equal(t, "my-service", svc.Name) + }, + }, + { + name: "Client Get error", + cli: &mockClient{ + Client: fake.NewClientBuilder().WithScheme(scheme).Build(), + getErr: errors.New("client error"), + }, + expectErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + svc, err := GetClusterGatewayService(ctx, tc.cli) + if tc.expectErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + if tc.verify != nil { + tc.verify(t, svc) + } + }) + } +} + +func TestListExistingClusterSecrets(t *testing.T) { + oldClusterGatewaySecretNamespace := ClusterGatewaySecretNamespace + defer func() { + ClusterGatewaySecretNamespace = oldClusterGatewaySecretNamespace + }() + ctx := context.Background() + scheme := newTestScheme() + ClusterGatewaySecretNamespace = "vela-system" + + testCases := []struct { + name string + cli client.Client + expectErr bool + verify func(t *testing.T, secrets []corev1.Secret) + }{ + { + name: "No secrets exist", + cli: fake.NewClientBuilder().WithScheme(scheme).Build(), + verify: func(t *testing.T, secrets []corev1.Secret) { + require.Empty(t, secrets) + }, + }, + { + name: "Secrets exist, but none have the required label", + cli: fake.NewClientBuilder().WithScheme(scheme).WithObjects(&corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "no-label", + Namespace: ClusterGatewaySecretNamespace, + }, + 
}).Build(), + verify: func(t *testing.T, secrets []corev1.Secret) { + require.Empty(t, secrets) + }, + }, + { + name: "Secrets exist with the required label", + cli: fake.NewClientBuilder().WithScheme(scheme).WithObjects(&corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "with-label", + Namespace: ClusterGatewaySecretNamespace, + Labels: map[string]string{clustercommon.LabelKeyClusterCredentialType: string(v1alpha1.CredentialTypeX509Certificate)}, + }, + }).Build(), + verify: func(t *testing.T, secrets []corev1.Secret) { + require.Len(t, secrets, 1) + require.Equal(t, "with-label", secrets[0].Name) + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + secrets, err := ListExistingClusterSecrets(ctx, tc.cli) + if tc.expectErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + if tc.verify != nil { + tc.verify(t, secrets) + } + }) } } diff --git a/pkg/multicluster/virtual_cluster_test.go b/pkg/multicluster/virtual_cluster_test.go index 7c936bdcd..128edb591 100644 --- a/pkg/multicluster/virtual_cluster_test.go +++ b/pkg/multicluster/virtual_cluster_test.go @@ -18,6 +18,7 @@ package multicluster import ( "context" + "encoding/json" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -155,6 +156,109 @@ var _ = Describe("Test Virtual Cluster", func() { Expect(cv.Major).Should(BeEquivalentTo("1")) }) + It("Test virtual cluster helpers", func() { + By("Test FullName") + vcWithAlias := &VirtualCluster{Name: "test", Alias: "alias"} + Expect(vcWithAlias.FullName()).To(Equal("test (alias)")) + vcWithoutAlias := &VirtualCluster{Name: "test"} + Expect(vcWithoutAlias.FullName()).To(Equal("test")) + + By("Test get/set cluster alias") + secret := &v1.Secret{} + setClusterAlias(secret, "my-alias") + Expect(getClusterAlias(secret)).To(Equal("my-alias")) + annots := secret.GetAnnotations() + Expect(annots).ToNot(BeNil()) + Expect(annots[v1alpha1.AnnotationClusterAlias]).To(Equal("my-alias")) + + By("Test NewVirtualClusterFromLocal") + vc := NewVirtualClusterFromLocal() + Expect(vc.Name).To(Equal(ClusterLocalName)) + Expect(vc.Accepted).To(BeTrue()) + Expect(vc.EndPoint).To(Equal(types.ClusterBlankEndpoint)) + + By("Test MatchVirtualClusterLabels") + ClusterGatewaySecretNamespace = "vela-system" // as set in other test + labels := MatchVirtualClusterLabels{"key": "val"} + opts := &client.ListOptions{} + labels.ApplyToList(opts) + Expect(opts.Namespace).To(Equal(ClusterGatewaySecretNamespace)) + Expect(opts.LabelSelector).NotTo(BeNil()) + Expect(opts.LabelSelector.String()).To(ContainSubstring("key=val")) + Expect(opts.LabelSelector.String()).To(ContainSubstring(clustercommon.LabelKeyClusterCredentialType)) + + delOpts := &client.DeleteAllOfOptions{} + labels.ApplyToDeleteAllOf(delOpts) + Expect(delOpts.ListOptions.Namespace).To(Equal(ClusterGatewaySecretNamespace)) + Expect(delOpts.ListOptions.LabelSelector).NotTo(BeNil()) + Expect(delOpts.ListOptions.LabelSelector.String()).To(ContainSubstring("key=val")) + + By("Test get/set cluster version") + versionedSecret := &v1.Secret{} + cv := types.ClusterVersion{Major: "1", Minor: "20", GitVersion: "v1.20.0"} + setClusterVersion(versionedSecret, cv) + + newCV, err := getClusterVersionFromObject(versionedSecret) + Expect(err).To(Succeed()) + Expect(newCV).To(Equal(cv)) + + versionedSecret.Annotations = nil + _, err = getClusterVersionFromObject(versionedSecret) + Expect(err).ToNot(Succeed()) + + secretWithEmptyAnnotation := &v1.Secret{ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{}}} + 
_, err = getClusterVersionFromObject(secretWithEmptyAnnotation) + Expect(err).ToNot(Succeed()) + }) + + It("Test GetVersionInfoFromObject", func() { + ClusterGatewaySecretNamespace = "vela-system3" + ctx := context.Background() + ns := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ClusterGatewaySecretNamespace}} + Expect(k8sClient.Create(ctx, ns)).Should(Succeed()) + defer func() { + Expect(k8sClient.Delete(ctx, ns)).Should(Succeed()) + }() + + By("Setup a secret with version info") + cv := types.ClusterVersion{Major: "1", Minor: "21", GitVersion: "v1.21.0"} + cvJSON, err := json.Marshal(cv) + Expect(err).To(Succeed()) + secret := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster-with-version", + Namespace: ClusterGatewaySecretNamespace, + Labels: map[string]string{clustercommon.LabelKeyClusterCredentialType: "X509"}, + Annotations: map[string]string{ + types.AnnotationClusterVersion: string(cvJSON), + }, + }, + } + Expect(k8sClient.Create(ctx, secret)).Should(Succeed()) + + By("Test getting version from the secret") + retrievedCV := GetVersionInfoFromObject(ctx, k8sClient, "cluster-with-version") + Expect(retrievedCV).To(Equal(cv)) + + By("Test with a cluster that doesn't exist, should fallback to control plane version") + originalCPVersion := types.ControlPlaneClusterVersion + types.ControlPlaneClusterVersion = types.ClusterVersion{GitVersion: "v1.22.0"} + defer func() { types.ControlPlaneClusterVersion = originalCPVersion }() + retrievedCV = GetVersionInfoFromObject(ctx, k8sClient, "non-existent-cluster") + Expect(retrievedCV).To(Equal(types.ControlPlaneClusterVersion)) + + By("Test with a secret without version info, should fallback") + secretNoVersion := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster-no-version", + Namespace: ClusterGatewaySecretNamespace, + Labels: map[string]string{clustercommon.LabelKeyClusterCredentialType: "X509"}, + }, + } + Expect(k8sClient.Create(ctx, secretNoVersion)).Should(Succeed()) + retrievedCV = GetVersionInfoFromObject(ctx, k8sClient, "cluster-no-version") + Expect(retrievedCV).To(Equal(types.ControlPlaneClusterVersion)) + }) }) type fakeClient struct { diff --git a/pkg/workflow/providers/multicluster/multicluster_test.go b/pkg/workflow/providers/multicluster/multicluster_test.go index 47b622f96..bc1ab059d 100644 --- a/pkg/workflow/providers/multicluster/multicluster_test.go +++ b/pkg/workflow/providers/multicluster/multicluster_test.go @@ -20,30 +20,63 @@ import ( "context" "testing" + "cuelang.org/go/cue" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" clusterv1alpha1 "github.com/oam-dev/cluster-gateway/pkg/apis/cluster/v1alpha1" clustercommon "github.com/oam-dev/cluster-gateway/pkg/common" + "github.com/oam-dev/kubevela/apis/core.oam.dev/common" + "github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha1" + "github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1" "github.com/oam-dev/kubevela/apis/types" + "github.com/oam-dev/kubevela/pkg/appfile" "github.com/oam-dev/kubevela/pkg/multicluster" - "github.com/oam-dev/kubevela/pkg/utils/common" + commontypes "github.com/oam-dev/kubevela/pkg/utils/common" oamprovidertypes "github.com/oam-dev/kubevela/pkg/workflow/providers/types" + + wfmock "github.com/kubevela/workflow/pkg/mock" ) +// mockAction is a mock implementation of 
types.Action for testing. +type mockAction struct { + wfmock.Action + WaitCalled bool + WaitReason string +} + +// Wait records that the wait action was called. +func (a *mockAction) Wait(reason string) { + a.WaitCalled = true + a.WaitReason = reason +} + func TestListClusters(t *testing.T) { - multicluster.ClusterGatewaySecretNamespace = types.DefaultKubeVelaNS r := require.New(t) + originalNS := multicluster.ClusterGatewaySecretNamespace + multicluster.ClusterGatewaySecretNamespace = types.DefaultKubeVelaNS + t.Cleanup(func() { + multicluster.ClusterGatewaySecretNamespace = originalNS + }) ctx := context.Background() - cli := fake.NewClientBuilder().WithScheme(common.Scheme).Build() + cli := fake.NewClientBuilder().WithScheme(commontypes.Scheme).Build() clusterNames := []string{"cluster-a", "cluster-b"} for _, secretName := range clusterNames { - secret := &corev1.Secret{} - secret.Name = secretName - secret.Namespace = multicluster.ClusterGatewaySecretNamespace - secret.Labels = map[string]string{clustercommon.LabelKeyClusterCredentialType: string(clusterv1alpha1.CredentialTypeX509Certificate)} + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: multicluster.ClusterGatewaySecretNamespace, + Labels: map[string]string{ + clustercommon.LabelKeyClusterCredentialType: string(clusterv1alpha1.CredentialTypeX509Certificate), + }, + }, + } r.NoError(cli.Create(context.Background(), secret)) } res, err := ListClusters(ctx, &oamprovidertypes.Params[any]{ @@ -54,3 +87,195 @@ func TestListClusters(t *testing.T) { r.NoError(err) r.Equal(clusterNames, res.Returns.Outputs.Clusters) } + +func TestDeploy(t *testing.T) { + r := require.New(t) + ctx := context.Background() + cli := fake.NewClientBuilder().WithScheme(commontypes.Scheme).Build() + + // Mock component functions + componentApply := func(ctx context.Context, comp common.ApplicationComponent, patcher *cue.Value, clusterName string, overrideNamespace string) (*unstructured.Unstructured, []*unstructured.Unstructured, bool, error) { + return nil, nil, true, nil + } + componentHealthCheck := func(ctx context.Context, comp common.ApplicationComponent, patcher *cue.Value, clusterName string, overrideNamespace string) (bool, *common.ApplicationComponentStatus, *unstructured.Unstructured, []*unstructured.Unstructured, error) { + return true, nil, nil, nil, nil + } + workloadRender := func(ctx context.Context, comp common.ApplicationComponent) (*appfile.Component, error) { + return &appfile.Component{}, nil + } + + createMockParams := func(parallelism int64) *DeployParams { + action := &mockAction{} + return &DeployParams{ + Params: DeployParameter{ + Parallelism: parallelism, + IgnoreTerraformComponent: true, + Policies: []string{}, + }, + RuntimeParams: oamprovidertypes.RuntimeParams{ + Action: action, + KubeClient: cli, + ComponentApply: componentApply, + ComponentHealthCheck: componentHealthCheck, + WorkloadRender: workloadRender, + Appfile: &appfile.Appfile{ + Name: "test-app", + Namespace: "default", + Policies: []v1beta1.AppPolicy{}, + }, + }, + } + } + + cases := map[string]struct { + reason string + params *DeployParams + expectError bool + errorContains string + expectPanic bool + }{ + "parallelism zero validation error": { + reason: "Should return a validation error for zero parallelism", + params: createMockParams(0), + expectError: true, + errorContains: "parallelism cannot be smaller than 1", + }, + "parallelism negative validation error": { + reason: "Should return a validation error for negative 
parallelism", + params: createMockParams(-1), + expectError: true, + errorContains: "parallelism cannot be smaller than 1", + }, + "parameters nil pointer handling": { + reason: "Should panic when params are nil", + params: nil, + expectPanic: true, + }, + "successful deployment healthy": { + reason: "Should execute successfully with valid parameters", + params: createMockParams(1), + }, + "successful deployment unhealthy wait": { + reason: "Should execute successfully even with higher parallelism", + params: createMockParams(2), + }, + "executor deploy error propagation": { + reason: "Should pass validation and any errors should be from the executor", + params: createMockParams(1), + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + if tc.expectPanic { + r.Panics(func() { + _, _ = Deploy(ctx, tc.params) + }) + return + } + + result, err := Deploy(ctx, tc.params) + + if tc.expectError { + r.Error(err) + if tc.errorContains != "" { + r.Contains(err.Error(), tc.errorContains) + } + } else { + r.NoError(err) + r.Nil(result) + } + }) + } +} + +func TestGetPlacementsFromTopologyPolicies(t *testing.T) { + r := require.New(t) + ctx := context.Background() + scheme := commontypes.Scheme + r.NoError(v1alpha1.AddToScheme(scheme)) + + topologyPolicy := &v1alpha1.Policy{ + ObjectMeta: metav1.ObjectMeta{Name: "my-topology", Namespace: "default"}, + Type: v1alpha1.TopologyPolicyType, + Properties: &runtime.RawExtension{ + Raw: []byte(`{"clusters":["local"],"namespace":"topo-ns"}`), + }, + } + + appFileTopologyPolicy := v1beta1.AppPolicy{ + Name: "my-topology", + Type: v1alpha1.TopologyPolicyType, + Properties: &runtime.RawExtension{ + Raw: []byte(`{"clusters":["local"],"namespace":"topo-ns"}`), + }, + } + + cases := map[string]struct { + reason string + policiesInAppfile []v1beta1.AppPolicy + policiesToGet []string + objectsToCreate []client.Object + expectedPlacements []v1alpha1.PlacementDecision + expectError bool + errorContains string + }{ + "Successful placement resolution with single policy": { + reason: "Should resolve placement from a single topology policy", + objectsToCreate: []client.Object{topologyPolicy}, + policiesInAppfile: []v1beta1.AppPolicy{appFileTopologyPolicy}, + policiesToGet: []string{"my-topology"}, + expectedPlacements: []v1alpha1.PlacementDecision{{Cluster: "local", Namespace: "topo-ns"}}, + }, + "Policy not found in appfile": { + reason: "Should return an error if the policy is not found in the appfile", + policiesToGet: []string{"non-existent-policy"}, + expectError: true, + errorContains: "policy non-existent-policy not found", + }, + "Empty policy list returns default local placement": { + reason: "Should return default local placement when no policies are specified", + policiesToGet: []string{}, + expectedPlacements: []v1alpha1.PlacementDecision{{Cluster: "local"}}, + }, + "Nil policy names list returns default local placement": { + reason: "Should return default local placement when the policy list is nil", + policiesToGet: nil, + expectedPlacements: []v1alpha1.PlacementDecision{{Cluster: "local"}}, + }, + "Empty appfile policies list with a policy name": { + reason: "Should return an error if appfile has no policies but a policy is requested", + policiesToGet: []string{"some-policy"}, + expectError: true, + errorContains: "policy some-policy not found", + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + cli := fake.NewClientBuilder().WithScheme(scheme).WithObjects(tc.objectsToCreate...).Build() + af := 
&appfile.Appfile{ + Name: "test-app", + Namespace: "default", + Policies: tc.policiesInAppfile, + } + params := &PoliciesParams{ + Params: PoliciesVars{Policies: tc.policiesToGet}, + RuntimeParams: oamprovidertypes.RuntimeParams{KubeClient: cli, Appfile: af}, + } + + result, err := GetPlacementsFromTopologyPolicies(ctx, params) + + if tc.expectError { + r.Error(err) + if tc.errorContains != "" { + r.Contains(err.Error(), tc.errorContains) + } + } else { + r.NoError(err) + r.NotNil(result) + r.Equal(tc.expectedPlacements, result.Returns.Placements) + } + }) + } +}