Ignore already existing error when creating cluster (#1142)
Some checks failed
Scorecard supply-chain security / Scorecard analysis (push) Failing after 43s
Post / coverage (push) Failing after 47s
Post / images (amd64, addon-manager) (push) Failing after 33s
Post / images (amd64, placement) (push) Failing after 39s
Post / images (amd64, registration) (push) Failing after 32s
Post / images (amd64, registration-operator) (push) Failing after 37s
Post / images (amd64, work) (push) Failing after 39s
Post / images (arm64, addon-manager) (push) Failing after 42s
Post / images (arm64, placement) (push) Failing after 42s
Post / images (arm64, registration) (push) Failing after 36s
Post / images (arm64, registration-operator) (push) Failing after 34s
Post / images (arm64, work) (push) Failing after 27s
Post / image manifest (addon-manager) (push) Has been skipped
Post / image manifest (placement) (push) Has been skipped
Post / image manifest (registration) (push) Has been skipped
Post / image manifest (registration-operator) (push) Has been skipped
Post / image manifest (work) (push) Has been skipped
Post / trigger clusteradm e2e (push) Has been skipped

In the integration test, there is a chance that creating the cluster
fails, since the cluster has already been created by the test. The
alreadyExists error should be ignored.

Signed-off-by: Jian Qiu <jqiu@redhat.com>
This commit is contained in:
Jian Qiu
2025-08-28 11:33:42 +08:00
committed by GitHub
parent d7c82f4d4a
commit c5f6e30ab8
4 changed files with 36 additions and 9 deletions

View File

@@ -52,10 +52,11 @@ func NewManagedClusterCreatingController(
}
func (c *managedClusterCreatingController) sync(ctx context.Context, syncCtx factory.SyncContext) error {
logger := klog.FromContext(ctx)
existingCluster, err := c.hubClusterClient.ClusterV1().ManagedClusters().Get(ctx, c.clusterName, metav1.GetOptions{})
// ManagedCluster is only allowed created during bootstrap. After bootstrap secret expired, an unauthorized error will be got, output log at the debug level
if err != nil && skipUnauthorizedError(err) == nil {
klog.V(4).Infof("unable to get the managed cluster %q from hub: %v", c.clusterName, err)
logger.V(4).Info("unable to get the managed cluster from hub", "clusterName", c.clusterName, "err", err)
return nil
}
@@ -76,8 +77,16 @@ func (c *managedClusterCreatingController) sync(ctx context.Context, syncCtx fac
}
_, err = c.hubClusterClient.ClusterV1().ManagedClusters().Create(ctx, managedCluster, metav1.CreateOptions{})
// ManagedCluster is only allowed created during bootstrap. After bootstrap secret expired, an unauthorized error will be got, skip it
if skipUnauthorizedError(err) != nil {
if errors.IsAlreadyExists(err) {
logger.V(4).Info("managed cluster already exists", "clusterName", c.clusterName)
return nil
}
if err != nil {
// Unauthorized/Forbidden after bootstrap: skip without emitting a create event.
if skipUnauthorizedError(err) == nil {
return nil
}
return fmt.Errorf("unable to create managed cluster with name %q on hub: %w", c.clusterName, err)
}
syncCtx.Recorder().Eventf("ManagedClusterCreated", "Managed cluster %q created on hub", c.clusterName)

View File

@@ -68,7 +68,7 @@ var _ = ginkgo.Describe("Addon Health Check", ginkgo.Label("addon-lease"), func(
return err
}
if !meta.IsStatusConditionTrue(found.Status.Conditions, "Available") {
return fmt.Errorf("condition should be available")
return fmt.Errorf("condition should be available, got %v", found.Status.Conditions)
}
return nil
}).Should(gomega.Succeed())

View File

@@ -130,7 +130,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered,
gomega.Expect(err).ToNot(gomega.HaveOccurred())
ginkgo.By(fmt.Sprintf("create the addon %v on the managed cluster namespace %v", addOnName, universalClusterName))
err = hub.CreateManagedClusterAddOn(universalClusterName, addOnName, "test-ns") // the install namespace will be ignored
err = hub.CreateManagedClusterAddOn(universalClusterName, addOnName, addonInstallNamespace)
if err != nil {
klog.Errorf("failed to create managed cluster addon %v on the managed cluster namespace %v: %v", addOnName, universalClusterName, err)
gomega.Expect(errors.IsAlreadyExists(err)).To(gomega.BeTrue())

View File

@@ -9,6 +9,7 @@ import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/util/retry"
addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
)
@@ -21,13 +22,30 @@ func (hub *Hub) CreateManagedClusterAddOn(managedClusterNamespace, addOnName, in
Namespace: managedClusterNamespace,
Name: addOnName,
},
Spec: addonv1alpha1.ManagedClusterAddOnSpec{
InstallNamespace: installNamespace,
},
Spec: addonv1alpha1.ManagedClusterAddOnSpec{},
},
metav1.CreateOptions{},
)
return err
if err != nil {
return err
}
return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
addOn, err := hub.AddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterNamespace).Get(
context.TODO(), addOnName, metav1.GetOptions{})
if err != nil {
return err
}
if addOn.Status.Namespace == installNamespace {
return nil
}
addOn.Status.Namespace = installNamespace
_, err = hub.AddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterNamespace).UpdateStatus(
context.TODO(), addOn, metav1.UpdateOptions{})
return err
})
}
func (hub *Hub) CreateManagedClusterAddOnLease(addOnInstallNamespace, addOnName string) error {