open-cluster-management-io/ocm: Merge pull request #99 from elgnay/max_custom_cluster_claims

Replace flag cluster-claims-max with max-custom-cluster-claims
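Beyond the rename, the semantics change: reserved (well-known) cluster claims are always exposed, while only the custom claims are sorted, capped at max-custom-cluster-claims, and truncated before being merged into the managed cluster status. The controller's event recorder name also changes from ManagedClusterClaimController to ClusterClaimController. The hunks below cover the claim controller, its unit tests, the spoke agent options, and the integration test.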
@@ -26,27 +26,27 @@ import (
 // managedClusterClaimController exposes cluster claims created on managed cluster on hub after it joins the hub.
 type managedClusterClaimController struct {
-	clusterName      string
-	hubClusterClient clientset.Interface
-	hubClusterLister clusterv1listers.ManagedClusterLister
-	claimLister      clusterv1alpha1listers.ClusterClaimLister
-	maxClusterClaims int
+	clusterName            string
+	hubClusterClient       clientset.Interface
+	hubClusterLister       clusterv1listers.ManagedClusterLister
+	claimLister            clusterv1alpha1listers.ClusterClaimLister
+	maxCustomClusterClaims int
 }
 
 // NewManagedClusterClaimController creates a new managed cluster claim controller on the managed cluster.
 func NewManagedClusterClaimController(
 	clusterName string,
-	maxClusterClaims int,
+	maxCustomClusterClaims int,
 	hubClusterClient clientset.Interface,
 	hubManagedClusterInformer clusterv1informer.ManagedClusterInformer,
 	claimInformer clusterv1alpha1informer.ClusterClaimInformer,
 	recorder events.Recorder) factory.Controller {
 	c := &managedClusterClaimController{
-		clusterName:      clusterName,
-		maxClusterClaims: maxClusterClaims,
-		hubClusterClient: hubClusterClient,
-		hubClusterLister: hubManagedClusterInformer.Lister(),
-		claimLister:      claimInformer.Lister(),
+		clusterName:            clusterName,
+		maxCustomClusterClaims: maxCustomClusterClaims,
+		hubClusterClient:       hubClusterClient,
+		hubClusterLister:       hubManagedClusterInformer.Lister(),
+		claimLister:            claimInformer.Lister(),
 	}
 
 	return factory.New().
@@ -56,7 +56,7 @@ func NewManagedClusterClaimController(
 			return accessor.GetName()
 		}, hubManagedClusterInformer.Informer()).
 		WithSync(c.sync).
-		ToController("ManagedClusterClaimController", recorder)
+		ToController("ClusterClaimController", recorder)
 }
 
 // sync maintains the cluster claims in status of the managed cluster on hub once it joins the hub.
@@ -81,7 +81,7 @@ func (c managedClusterClaimController) sync(ctx context.Context, syncCtx factory
 func (c managedClusterClaimController) exposeClaims(ctx context.Context, syncCtx factory.SyncContext,
 	managedCluster *clusterv1.ManagedCluster) error {
 	reservedClaims := []clusterv1.ManagedClusterClaim{}
-	customizedClaims := []clusterv1.ManagedClusterClaim{}
+	customClaims := []clusterv1.ManagedClusterClaim{}
 	clusterClaims, err := c.claimLister.List(labels.Everything())
 	if err != nil {
 		return fmt.Errorf("unable to list cluster claims: %w", err)
@@ -97,7 +97,7 @@ func (c managedClusterClaimController) exposeClaims(ctx context.Context, syncCtx
 			reservedClaims = append(reservedClaims, managedClusterClaim)
 			continue
 		}
-		customizedClaims = append(customizedClaims, managedClusterClaim)
+		customClaims = append(customClaims, managedClusterClaim)
 	}
 
 	// sort claims by name
@@ -105,18 +105,20 @@ func (c managedClusterClaimController) exposeClaims(ctx context.Context, syncCtx
 		return reservedClaims[i].Name < reservedClaims[j].Name
 	})
 
-	sort.SliceStable(customizedClaims, func(i, j int) bool {
-		return customizedClaims[i].Name < customizedClaims[j].Name
+	sort.SliceStable(customClaims, func(i, j int) bool {
+		return customClaims[i].Name < customClaims[j].Name
 	})
 
-	// merge and truncated claims
-	claims := append(reservedClaims, customizedClaims...)
-	if total := len(claims); total > c.maxClusterClaims {
-		claims = claims[:c.maxClusterClaims]
-		syncCtx.Recorder().Eventf("ExposedClusterClaimsTruncated", "%d cluster claims are found. It exceeds the max cluster claims number (%d). %d cluster claims are not exposed.",
-			total, c.maxClusterClaims, total-c.maxClusterClaims)
+	// truncate custom claims if the number exceeds `max-custom-cluster-claims`
+	if n := len(customClaims); n > c.maxCustomClusterClaims {
+		customClaims = customClaims[:c.maxCustomClusterClaims]
+		syncCtx.Recorder().Eventf("CustomClusterClaimsTruncated", "%d cluster claims are found. It exceeds the max number of custom cluster claims (%d). %d custom cluster claims are not exposed.",
+			n, c.maxCustomClusterClaims, n-c.maxCustomClusterClaims)
 	}
 
+	// merge reserved claims and custom claims
+	claims := append(reservedClaims, customClaims...)
+
 	// update the status of the managed cluster
 	updateStatusFuncs := []helpers.UpdateManagedClusterStatusFunc{updateClusterClaimsFn(clusterv1.ManagedClusterStatus{
 		ClusterClaims: claims,
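
To make the new behavior concrete, here is a minimal, self-contained Go sketch of the truncate-then-merge step above; the Claim type and the mergeClaims helper are hypothetical stand-ins for illustration, not code from this repository:

package main

import (
	"fmt"
	"sort"
)

// Claim is a simplified stand-in for clusterv1.ManagedClusterClaim.
type Claim struct {
	Name  string
	Value string
}

// mergeClaims mirrors the commit's logic: custom claims are sorted by
// name and capped at maxCustomClusterClaims; reserved claims are never
// truncated and always come first in the merged result.
func mergeClaims(reserved, custom []Claim, maxCustomClusterClaims int) []Claim {
	sort.SliceStable(custom, func(i, j int) bool {
		return custom[i].Name < custom[j].Name
	})
	if n := len(custom); n > maxCustomClusterClaims {
		custom = custom[:maxCustomClusterClaims]
	}
	return append(reserved, custom...)
}

func main() {
	reserved := []Claim{{Name: "id.k8s.io", Value: "cluster1"}}
	custom := []Claim{{Name: "c"}, {Name: "a"}, {Name: "b"}}
	// With a cap of 2, only the first two custom claims by name survive,
	// so the result is the reserved claim plus "a" and "b".
	fmt.Println(mergeClaims(reserved, custom, 2))
}

Because reserved claims no longer count against the cap, growth in the set of well-known claims does not squeeze custom claims out of the status; each custom claim competes only with other custom claims for the budget.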
@@ -83,11 +83,11 @@ func TestSync(t *testing.T) {
 	}
 
 	ctrl := managedClusterClaimController{
-		clusterName:      testinghelpers.TestManagedClusterName,
-		maxClusterClaims: 20,
-		hubClusterClient: clusterClient,
-		hubClusterLister: clusterInformerFactory.Cluster().V1().ManagedClusters().Lister(),
-		claimLister:      clusterInformerFactory.Cluster().V1alpha1().ClusterClaims().Lister(),
+		clusterName:            testinghelpers.TestManagedClusterName,
+		maxCustomClusterClaims: 20,
+		hubClusterClient:       clusterClient,
+		hubClusterLister:       clusterInformerFactory.Cluster().V1().ManagedClusters().Lister(),
+		claimLister:            clusterInformerFactory.Cluster().V1alpha1().ClusterClaims().Lister(),
 	}
 
 	syncErr := ctrl.sync(context.TODO(), testinghelpers.NewFakeSyncContext(t, ""))
@@ -100,12 +100,12 @@ func TestSync(t *testing.T) {
 
 func TestExposeClaims(t *testing.T) {
 	cases := []struct {
-		name             string
-		cluster          *clusterv1.ManagedCluster
-		claims           []*clusterv1alpha1.ClusterClaim
-		maxClusterClaims int
-		validateActions  func(t *testing.T, actions []clienttesting.Action)
-		expectedErr      string
+		name                   string
+		cluster                *clusterv1.ManagedCluster
+		claims                 []*clusterv1alpha1.ClusterClaim
+		maxCustomClusterClaims int
+		validateActions        func(t *testing.T, actions []clienttesting.Action)
+		expectedErr            string
 	}{
 		{
 			name: "sync claims into status of the managed cluster",
@@ -136,7 +136,7 @@ func TestExposeClaims(t *testing.T) {
 			},
 		},
 		{
-			name:    "truncate cluster claims",
+			name:    "truncate custom cluster claims",
 			cluster: testinghelpers.NewJoinedManagedCluster(),
 			claims: []*clusterv1alpha1.ClusterClaim{
 				{
@@ -172,7 +172,7 @@ func TestExposeClaims(t *testing.T) {
 					},
 				},
 			},
-			maxClusterClaims: 3,
+			maxCustomClusterClaims: 2,
 			validateActions: func(t *testing.T, actions []clienttesting.Action) {
 				testinghelpers.AssertActions(t, actions, "get", "update")
 				cluster := actions[1].(clienttesting.UpdateActionImpl).Object
@@ -232,16 +232,16 @@ func TestExposeClaims(t *testing.T) {
 			clusterInformerFactory.Cluster().V1alpha1().ClusterClaims().Informer().GetStore().Add(claim)
 		}
 
-		if c.maxClusterClaims == 0 {
-			c.maxClusterClaims = 20
+		if c.maxCustomClusterClaims == 0 {
+			c.maxCustomClusterClaims = 20
 		}
 
 		ctrl := managedClusterClaimController{
-			clusterName:      testinghelpers.TestManagedClusterName,
-			maxClusterClaims: c.maxClusterClaims,
-			hubClusterClient: clusterClient,
-			hubClusterLister: clusterInformerFactory.Cluster().V1().ManagedClusters().Lister(),
-			claimLister:      clusterInformerFactory.Cluster().V1alpha1().ClusterClaims().Lister(),
+			clusterName:            testinghelpers.TestManagedClusterName,
+			maxCustomClusterClaims: c.maxCustomClusterClaims,
+			hubClusterClient:       clusterClient,
+			hubClusterLister:       clusterInformerFactory.Cluster().V1().ManagedClusters().Lister(),
+			claimLister:            clusterInformerFactory.Cluster().V1alpha1().ClusterClaims().Lister(),
 		}
 
 		syncErr := ctrl.exposeClaims(context.TODO(), testinghelpers.NewFakeSyncContext(t, c.cluster.Name), c.cluster)
@@ -49,7 +49,7 @@ type SpokeAgentOptions struct {
 	HubKubeconfigDir         string
 	SpokeExternalServerURLs  []string
 	ClusterHealthCheckPeriod time.Duration
-	MaxClusterClaims         int
+	MaxCustomClusterClaims   int
 }
 
 // NewSpokeAgentOptions returns a SpokeAgentOptions
@@ -58,7 +58,7 @@ func NewSpokeAgentOptions() *SpokeAgentOptions {
 		HubKubeconfigSecret:      "hub-kubeconfig-secret",
 		HubKubeconfigDir:         "/spoke/hub-kubeconfig",
 		ClusterHealthCheckPeriod: 1 * time.Minute,
-		MaxClusterClaims:         20,
+		MaxCustomClusterClaims:   20,
 	}
 }
 
@@ -260,7 +260,7 @@ func (o *SpokeAgentOptions) RunSpokeAgent(ctx context.Context, controllerContext
 	// create managedClusterClaimController to sync cluster claims
 	managedClusterClaimController := managedcluster.NewManagedClusterClaimController(
 		o.ClusterName,
-		o.MaxClusterClaims,
+		o.MaxCustomClusterClaims,
 		hubClusterClient,
 		hubClusterInformerFactory.Cluster().V1().ManagedClusters(),
 		spokeClusterInformers.Cluster().V1alpha1().ClusterClaims(),
@@ -296,8 +296,8 @@ func (o *SpokeAgentOptions) AddFlags(fs *pflag.FlagSet) {
 		"A list of reachable spoke cluster api server URLs for hub cluster.")
 	fs.DurationVar(&o.ClusterHealthCheckPeriod, "cluster-healthcheck-period", o.ClusterHealthCheckPeriod,
 		"The period to check managed cluster kube-apiserver health")
-	fs.IntVar(&o.MaxClusterClaims, "cluster-claims-max", o.MaxClusterClaims,
-		"The max number of cluster claims to expose.")
+	fs.IntVar(&o.MaxCustomClusterClaims, "max-custom-cluster-claims", o.MaxCustomClusterClaims,
+		"The max number of custom cluster claims to expose.")
 }
 
 // Validate verifies the inputs.
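
For the flag wiring, here is a rough, self-contained sketch of how the renamed flag is registered and parsed with pflag; the trimmed-down options struct below is a hypothetical stand-in for SpokeAgentOptions, not the real type:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

// options is a simplified stand-in for SpokeAgentOptions.
type options struct {
	MaxCustomClusterClaims int
}

func main() {
	// The default of 20 matches NewSpokeAgentOptions in this commit.
	o := &options{MaxCustomClusterClaims: 20}

	fs := pflag.NewFlagSet("agent", pflag.ExitOnError)
	// Same registration shape as SpokeAgentOptions.AddFlags above.
	fs.IntVar(&o.MaxCustomClusterClaims, "max-custom-cluster-claims", o.MaxCustomClusterClaims,
		"The max number of custom cluster claims to expose.")

	// A user raising the cap on the command line:
	_ = fs.Parse([]string{"--max-custom-cluster-claims=50"})
	fmt.Println(o.MaxCustomClusterClaims) // prints 50
}

Since the old cluster-claims-max flag is removed outright rather than deprecated, an agent invocation that still passes it would presumably fail flag parsing until updated.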
@@ -24,7 +24,7 @@ import (
 var _ = ginkgo.Describe("Cluster Claim", func() {
 	var managedClusterName, hubKubeconfigSecret, hubKubeconfigDir string
 	var claims []*clusterv1alpha1.ClusterClaim
-	var maxClusterClaims int
+	var maxCustomClusterClaims int
 	var err error
 
 	ginkgo.JustBeforeEach(func() {
@@ -55,7 +55,7 @@ var _ = ginkgo.Describe("Cluster Claim", func() {
 			HubKubeconfigSecret:      hubKubeconfigSecret,
 			HubKubeconfigDir:         hubKubeconfigDir,
 			ClusterHealthCheckPeriod: 1 * time.Minute,
-			MaxClusterClaims:         maxClusterClaims,
+			MaxCustomClusterClaims:   maxCustomClusterClaims,
 		}
 		err := agentOptions.RunSpokeAgent(context.Background(), &controllercmd.ControllerContext{
 			KubeConfig: spokeCfg,
@@ -145,7 +145,7 @@ var _ = ginkgo.Describe("Cluster Claim", func() {
 
 	ginkgo.Context("Sync all claims", func() {
 		ginkgo.BeforeEach(func() {
-			maxClusterClaims = 20
+			maxCustomClusterClaims = 20
 			claims = []*clusterv1alpha1.ClusterClaim{
 				{
 					ObjectMeta: metav1.ObjectMeta{
@@ -248,7 +248,7 @@ var _ = ginkgo.Describe("Cluster Claim", func() {
 
 	ginkgo.Context("Truncate exposed claims", func() {
 		ginkgo.BeforeEach(func() {
-			maxClusterClaims = 5
+			maxCustomClusterClaims = 5
 			claims = []*clusterv1alpha1.ClusterClaim{}
 			for i := 0; i < 10; i++ {
 				claims = append(claims, &clusterv1alpha1.ClusterClaim{
@@ -272,7 +272,7 @@ var _ = ginkgo.Describe("Cluster Claim", func() {
 				return false
 			}
 
-			return len(spokeCluster.Status.ClusterClaims) == maxClusterClaims
+			return len(spokeCluster.Status.ClusterClaims) == maxCustomClusterClaims
 		}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
 	})
 })
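
A note on the expectation above: the test generates ten claims with a custom cap of five, and the assertion requires the exposed total to equal maxCustomClusterClaims exactly. That presumably means none of the generated claim names falls in the reserved set, since reserved claims would be exposed on top of the capped custom ones.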