Mirror of https://github.com/open-cluster-management-io/ocm.git (synced 2026-02-14 18:09:57 +00:00)
set configured condition in mca (#635)
Signed-off-by: haoqing0110 <qhao@redhat.com>
@@ -59,7 +59,7 @@ metadata:
     categories: Integration & Delivery,OpenShift Optional
     certified: "false"
     containerImage: quay.io/open-cluster-management/registration-operator:latest
-    createdAt: "2024-07-14T00:10:06Z"
+    createdAt: "2024-10-08T08:24:18Z"
     description: Manages the installation and upgrade of the ClusterManager.
     operators.operatorframework.io/builder: operator-sdk-v1.32.0
     operators.operatorframework.io/project_layout: go.kubebuilder.io/v3
@@ -289,6 +289,39 @@ spec:
                     If it is set empty, use the default value: 50
                   format: int32
                   type: integer
+                registrationDriver:
+                  description: This provides driver details required to register
+                    with hub
+                  properties:
+                    authType:
+                      default: csr
+                      description: Type of the authentication used by managedcluster
+                        to register as well as pull work from hub. Possible values
+                        are csr and awsirsa.
+                      enum:
+                      - csr
+                      - awsirsa
+                      type: string
+                    awsIrsa:
+                      description: |-
+                        Contain the details required for registering with hub cluster (ie: an EKS cluster) using AWS IAM roles for service account.
+                        This is required only when the authType is awsirsa.
+                      properties:
+                        hubClusterArn:
+                          description: |-
+                            The arn of the hub cluster (ie: an EKS cluster). This will be required to pass information to hub, which hub will use to create IAM identities for this klusterlet.
+                            Example - arn:eks:us-west-2:12345678910:cluster/hub-cluster1.
+                          minLength: 1
+                          type: string
+                        managedClusterArn:
+                          description: |-
+                            The arn of the managed cluster (ie: an EKS cluster). This will be required to generate the md5hash which will be used as a suffix to create IAM role on hub
+                            as well as used by kluslerlet-agent, to assume role suffixed with the md5hash, on startup.
+                            Example - arn:eks:us-west-2:12345678910:cluster/managed-cluster1.
+                          minLength: 1
+                          type: string
+                      type: object
+                  type: object
               type: object
             registrationImagePullSpec:
               description: |-
@@ -289,6 +289,39 @@ spec:
                     If it is set empty, use the default value: 50
                   format: int32
                   type: integer
+                registrationDriver:
+                  description: This provides driver details required to register
+                    with hub
+                  properties:
+                    authType:
+                      default: csr
+                      description: Type of the authentication used by managedcluster
+                        to register as well as pull work from hub. Possible values
+                        are csr and awsirsa.
+                      enum:
+                      - csr
+                      - awsirsa
+                      type: string
+                    awsIrsa:
+                      description: |-
+                        Contain the details required for registering with hub cluster (ie: an EKS cluster) using AWS IAM roles for service account.
+                        This is required only when the authType is awsirsa.
+                      properties:
+                        hubClusterArn:
+                          description: |-
+                            The arn of the hub cluster (ie: an EKS cluster). This will be required to pass information to hub, which hub will use to create IAM identities for this klusterlet.
+                            Example - arn:eks:us-west-2:12345678910:cluster/hub-cluster1.
+                          minLength: 1
+                          type: string
+                        managedClusterArn:
+                          description: |-
+                            The arn of the managed cluster (ie: an EKS cluster). This will be required to generate the md5hash which will be used as a suffix to create IAM role on hub
+                            as well as used by kluslerlet-agent, to assume role suffixed with the md5hash, on startup.
+                            Example - arn:eks:us-west-2:12345678910:cluster/managed-cluster1.
+                          minLength: 1
+                          type: string
+                      type: object
+                  type: object
               type: object
             registrationImagePullSpec:
              description: |-
@@ -31,7 +31,7 @@ metadata:
     categories: Integration & Delivery,OpenShift Optional
     certified: "false"
     containerImage: quay.io/open-cluster-management/registration-operator:latest
-    createdAt: "2024-07-14T00:10:07Z"
+    createdAt: "2024-10-08T08:24:18Z"
     description: Manages the installation and upgrade of the Klusterlet.
     operators.operatorframework.io/builder: operator-sdk-v1.32.0
     operators.operatorframework.io/project_layout: go.kubebuilder.io/v3
@@ -289,6 +289,39 @@ spec:
                     If it is set empty, use the default value: 50
                   format: int32
                   type: integer
+                registrationDriver:
+                  description: This provides driver details required to register
+                    with hub
+                  properties:
+                    authType:
+                      default: csr
+                      description: Type of the authentication used by managedcluster
+                        to register as well as pull work from hub. Possible values
+                        are csr and awsirsa.
+                      enum:
+                      - csr
+                      - awsirsa
+                      type: string
+                    awsIrsa:
+                      description: |-
+                        Contain the details required for registering with hub cluster (ie: an EKS cluster) using AWS IAM roles for service account.
+                        This is required only when the authType is awsirsa.
+                      properties:
+                        hubClusterArn:
+                          description: |-
+                            The arn of the hub cluster (ie: an EKS cluster). This will be required to pass information to hub, which hub will use to create IAM identities for this klusterlet.
+                            Example - arn:eks:us-west-2:12345678910:cluster/hub-cluster1.
+                          minLength: 1
+                          type: string
+                        managedClusterArn:
+                          description: |-
+                            The arn of the managed cluster (ie: an EKS cluster). This will be required to generate the md5hash which will be used as a suffix to create IAM role on hub
+                            as well as used by kluslerlet-agent, to assume role suffixed with the md5hash, on startup.
+                            Example - arn:eks:us-west-2:12345678910:cluster/managed-cluster1.
+                          minLength: 1
+                          type: string
+                      type: object
+                  type: object
               type: object
             registrationImagePullSpec:
              description: |-
go.mod | 2
@@ -33,7 +33,7 @@ require (
 	k8s.io/kube-aggregator v0.30.3
 	k8s.io/utils v0.0.0-20240310230437-4693a0247e57
 	open-cluster-management.io/addon-framework v0.10.1-0.20240703130731-ba7fd000a03a
-	open-cluster-management.io/api v0.14.1-0.20240627145512-bd6f2229b53c
+	open-cluster-management.io/api v0.14.1-0.20241008081048-f6c658202790
 	open-cluster-management.io/sdk-go v0.14.1-0.20240918072645-225dcf1b6866
 	sigs.k8s.io/controller-runtime v0.18.5
 	sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96
go.sum | 4
@@ -441,8 +441,8 @@ k8s.io/utils v0.0.0-20240310230437-4693a0247e57 h1:gbqbevonBh57eILzModw6mrkbwM0g
 k8s.io/utils v0.0.0-20240310230437-4693a0247e57/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
 open-cluster-management.io/addon-framework v0.10.1-0.20240703130731-ba7fd000a03a h1:La1cYE3xkPFS2OJnsPQbkkahKE7yabuPcIISRfb4qsg=
 open-cluster-management.io/addon-framework v0.10.1-0.20240703130731-ba7fd000a03a/go.mod h1:C1VETu/CIQKYfMiVAgNzPEUHjCpL9P1Z/KsGhHa4kl4=
-open-cluster-management.io/api v0.14.1-0.20240627145512-bd6f2229b53c h1:gYfgkX/U6fv2d3Ly8D6N1GM9zokORupLSgCxx791zZw=
-open-cluster-management.io/api v0.14.1-0.20240627145512-bd6f2229b53c/go.mod h1:9erZEWEn4bEqh0nIX2wA7f/s3KCuFycQdBrPrRzi0QM=
+open-cluster-management.io/api v0.14.1-0.20241008081048-f6c658202790 h1:XszHWAR6PhYXBFPN4qgk8D5HVl8W/61j+bNMsXVuW7U=
+open-cluster-management.io/api v0.14.1-0.20241008081048-f6c658202790/go.mod h1:9erZEWEn4bEqh0nIX2wA7f/s3KCuFycQdBrPrRzi0QM=
 open-cluster-management.io/sdk-go v0.14.1-0.20240918072645-225dcf1b6866 h1:nxYrSsYwl9Mq8DuaJ0K98PCpuGsai+AvXbggMfZDCGI=
 open-cluster-management.io/sdk-go v0.14.1-0.20240918072645-225dcf1b6866/go.mod h1:jCyXPY900UK1n4xwUBWSz27s7lcXN/fhIDF6xu3jIHw=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 h1:/U5vjBbQn3RChhv7P11uhYvCSm5G2GaIi5AIGBS6r4c=
@@ -3,7 +3,10 @@ package addonconfiguration
 import (
 	"context"
 
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/apimachinery/pkg/util/sets"
 
 	addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
 	addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned"
@@ -19,13 +22,50 @@ type managedClusterAddonConfigurationReconciler struct {
 func (d *managedClusterAddonConfigurationReconciler) reconcile(
 	ctx context.Context, cma *addonv1alpha1.ClusterManagementAddOn, graph *configurationGraph) (*addonv1alpha1.ClusterManagementAddOn, reconcileState, error) {
 	var errs []error
+	configured := sets.Set[string]{}
+
+	// Update the config references and set the "configured" condition to true for addons that are ready for rollout.
+	// These addons are part of the current rollout batch according to the strategy.
 	for _, addon := range graph.getAddonsToUpdate() {
-		mca := d.mergeAddonConfig(addon.mca, addon.desiredConfigs)
-		patcher := patcher.NewPatcher[
-			*addonv1alpha1.ManagedClusterAddOn, addonv1alpha1.ManagedClusterAddOnSpec, addonv1alpha1.ManagedClusterAddOnStatus](
-			d.addonClient.AddonV1alpha1().ManagedClusterAddOns(mca.Namespace))
-		_, err := patcher.PatchStatus(ctx, mca, mca.Status, addon.mca.Status)
+		// update mca config references in status
+		newAddon := d.mergeAddonConfig(addon.mca, addon.desiredConfigs)
+		// update mca configured condition to true
+		d.setCondition(newAddon, metav1.ConditionTrue, "ConfigurationsConfigured", "Configurations configured")
+
+		err := d.patchAddonStatus(ctx, newAddon, addon.mca)
 		if err != nil {
 			errs = append(errs, err)
 		}
+
+		configured.Insert(addon.mca.Namespace)
 	}
+
+	// Set the "configured" condition to false for addons whose configurations have not been synced yet
+	// but are waiting for rollout.
+	for _, addon := range graph.getAddonsToApply() {
+		// Skip addons that have already been configured.
+		if configured.Has(addon.mca.Namespace) {
+			continue
+		}
+		newAddon := addon.mca.DeepCopy()
+		d.setCondition(newAddon, metav1.ConditionFalse, "ConfigurationsNotConfigured", "Configurations updated and not configured yet")
+
+		err := d.patchAddonStatus(ctx, newAddon, addon.mca)
+		if err != nil {
+			errs = append(errs, err)
+		}
+	}
+
+	// Set the "configured" condition to true for addons that have successfully completed rollout.
+	// This includes:
+	// a. Addons without any configurations that have had their rollout status set to success in setRolloutStatus().
+	// b. Addons with configurations and already rollout successfully. In upgrade scenario, when the
+	//    addon configurations do not change while addon components upgrade, should set condition to true.
+	for _, addon := range graph.getAddonsSucceeded() {
+		newAddon := addon.mca.DeepCopy()
+		d.setCondition(newAddon, metav1.ConditionTrue, "ConfigurationsConfigured", "Configurations configured")
+
+		err := d.patchAddonStatus(ctx, newAddon, addon.mca)
+		if err != nil {
+			errs = append(errs, err)
+		}
@@ -88,3 +128,25 @@ func (d *managedClusterAddonConfigurationReconciler) mergeAddonConfig(
 	mcaCopy.Status.ConfigReferences = configRefs
 	return mcaCopy
 }
+
+// setCondition updates the configured condition for the addon
+func (d *managedClusterAddonConfigurationReconciler) setCondition(
+	addon *addonv1alpha1.ManagedClusterAddOn, status metav1.ConditionStatus, reason, message string) {
+	meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{
+		Type:    addonv1alpha1.ManagedClusterAddOnConditionConfigured,
+		Status:  status,
+		Reason:  reason,
+		Message: message,
+	})
+}
+
+// patchAddonStatus patches the status of the addon
+func (d *managedClusterAddonConfigurationReconciler) patchAddonStatus(
+	ctx context.Context, newaddon *addonv1alpha1.ManagedClusterAddOn, oldaddon *addonv1alpha1.ManagedClusterAddOn) error {
+	patcher := patcher.NewPatcher[
+		*addonv1alpha1.ManagedClusterAddOn, addonv1alpha1.ManagedClusterAddOnSpec, addonv1alpha1.ManagedClusterAddOnStatus](
+		d.addonClient.AddonV1alpha1().ManagedClusterAddOns(newaddon.Namespace))
+
+	_, err := patcher.PatchStatus(ctx, newaddon, newaddon.Status, oldaddon.Status)
+	return err
+}
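For reference, a minimal sketch (not part of this commit) of how a consumer could read back the condition the reconciler above patches. The helper name is hypothetical; the condition type and the apimachinery helper are the ones used in the diff:

package addonconfiguration

import (
	"k8s.io/apimachinery/pkg/api/meta"

	addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
)

// isConfigured reports whether the reconciler above has marked the addon's
// configurations as synced (condition "Configured" with status True).
func isConfigured(mca *addonv1alpha1.ManagedClusterAddOn) bool {
	return meta.IsStatusConditionTrue(mca.Status.Conditions,
		addonv1alpha1.ManagedClusterAddOnConditionConfigured)
}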
@@ -8,6 +8,7 @@ import (
 	"time"
 
 	apiequality "k8s.io/apimachinery/pkg/api/equality"
+	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/intstr"
@@ -48,7 +49,12 @@ func TestAddonConfigReconcile(t *testing.T) {
 			clusterManagementAddon: addontesting.NewClusterManagementAddon("test", "", "").Build(),
 			placements:             []runtime.Object{},
 			placementDecisions:     []runtime.Object{},
-			validateAddonActions:   addontesting.AssertNoActions,
+			validateAddonActions: func(t *testing.T, actions []clienttesting.Action) {
+				addontesting.AssertActions(t, actions, "patch", "patch")
+				sort.Sort(byPatchName(actions))
+				expectPatchConditionAction(t, actions[0], metav1.ConditionTrue)
+				expectPatchConditionAction(t, actions[1], metav1.ConditionTrue)
+			},
 		},
 		{
 			name: "manual installStrategy",
@@ -80,6 +86,17 @@ func TestAddonConfigReconcile(t *testing.T) {
 					},
 					LastObservedGeneration: 0,
 				}})
+				expectPatchConfigurationAction(t, actions[1], []addonv1alpha1.ConfigReference{{
+					ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
+					ConfigReferent:      addonv1alpha1.ConfigReferent{Name: "test"},
+					DesiredConfig: &addonv1alpha1.ConfigSpecHash{
+						ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test"},
+						SpecHash:       "hash",
+					},
+					LastObservedGeneration: 0,
+				}})
+				expectPatchConditionAction(t, actions[0], metav1.ConditionTrue)
+				expectPatchConditionAction(t, actions[1], metav1.ConditionTrue)
 			},
 		},
 		{
@@ -151,6 +168,8 @@ func TestAddonConfigReconcile(t *testing.T) {
 					},
 					LastObservedGeneration: 0,
 				}})
+				expectPatchConditionAction(t, actions[0], metav1.ConditionTrue)
+				expectPatchConditionAction(t, actions[1], metav1.ConditionTrue)
 			},
 		},
 		{
@@ -275,6 +294,8 @@ func TestAddonConfigReconcile(t *testing.T) {
 					LastObservedGeneration: 0,
 					},
 				})
+				expectPatchConditionAction(t, actions[0], metav1.ConditionTrue)
+				expectPatchConditionAction(t, actions[1], metav1.ConditionTrue)
 			},
 		},
 		{
@@ -397,6 +418,8 @@ func TestAddonConfigReconcile(t *testing.T) {
 					},
 					LastObservedGeneration: 0,
 				}})
+				expectPatchConditionAction(t, actions[0], metav1.ConditionTrue)
+				expectPatchConditionAction(t, actions[1], metav1.ConditionTrue)
 			},
 		},
 		{
@@ -477,6 +500,8 @@ func TestAddonConfigReconcile(t *testing.T) {
 					},
 					LastObservedGeneration: 0,
 				}})
+				expectPatchConditionAction(t, actions[0], metav1.ConditionTrue)
+				expectPatchConditionAction(t, actions[1], metav1.ConditionTrue)
 			},
 		},
 		{
@@ -543,6 +568,7 @@ func TestAddonConfigReconcile(t *testing.T) {
 					},
 					LastObservedGeneration: 0,
 				}})
+				expectPatchConditionAction(t, actions[0], metav1.ConditionTrue)
 			},
 		},
 		{
@@ -616,6 +642,7 @@ func TestAddonConfigReconcile(t *testing.T) {
 					},
 					LastObservedGeneration: 1,
 				}})
+				expectPatchConditionAction(t, actions[0], metav1.ConditionTrue)
 			},
 		},
 		{
@@ -629,7 +656,12 @@ func TestAddonConfigReconcile(t *testing.T) {
 						SpecHash: "hash1",
 					},
 					LastObservedGeneration: 1,
-				}}, nil),
+				}}, []metav1.Condition{{
+					Type:    addonv1alpha1.ManagedClusterAddOnConditionConfigured,
+					Status:  metav1.ConditionTrue,
+					Reason:  "ConfigurationsConfigured",
+					Message: "Configurations configured",
+				}}),
 			},
 			placements: []runtime.Object{
 				&clusterv1beta1.Placement{ObjectMeta: metav1.ObjectMeta{Name: "test-placement", Namespace: "default"}},
@@ -716,9 +748,12 @@ func TestAddonConfigReconcile(t *testing.T) {
 				},
 			}).Build(),
 			validateAddonActions: func(t *testing.T, actions []clienttesting.Action) {
-				addontesting.AssertActions(t, actions, "patch")
+				addontesting.AssertActions(t, actions, "patch", "patch", "patch")
 				sort.Sort(byPatchName(actions))
-				expectPatchConfigurationAction(t, actions[0], []addonv1alpha1.ConfigReference{{
+				// cluster1 is not in installstrategy and has no config
+				expectPatchConditionAction(t, actions[0], metav1.ConditionTrue)
+				// cluster2 is in installstrategy and is the first to rollout
+				expectPatchConfigurationAction(t, actions[1], []addonv1alpha1.ConfigReference{{
 					ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
 					ConfigReferent:      addonv1alpha1.ConfigReferent{Name: "test1"},
 					DesiredConfig: &addonv1alpha1.ConfigSpecHash{
@@ -727,6 +762,9 @@ func TestAddonConfigReconcile(t *testing.T) {
 					},
 					LastObservedGeneration: 0,
 				}})
+				expectPatchConditionAction(t, actions[1], metav1.ConditionTrue)
+				// cluster3 is in installstrategy and is not rollout
+				expectPatchConditionAction(t, actions[2], metav1.ConditionFalse)
 			},
 		},
 		{
@@ -773,9 +811,12 @@ func TestAddonConfigReconcile(t *testing.T) {
 				},
 			}).Build(),
 			validateAddonActions: func(t *testing.T, actions []clienttesting.Action) {
-				addontesting.AssertActions(t, actions, "patch")
+				addontesting.AssertActions(t, actions, "patch", "patch", "patch")
 				sort.Sort(byPatchName(actions))
-				expectPatchConfigurationAction(t, actions[0], []addonv1alpha1.ConfigReference{{
+				// cluster1 is not in installstrategy and has no config
+				expectPatchConditionAction(t, actions[0], metav1.ConditionTrue)
+				// cluster2 is in installstrategy and is the first to rollout
+				expectPatchConfigurationAction(t, actions[1], []addonv1alpha1.ConfigReference{{
 					ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
 					ConfigReferent:      addonv1alpha1.ConfigReferent{Name: "test1"},
 					DesiredConfig: &addonv1alpha1.ConfigSpecHash{
@@ -784,6 +825,9 @@ func TestAddonConfigReconcile(t *testing.T) {
 					},
 					LastObservedGeneration: 0,
 				}})
+				expectPatchConditionAction(t, actions[1], metav1.ConditionTrue)
+				// cluster3 is in installstrategy and is not rollout
+				expectPatchConditionAction(t, actions[2], metav1.ConditionFalse)
 			},
 		},
 		{
@@ -827,17 +871,11 @@ func TestAddonConfigReconcile(t *testing.T) {
 				},
 			}).Build(),
 			validateAddonActions: func(t *testing.T, actions []clienttesting.Action) {
-				addontesting.AssertActions(t, actions, "patch", "patch")
+				addontesting.AssertActions(t, actions, "patch", "patch", "patch")
 				sort.Sort(byPatchName(actions))
-				expectPatchConfigurationAction(t, actions[0], []addonv1alpha1.ConfigReference{{
-					ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
-					ConfigReferent:      addonv1alpha1.ConfigReferent{Name: "test1"},
-					DesiredConfig: &addonv1alpha1.ConfigSpecHash{
-						ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
-						SpecHash:       "hash1",
-					},
-					LastObservedGeneration: 0,
-				}})
+				// cluster1 is not in installstrategy and has no config
+				expectPatchConditionAction(t, actions[0], metav1.ConditionTrue)
+				// cluster2 is in installstrategy and rollout
 				expectPatchConfigurationAction(t, actions[1], []addonv1alpha1.ConfigReference{{
 					ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
 					ConfigReferent:      addonv1alpha1.ConfigReferent{Name: "test1"},
@@ -847,6 +885,18 @@ func TestAddonConfigReconcile(t *testing.T) {
 					},
 					LastObservedGeneration: 0,
 				}})
+				expectPatchConditionAction(t, actions[1], metav1.ConditionTrue)
+				// cluster2 is in installstrategy and rollout
+				expectPatchConfigurationAction(t, actions[2], []addonv1alpha1.ConfigReference{{
+					ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
+					ConfigReferent:      addonv1alpha1.ConfigReferent{Name: "test1"},
+					DesiredConfig: &addonv1alpha1.ConfigSpecHash{
+						ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
+						SpecHash:       "hash1",
+					},
+					LastObservedGeneration: 0,
+				}})
+				expectPatchConditionAction(t, actions[2], metav1.ConditionTrue)
 			},
 		},
 		{
@@ -913,8 +963,9 @@ func TestAddonConfigReconcile(t *testing.T) {
 				},
 			}).Build(),
 			validateAddonActions: func(t *testing.T, actions []clienttesting.Action) {
-				addontesting.AssertActions(t, actions, "patch", "patch")
+				addontesting.AssertActions(t, actions, "patch", "patch", "patch")
+				sort.Sort(byPatchName(actions))
 				// cluster1 and cluster2 are rollout
 				expectPatchConfigurationAction(t, actions[0], []addonv1alpha1.ConfigReference{{
 					ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
 					ConfigReferent:      addonv1alpha1.ConfigReferent{Name: "test1"},
@@ -933,6 +984,9 @@ func TestAddonConfigReconcile(t *testing.T) {
 					},
 					LastObservedGeneration: 0,
 				}})
+				expectPatchConditionAction(t, actions[0], metav1.ConditionTrue)
+				expectPatchConditionAction(t, actions[1], metav1.ConditionTrue)
+				expectPatchConditionAction(t, actions[2], metav1.ConditionFalse)
 			},
 		},
 		{
@@ -991,8 +1045,9 @@ func TestAddonConfigReconcile(t *testing.T) {
 				},
 			}).Build(),
 			validateAddonActions: func(t *testing.T, actions []clienttesting.Action) {
-				addontesting.AssertActions(t, actions, "patch", "patch")
+				addontesting.AssertActions(t, actions, "patch", "patch", "patch")
+				sort.Sort(byPatchName(actions))
 				// cluster1 and cluster2 are rollout
 				expectPatchConfigurationAction(t, actions[0], []addonv1alpha1.ConfigReference{{
 					ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
 					ConfigReferent:      addonv1alpha1.ConfigReferent{Name: "test1"},
@@ -1011,6 +1066,9 @@ func TestAddonConfigReconcile(t *testing.T) {
 					},
 					LastObservedGeneration: 0,
 				}})
+				expectPatchConditionAction(t, actions[0], metav1.ConditionTrue)
+				expectPatchConditionAction(t, actions[1], metav1.ConditionTrue)
+				expectPatchConditionAction(t, actions[2], metav1.ConditionFalse)
 			},
 		},
 		{
@@ -1077,8 +1135,9 @@ func TestAddonConfigReconcile(t *testing.T) {
 				},
 			}).Build(),
 			validateAddonActions: func(t *testing.T, actions []clienttesting.Action) {
-				addontesting.AssertActions(t, actions, "patch", "patch")
+				addontesting.AssertActions(t, actions, "patch", "patch", "patch")
+				sort.Sort(byPatchName(actions))
 				// cluster1 and cluster2 are rollout
 				expectPatchConfigurationAction(t, actions[0], []addonv1alpha1.ConfigReference{{
 					ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
 					ConfigReferent:      addonv1alpha1.ConfigReferent{Name: "test1"},
@@ -1097,6 +1156,9 @@ func TestAddonConfigReconcile(t *testing.T) {
 					},
 					LastObservedGeneration: 0,
 				}})
+				expectPatchConditionAction(t, actions[0], metav1.ConditionTrue)
+				expectPatchConditionAction(t, actions[1], metav1.ConditionTrue)
+				expectPatchConditionAction(t, actions[2], metav1.ConditionFalse)
 			},
 		},
 	}
@@ -1207,3 +1269,17 @@ func expectPatchConfigurationAction(t *testing.T, action clienttesting.Action, e
 		t.Errorf("Configuration not correctly patched, expected %v, actual %v", expected, mca.Status.ConfigReferences)
 	}
 }
+
+func expectPatchConditionAction(t *testing.T, action clienttesting.Action, expected metav1.ConditionStatus) {
+	patch := action.(clienttesting.PatchActionImpl).GetPatch()
+	mca := &addonv1alpha1.ManagedClusterAddOn{}
+	err := json.Unmarshal(patch, mca)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	actualCond := meta.FindStatusCondition(mca.Status.Conditions, addonv1alpha1.ManagedClusterAddOnConditionConfigured)
+	if actualCond == nil || actualCond.Status != expected {
+		t.Errorf("Condition not correctly patched, expected %v, actual %v", expected, mca.Status.Conditions)
+	}
+}
@@ -301,6 +301,12 @@ func (g *configurationGraph) getPlacementNodes() map[addonv1alpha1.PlacementRef]
 	return placementNodeMap
 }
 
+// getAddonsToUpdate returns the list of addons to be updated based on the rollout strategy.
+// It is a subset of the addons returned by getAddonsToApply.
+// For example, if there are 10 addons whose desired config has not yet been synced to status,
+// all 10 addons will be returned by getAddonsToApply().
+// Given a Progressive rollout strategy with a maxConcurrency of 3, only 3 of these addons
+// will be returned by getAddonsToUpdate() for update.
 func (g *configurationGraph) getAddonsToUpdate() []*addonNode {
 	var addons []*addonNode
 	for _, node := range g.nodes {
@@ -312,6 +318,32 @@ func (g *configurationGraph) getAddonsToUpdate() []*addonNode {
 	return addons
 }
 
+// getAddonsToApply returns the list of addons that need their configurations synchronized.
+// ToApply indicates that the resource's desired status has not been applied yet.
+func (g *configurationGraph) getAddonsToApply() []*addonNode {
+	var addons []*addonNode
+	for _, node := range g.nodes {
+		addons = append(addons, node.getAddonsToApply()...)
+	}
+
+	addons = append(addons, g.defaults.getAddonsToApply()...)
+
+	return addons
+}
+
+// getAddonsSucceeded returns the list of addons that their configurations desired status is applied
+// and last applied status is successful.
+func (g *configurationGraph) getAddonsSucceeded() []*addonNode {
+	var addons []*addonNode
+	for _, node := range g.nodes {
+		addons = append(addons, node.getAddonsSucceeded()...)
+	}
+
+	addons = append(addons, g.defaults.getAddonsSucceeded()...)
+
+	return addons
+}
+
 func (g *configurationGraph) getRequeueTime() time.Duration {
 	minRequeue := maxRequeueTime
 
@@ -422,6 +454,31 @@ func (n *installStrategyNode) getAddonsToUpdate() []*addonNode {
 	return addons
 }
 
+// getAddonsToApply return the addons to sync configurations
+// ToApply indicates that the resource's desired status has not been applied yet.
+func (n *installStrategyNode) getAddonsToApply() []*addonNode {
+	var addons []*addonNode
+
+	for i, addon := range n.children {
+		if addon.status.Status == clustersdkv1alpha1.ToApply {
+			addons = append(addons, n.children[i])
+		}
+	}
+	return addons
+}
+
+// getAddonsSucceeded return the addons already rollout successfully or has no configurations
+func (n *installStrategyNode) getAddonsSucceeded() []*addonNode {
+	var addons []*addonNode
+
+	for i, addon := range n.children {
+		if addon.status.Status == clustersdkv1alpha1.Succeeded {
+			addons = append(addons, n.children[i])
+		}
+	}
+	return addons
+}
+
 // Return the number of succeed addons.
 // Including the addons with status Succeed after MinSuccessTime.
 func (n *installStrategyNode) countAddonUpgradeSucceed() int {
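A toy model (illustrative only, not the real scheduler) of the partition the comments above describe: with 10 addons still to apply and a Progressive strategy whose maxConcurrency is 3, only the first batch is picked for update in one pass; the rest keep waiting for rollout:

package main

import "fmt"

func main() {
	// 10 addons whose desired config has not been synced to status yet.
	toApply := []string{"a1", "a2", "a3", "a4", "a5", "a6", "a7", "a8", "a9", "a10"}
	maxConcurrency := 3

	// The batch picked for update in this reconcile pass.
	toUpdate := toApply[:maxConcurrency]
	fmt.Println("to update (Configured=True after patch):", toUpdate)
	fmt.Println("waiting for rollout (Configured=False):", toApply[maxConcurrency:])
}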
@@ -234,6 +234,12 @@ var _ = ginkgo.Describe("Addon upgrade", func() {
 					Reason:  addonapiv1alpha1.ProgressingReasonCompleted,
 					Message: "completed with no errors.",
 				})
+				assertManagedClusterAddOnConditions(testAddOnConfigsImpl.name, clusterNames[i], metav1.Condition{
+					Type:    addonapiv1alpha1.ManagedClusterAddOnConditionConfigured,
+					Status:  metav1.ConditionTrue,
+					Reason:  "ConfigurationsConfigured",
+					Message: "Configurations configured",
+				})
 			}
 
 			ginkgo.By("check cma status")
@@ -313,6 +319,12 @@ var _ = ginkgo.Describe("Addon upgrade", func() {
 					Reason:  addonapiv1alpha1.ProgressingReasonCompleted,
 					Message: "completed with no errors.",
 				})
+				assertManagedClusterAddOnConditions(testAddOnConfigsImpl.name, clusterNames[i], metav1.Condition{
+					Type:    addonapiv1alpha1.ManagedClusterAddOnConditionConfigured,
+					Status:  metav1.ConditionTrue,
+					Reason:  "ConfigurationsConfigured",
+					Message: "Configurations configured",
+				})
 			}
 
 			ginkgo.By("check cma status")
@@ -403,6 +415,12 @@ var _ = ginkgo.Describe("Addon upgrade", func() {
 					Reason:  addonapiv1alpha1.ProgressingReasonProgressing,
 					Message: "progressing... work is not ready",
 				})
+				assertManagedClusterAddOnConditions(testAddOnConfigsImpl.name, clusterNames[i], metav1.Condition{
+					Type:    addonapiv1alpha1.ManagedClusterAddOnConditionConfigured,
+					Status:  metav1.ConditionTrue,
+					Reason:  "ConfigurationsConfigured",
+					Message: "Configurations configured",
+				})
 			}
 			for i := 2; i < 4; i++ {
 				assertManagedClusterAddOnConfigReferences(testAddOnConfigsImpl.name, clusterNames[i], addonapiv1alpha1.ConfigReference{
@@ -436,6 +454,12 @@ var _ = ginkgo.Describe("Addon upgrade", func() {
 					Reason:  addonapiv1alpha1.ProgressingReasonCompleted,
 					Message: "completed with no errors.",
 				})
+				assertManagedClusterAddOnConditions(testAddOnConfigsImpl.name, clusterNames[i], metav1.Condition{
+					Type:    addonapiv1alpha1.ManagedClusterAddOnConditionConfigured,
+					Status:  metav1.ConditionFalse,
+					Reason:  "ConfigurationsNotConfigured",
+					Message: "Configurations updated and not configured yet",
+				})
 			}
 
 			ginkgo.By("check cma status")
@@ -519,6 +543,12 @@ var _ = ginkgo.Describe("Addon upgrade", func() {
 					Reason:  addonapiv1alpha1.ProgressingReasonCompleted,
 					Message: "completed with no errors.",
 				})
+				assertManagedClusterAddOnConditions(testAddOnConfigsImpl.name, clusterNames[i], metav1.Condition{
+					Type:    addonapiv1alpha1.ManagedClusterAddOnConditionConfigured,
+					Status:  metav1.ConditionTrue,
+					Reason:  "ConfigurationsConfigured",
+					Message: "Configurations configured",
+				})
 			}
 
 			ginkgo.By("check cma status")
@@ -567,6 +597,12 @@ var _ = ginkgo.Describe("Addon upgrade", func() {
 					Reason:  addonapiv1alpha1.ProgressingReasonCompleted,
 					Message: "completed with no errors.",
 				})
+				assertManagedClusterAddOnConditions(testAddOnConfigsImpl.name, clusterNames[i], metav1.Condition{
+					Type:    addonapiv1alpha1.ManagedClusterAddOnConditionConfigured,
+					Status:  metav1.ConditionTrue,
+					Reason:  "ConfigurationsConfigured",
+					Message: "Configurations configured",
+				})
 			}
 			ginkgo.By("check cma status")
 			assertClusterManagementAddOnInstallProgression(testAddOnConfigsImpl.name, addonapiv1alpha1.InstallProgression{
@@ -664,6 +700,12 @@ var _ = ginkgo.Describe("Addon upgrade", func() {
 					Reason:  addonapiv1alpha1.ProgressingReasonProgressing,
 					Message: "progressing... work is not ready",
 				})
+				assertManagedClusterAddOnConditions(testAddOnConfigsImpl.name, clusterNames[i], metav1.Condition{
+					Type:    addonapiv1alpha1.ManagedClusterAddOnConditionConfigured,
+					Status:  metav1.ConditionTrue,
+					Reason:  "ConfigurationsConfigured",
+					Message: "Configurations configured",
+				})
 			}
 			for i := 2; i < 4; i++ {
 				assertManagedClusterAddOnConfigReferences(testAddOnConfigsImpl.name, clusterNames[i], addonapiv1alpha1.ConfigReference{
@@ -697,6 +739,12 @@ var _ = ginkgo.Describe("Addon upgrade", func() {
 					Reason:  addonapiv1alpha1.ProgressingReasonCompleted,
 					Message: "completed with no errors.",
 				})
+				assertManagedClusterAddOnConditions(testAddOnConfigsImpl.name, clusterNames[i], metav1.Condition{
+					Type:    addonapiv1alpha1.ManagedClusterAddOnConditionConfigured,
+					Status:  metav1.ConditionFalse,
+					Reason:  "ConfigurationsNotConfigured",
+					Message: "Configurations updated and not configured yet",
+				})
 			}
 
 			ginkgo.By("check cma status")
@@ -793,6 +841,12 @@ var _ = ginkgo.Describe("Addon upgrade", func() {
 					Reason:  addonapiv1alpha1.ProgressingReasonCompleted,
 					Message: "completed with no errors.",
 				})
+				assertManagedClusterAddOnConditions(testAddOnConfigsImpl.name, clusterNames[i], metav1.Condition{
+					Type:    addonapiv1alpha1.ManagedClusterAddOnConditionConfigured,
+					Status:  metav1.ConditionTrue,
+					Reason:  "ConfigurationsConfigured",
+					Message: "Configurations configured",
+				})
 			}
 
 			ginkgo.By("check cma status")
vendor/modules.txt | 2 (vendored)
@@ -1528,7 +1528,7 @@ open-cluster-management.io/addon-framework/pkg/agent
 open-cluster-management.io/addon-framework/pkg/assets
 open-cluster-management.io/addon-framework/pkg/index
 open-cluster-management.io/addon-framework/pkg/utils
-# open-cluster-management.io/api v0.14.1-0.20240627145512-bd6f2229b53c
+# open-cluster-management.io/api v0.14.1-0.20241008081048-f6c658202790
 ## explicit; go 1.22.0
 open-cluster-management.io/api/addon/v1alpha1
 open-cluster-management.io/api/client/addon/clientset/versioned
vendor/open-cluster-management.io/api/addon/v1alpha1/types_managedclusteraddon.go | 1 (generated, vendored)
@@ -318,7 +318,6 @@ const (
 	// the managed cluster.
 	ManagedClusterAddOnConditionDegraded string = "Degraded"
 
-	// Deprecated: Use ManagedClusterAddOnConditionProgressing instead
 	// ManagedClusterAddOnConditionConfigured represents that the addon agent is configured with its configuration
 	ManagedClusterAddOnConditionConfigured string = "Configured"
 
@@ -185,6 +185,29 @@ spec:
             type: integer
             format: int32
             default: 50
+          registrationDriver:
+            description: This provides driver details required to register with hub
+            type: object
+            properties:
+              authType:
+                description: Type of the authentication used by managedcluster to register as well as pull work from hub. Possible values are csr and awsirsa.
+                type: string
+                default: csr
+                enum:
+                - csr
+                - awsirsa
+              awsIrsa:
+                description: 'Contain the details required for registering with hub cluster (ie: an EKS cluster) using AWS IAM roles for service account. This is required only when the authType is awsirsa.'
+                type: object
+                properties:
+                  hubClusterArn:
+                    description: 'The arn of the hub cluster (ie: an EKS cluster). This will be required to pass information to hub, which hub will use to create IAM identities for this klusterlet. Example - arn:eks:us-west-2:12345678910:cluster/hub-cluster1.'
+                    type: string
+                    minLength: 1
+                  managedClusterArn:
+                    description: 'The arn of the managed cluster (ie: an EKS cluster). This will be required to generate the md5hash which will be used as a suffix to create IAM role on hub as well as used by kluslerlet-agent, to assume role suffixed with the md5hash, on startup. Example - arn:eks:us-west-2:12345678910:cluster/managed-cluster1.'
+                    type: string
+                    minLength: 1
       registrationImagePullSpec:
         description: RegistrationImagePullSpec represents the desired image configuration of registration agent. quay.io/open-cluster-management.io/registration:latest will be used if unspecified.
         type: string
vendor/open-cluster-management.io/api/feature/feature.go | 4 (generated, vendored)
@@ -77,6 +77,9 @@ const (
 
 	// MultipleHubs allows user to configure multiple bootstrapkubeconfig connecting to different hubs via Klusterlet and let agent decide which one to use
 	MultipleHubs featuregate.Feature = "MultipleHubs"
+
+	// ClusterProfile will start new controller in the Hub that can be used to sync ManagedCluster to ClusterProfile.
+	ClusterProfile featuregate.Feature = "ClusterProfile"
 )
 
 // DefaultSpokeRegistrationFeatureGates consists of all known ocm-registration
@@ -97,6 +100,7 @@ var DefaultHubRegistrationFeatureGates = map[featuregate.Feature]featuregate.Fea
 	V1beta1CSRAPICompatibility: {Default: false, PreRelease: featuregate.Alpha},
 	ManagedClusterAutoApproval: {Default: false, PreRelease: featuregate.Alpha},
 	ResourceCleanup:            {Default: false, PreRelease: featuregate.Alpha},
+	ClusterProfile:             {Default: false, PreRelease: featuregate.Alpha},
 }
 
 var DefaultHubAddonManagerFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
@@ -289,6 +289,39 @@ spec:
                     If it is set empty, use the default value: 50
                   format: int32
                   type: integer
+                registrationDriver:
+                  description: This provides driver details required to register
+                    with hub
+                  properties:
+                    authType:
+                      default: csr
+                      description: Type of the authentication used by managedcluster
+                        to register as well as pull work from hub. Possible values
+                        are csr and awsirsa.
+                      enum:
+                      - csr
+                      - awsirsa
+                      type: string
+                    awsIrsa:
+                      description: |-
+                        Contain the details required for registering with hub cluster (ie: an EKS cluster) using AWS IAM roles for service account.
+                        This is required only when the authType is awsirsa.
+                      properties:
+                        hubClusterArn:
+                          description: |-
+                            The arn of the hub cluster (ie: an EKS cluster). This will be required to pass information to hub, which hub will use to create IAM identities for this klusterlet.
+                            Example - arn:eks:us-west-2:12345678910:cluster/hub-cluster1.
+                          minLength: 1
+                          type: string
+                        managedClusterArn:
+                          description: |-
+                            The arn of the managed cluster (ie: an EKS cluster). This will be required to generate the md5hash which will be used as a suffix to create IAM role on hub
+                            as well as used by kluslerlet-agent, to assume role suffixed with the md5hash, on startup.
+                            Example - arn:eks:us-west-2:12345678910:cluster/managed-cluster1.
+                          minLength: 1
+                          type: string
+                      type: object
+                  type: object
               type: object
             registrationImagePullSpec:
              description: |-
vendor/open-cluster-management.io/api/operator/v1/types_klusterlet.go | 30 (generated, vendored)
@@ -172,6 +172,36 @@ type RegistrationConfiguration struct {
 	// But if the user updates the content of a failed bootstrapkubeconfig, the "failed" mark will be cleared.
 	// +optional
 	BootstrapKubeConfigs BootstrapKubeConfigs `json:"bootstrapKubeConfigs,omitempty"`
+
+	// This provides driver details required to register with hub
+	// +optional
+	RegistrationDriver RegistrationDriver `json:"registrationDriver,omitempty"`
 }
+
+type RegistrationDriver struct {
+	// Type of the authentication used by managedcluster to register as well as pull work from hub. Possible values are csr and awsirsa.
+	// +required
+	// +kubebuilder:default:=csr
+	// +kubebuilder:validation:Enum=csr;awsirsa
+	AuthType string `json:"authType,omitempty"`
+
+	// Contain the details required for registering with hub cluster (ie: an EKS cluster) using AWS IAM roles for service account.
+	// This is required only when the authType is awsirsa.
+	AwsIrsa *AwsIrsa `json:"awsIrsa,omitempty"`
+}
+
+type AwsIrsa struct {
+	// The arn of the hub cluster (ie: an EKS cluster). This will be required to pass information to hub, which hub will use to create IAM identities for this klusterlet.
+	// Example - arn:eks:us-west-2:12345678910:cluster/hub-cluster1.
+	// +required
+	// +kubebuilder:validation:MinLength=1
+	HubClusterArn string `json:"hubClusterArn"`
+	// The arn of the managed cluster (ie: an EKS cluster). This will be required to generate the md5hash which will be used as a suffix to create IAM role on hub
+	// as well as used by kluslerlet-agent, to assume role suffixed with the md5hash, on startup.
+	// Example - arn:eks:us-west-2:12345678910:cluster/managed-cluster1.
+	// +required
+	// +kubebuilder:validation:MinLength=1
+	ManagedClusterArn string `json:"managedClusterArn"`
+}
 
 type TypeBootstrapKubeConfigs string
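A sketch of the new fields in use, built only from the struct definitions in this hunk; the package alias, the function name, and the ARN values (taken from the field comments) are illustrative:

package example

import operatorv1 "open-cluster-management.io/api/operator/v1"

// awsIrsaDriver returns a RegistrationDriver that selects AWS IRSA
// authentication instead of the default CSR flow.
func awsIrsaDriver() operatorv1.RegistrationDriver {
	return operatorv1.RegistrationDriver{
		AuthType: "awsirsa",
		AwsIrsa: &operatorv1.AwsIrsa{
			HubClusterArn:     "arn:eks:us-west-2:12345678910:cluster/hub-cluster1",
			ManagedClusterArn: "arn:eks:us-west-2:12345678910:cluster/managed-cluster1",
		},
	}
}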
vendor/open-cluster-management.io/api/operator/v1/zz_generated.deepcopy.go | 38 (generated, vendored)
@@ -32,6 +32,22 @@ func (in *AddOnManagerConfiguration) DeepCopy() *AddOnManagerConfiguration {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AwsIrsa) DeepCopyInto(out *AwsIrsa) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AwsIrsa.
+func (in *AwsIrsa) DeepCopy() *AwsIrsa {
+	if in == nil {
+		return nil
+	}
+	out := new(AwsIrsa)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *BootstrapKubeConfigs) DeepCopyInto(out *BootstrapKubeConfigs) {
 	*out = *in
@@ -506,6 +522,7 @@ func (in *RegistrationConfiguration) DeepCopyInto(out *RegistrationConfiguration
 		}
 	}
 	in.BootstrapKubeConfigs.DeepCopyInto(&out.BootstrapKubeConfigs)
+	in.RegistrationDriver.DeepCopyInto(&out.RegistrationDriver)
 	return
 }
 
@@ -519,6 +536,27 @@ func (in *RegistrationConfiguration) DeepCopy() *RegistrationConfiguration {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RegistrationDriver) DeepCopyInto(out *RegistrationDriver) {
+	*out = *in
+	if in.AwsIrsa != nil {
+		in, out := &in.AwsIrsa, &out.AwsIrsa
+		*out = new(AwsIrsa)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistrationDriver.
+func (in *RegistrationDriver) DeepCopy() *RegistrationDriver {
+	if in == nil {
+		return nil
+	}
+	out := new(RegistrationDriver)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *RegistrationHubConfiguration) DeepCopyInto(out *RegistrationHubConfiguration) {
 	*out = *in
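Because RegistrationDriver.DeepCopyInto clones the AwsIrsa pointer, mutating a copy cannot leak into the original. A toy check (illustrative only; assumes the original ARN is not itself the sentinel value):

package example

import operatorv1 "open-cluster-management.io/api/operator/v1"

// deepCopyIsolates mutates a deep copy and verifies the original is untouched.
func deepCopyIsolates(d *operatorv1.RegistrationDriver) bool {
	c := d.DeepCopy()
	if c.AwsIrsa != nil {
		c.AwsIrsa.HubClusterArn = "mutated"
	}
	return d.AwsIrsa == nil || d.AwsIrsa.HubClusterArn != "mutated"
}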
vendor/open-cluster-management.io/api/operator/v1/zz_generated.swagger_doc_generated.go | 19 (generated, vendored)
@@ -163,6 +163,15 @@ func (WorkConfiguration) SwaggerDoc() map[string]string {
 	return map_WorkConfiguration
 }
 
+var map_AwsIrsa = map[string]string{
+	"hubClusterArn":     "The arn of the hub cluster (ie: an EKS cluster). This will be required to pass information to hub, which hub will use to create IAM identities for this klusterlet. Example - arn:eks:us-west-2:12345678910:cluster/hub-cluster1.",
+	"managedClusterArn": "The arn of the managed cluster (ie: an EKS cluster). This will be required to generate the md5hash which will be used as a suffix to create IAM role on hub as well as used by kluslerlet-agent, to assume role suffixed with the md5hash, on startup. Example - arn:eks:us-west-2:12345678910:cluster/managed-cluster1.",
+}
+
+func (AwsIrsa) SwaggerDoc() map[string]string {
+	return map_AwsIrsa
+}
+
 var map_BootstrapKubeConfigs = map[string]string{
 	"type":               "Type specifies the type of priority bootstrap kubeconfigs. By default, it is set to None, representing no priority bootstrap kubeconfigs are set.",
 	"localSecretsConfig": "LocalSecretsConfig include a list of secrets that contains the kubeconfigs for ordered bootstrap kubeconifigs. The secrets must be in the same namespace where the agent controller runs.",
@@ -268,12 +277,22 @@ var map_RegistrationConfiguration = map[string]string{
 	"kubeAPIQPS":           "KubeAPIQPS indicates the maximum QPS while talking with apiserver of hub cluster from the spoke cluster. If it is set empty, use the default value: 50",
 	"kubeAPIBurst":         "KubeAPIBurst indicates the maximum burst of the throttle while talking with apiserver of hub cluster from the spoke cluster. If it is set empty, use the default value: 100",
 	"bootstrapKubeConfigs": "BootstrapKubeConfigs defines the ordered list of bootstrap kubeconfigs. The order decides which bootstrap kubeconfig to use first when rebootstrap.\n\nWhen the agent loses the connection to the current hub over HubConnectionTimeoutSeconds, or the managedcluster CR is set `hubAcceptsClient=false` on the hub, the controller marks the related bootstrap kubeconfig as \"failed\".\n\nA failed bootstrapkubeconfig won't be used for the duration specified by SkipFailedBootstrapKubeConfigSeconds. But if the user updates the content of a failed bootstrapkubeconfig, the \"failed\" mark will be cleared.",
+	"registrationDriver":   "This provides driver details required to register with hub",
 }
 
 func (RegistrationConfiguration) SwaggerDoc() map[string]string {
 	return map_RegistrationConfiguration
 }
 
+var map_RegistrationDriver = map[string]string{
+	"authType": "Type of the authentication used by managedcluster to register as well as pull work from hub. Possible values are csr and awsirsa.",
+	"awsIrsa":  "Contain the details required for registering with hub cluster (ie: an EKS cluster) using AWS IAM roles for service account. This is required only when the authType is awsirsa.",
+}
+
+func (RegistrationDriver) SwaggerDoc() map[string]string {
+	return map_RegistrationDriver
+}
+
 var map_ServerURL = map[string]string{
 	"":    "ServerURL represents the apiserver url and ca bundle that is accessible externally",
 	"url": "URL is the url of apiserver endpoint of the managed cluster.",