Mirror of https://github.com/open-cluster-management-io/ocm.git, synced 2026-02-14 18:09:57 +00:00
✨ Cluster decorator interface (#759)
* Add cluster decorator interface in register and refactor the creating controller to call decorators
  Signed-off-by: Jian Qiu <jqiu@redhat.com>
* Add aws annotations to ManagedCluster using Decorator
  Signed-off-by: Gaurav Jaswal <jaswalkiranavtar@gmail.com>
* Addressing review comments
  Signed-off-by: Gaurav Jaswal <jaswalkiranavtar@gmail.com>
---------
Signed-off-by: Jian Qiu <jqiu@redhat.com>
Signed-off-by: Gaurav Jaswal <jaswalkiranavtar@gmail.com>
Co-authored-by: Jian Qiu <jqiu@redhat.com>
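In short, the commit replaces the creating controller's fixed per-field arguments (annotations, server URLs, CA bundle) with a chain of ManagedClusterDecorator functions that drivers and options contribute. A condensed sketch of the pattern using the type this diff adds; the labelDecorator example is hypothetical, not part of the change:

	package main

	import (
		"fmt"

		clusterv1 "open-cluster-management.io/api/cluster/v1"
	)

	// ManagedClusterDecorator matches the type added below: it receives the
	// ManagedCluster being registered and returns a (possibly modified) cluster.
	type ManagedClusterDecorator func(cluster *clusterv1.ManagedCluster) *clusterv1.ManagedCluster

	// labelDecorator is a hypothetical example decorator that stamps one label.
	func labelDecorator(key, value string) ManagedClusterDecorator {
		return func(cluster *clusterv1.ManagedCluster) *clusterv1.ManagedCluster {
			if cluster.Labels == nil {
				cluster.Labels = map[string]string{}
			}
			cluster.Labels[key] = value
			return cluster
		}
	}

	func main() {
		cluster := &clusterv1.ManagedCluster{}
		// The creating controller applies each decorator in order, exactly like
		// the loop added to sync() further down in this diff.
		for _, decorate := range []ManagedClusterDecorator{labelDecorator("env", "dev")} {
			cluster = decorate(cluster)
		}
		fmt.Println(cluster.Labels)
	}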
@@ -13,6 +13,9 @@ import (
 	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
 	"k8s.io/klog/v2"
 
+	clusterv1 "open-cluster-management.io/api/cluster/v1"
+	operatorv1 "open-cluster-management.io/api/operator/v1"
+
 	"open-cluster-management.io/ocm/pkg/registration/register"
 )
 
@@ -22,11 +25,15 @@ const (
 	// TLSKeyFile is the name of tls key file in kubeconfigSecret
 	TLSKeyFile = "tls.key"
 	// TLSCertFile is the name of the tls cert file in kubeconfigSecret
 	TLSCertFile = "tls.crt"
+
+	ManagedClusterArn           = "managed-cluster-arn"
+	ManagedClusterIAMRoleSuffix = "managed-cluster-iam-role-suffix"
 )
 
 type AWSIRSADriver struct {
-	name string
+	name                     string
+	managedClusterArn        string
+	managedClusterRoleSuffix string
 }
 
 func (c *AWSIRSADriver) Process(
@@ -95,6 +102,18 @@ func (c *AWSIRSADriver) IsHubKubeConfigValid(ctx context.Context, secretOption r
 	return true, nil
 }
 
-func NewAWSIRSADriver() register.RegisterDriver {
-	return &AWSIRSADriver{}
+func (c *AWSIRSADriver) ManagedClusterDecorator(cluster *clusterv1.ManagedCluster) *clusterv1.ManagedCluster {
+	if cluster.Annotations == nil {
+		cluster.Annotations = make(map[string]string)
+	}
+	cluster.Annotations[operatorv1.ClusterAnnotationsKeyPrefix+"/"+ManagedClusterArn] = c.managedClusterArn
+	cluster.Annotations[operatorv1.ClusterAnnotationsKeyPrefix+"/"+ManagedClusterIAMRoleSuffix] = c.managedClusterRoleSuffix
+	return cluster
+}
+
+func NewAWSIRSADriver(managedClusterArn string, managedClusterRoleSuffix string) register.RegisterDriver {
+	return &AWSIRSADriver{
+		managedClusterArn:        managedClusterArn,
+		managedClusterRoleSuffix: managedClusterRoleSuffix,
+	}
 }
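Taken together, the constructor now carries the AWS identifiers and the decorator stamps them onto the cluster as annotations. A minimal usage sketch, not part of the commit; the import path and ARN/suffix values are borrowed from this diff and its integration tests:

	package main

	import (
		"fmt"

		clusterv1 "open-cluster-management.io/api/cluster/v1"

		"open-cluster-management.io/ocm/pkg/registration/register/aws_irsa"
	)

	func main() {
		// Values borrowed from the integration test added at the bottom of this commit.
		driver := aws_irsa.NewAWSIRSADriver(
			"arn:aws:eks:us-west-2:123456789012:cluster/managed-cluster1",
			"7f8141296c75f2871e3d030f85c35692",
		)

		// The decorator adds the two annotations under
		// operatorv1.ClusterAnnotationsKeyPrefix, the
		// agent.open-cluster-management.io prefix seen in the tests below.
		cluster := driver.ManagedClusterDecorator(&clusterv1.ManagedCluster{})
		fmt.Println(cluster.Annotations)
	}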
@@ -230,7 +230,7 @@ func TestIsHubKubeConfigValidFunc(t *testing.T) {
 	}
 	for _, c := range cases {
 		t.Run(c.name, func(t *testing.T) {
-			driver := NewAWSIRSADriver()
+			driver := NewAWSIRSADriver("", "")
 			secretOption := register.SecretOption{
 				ClusterName: c.clusterName,
 				AgentName:   c.agentName,
 
@@ -22,6 +22,8 @@ import (
 	"k8s.io/client-go/util/keyutil"
 	"k8s.io/klog/v2"
 
+	clusterv1 "open-cluster-management.io/api/cluster/v1"
+
 	"open-cluster-management.io/ocm/pkg/registration/register"
 )
 
@@ -266,6 +268,10 @@ func (c *CSRDriver) IsHubKubeConfigValid(ctx context.Context, secretOption regis
 	return isCertificateValid(logger, certData, nil)
 }
 
+func (c *CSRDriver) ManagedClusterDecorator(cluster *clusterv1.ManagedCluster) *clusterv1.ManagedCluster {
+	return cluster
+}
+
 func NewCSRDriver() register.RegisterDriver {
 	return &CSRDriver{}
 }
 
@@ -71,6 +71,9 @@ type RegisterDriver interface {
 	// InformerHandler returns informer of the related object. If no object needs to be watched, the func could
 	// return nil, nil.
 	InformerHandler(option any) (cache.SharedIndexInformer, factory.EventFilterFunc)
+
+	// ManagedClusterDecorator is to change managed cluster metadata or spec during registration process.
+	ManagedClusterDecorator(cluster *clusterv1.ManagedCluster) *clusterv1.ManagedCluster
 }
 
 // Approvers is the inteface that each driver should implement on hub side. The hub controller will use this driver
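Every driver must now also implement the new method; the CSR driver above satisfies it with a pass-through. A compile-time sketch of the contract, with package import paths assumed from this diff's imports rather than stated in it:

	package main

	import (
		"open-cluster-management.io/ocm/pkg/registration/register"
		awsirsa "open-cluster-management.io/ocm/pkg/registration/register/aws_irsa"
		"open-cluster-management.io/ocm/pkg/registration/register/csr"
	)

	// Both drivers compile against the extended RegisterDriver interface:
	// the CSR driver returns the cluster unchanged, the AWS IRSA driver
	// stamps the ARN and role-suffix annotations.
	var (
		_ register.RegisterDriver = &csr.CSRDriver{}
		_ register.RegisterDriver = &awsirsa.AWSIRSADriver{}
	)

	func main() {}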
@@ -17,6 +17,8 @@ import (
 	"k8s.io/client-go/tools/cache"
 	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
 
+	clusterv1 "open-cluster-management.io/api/cluster/v1"
+
 	testingcommon "open-cluster-management.io/ocm/pkg/common/testing"
 	testinghelpers "open-cluster-management.io/ocm/pkg/registration/helpers/testing"
 )
@@ -133,7 +135,7 @@ func TestSync(t *testing.T) {
 	for _, c := range testCases {
 		t.Run(c.name, func(t *testing.T) {
 			syncCtx := testingcommon.NewFakeSyncContext(t, "test")
-			kubeClient := kubefake.NewSimpleClientset(c.secrets...)
+			kubeClient := kubefake.NewClientset(c.secrets...)
 			c.option.ManagementCoreClient = kubeClient.CoreV1()
 			informerFactory := informers.NewSharedInformerFactory(kubeClient, 10*time.Minute)
 			c.option.ManagementSecretInformer = informerFactory.Core().V1().Secrets().Informer()
@@ -195,3 +197,7 @@ func (f *fakeDriver) Process(
 func (f *fakeDriver) InformerHandler(_ any) (cache.SharedIndexInformer, factory.EventFilterFunc) {
 	return nil, nil
 }
+
+func (f *fakeDriver) ManagedClusterDecorator(cluster *clusterv1.ManagedCluster) *clusterv1.ManagedCluster {
+	return cluster
+}
@@ -23,28 +23,26 @@ var (
 	CreatingControllerSyncInterval = 60 * time.Minute
 )
 
+type ManagedClusterDecorator func(cluster *clusterv1.ManagedCluster) *clusterv1.ManagedCluster
+
 // managedClusterCreatingController creates a ManagedCluster on hub cluster during the spoke agent bootstrap phase
 type managedClusterCreatingController struct {
-	clusterName             string
-	spokeExternalServerURLs []string
-	spokeCABundle           []byte
-	clusterAnnotations      map[string]string
-	hubClusterClient        clientset.Interface
+	clusterName       string
+	clusterDecorators []ManagedClusterDecorator
+	hubClusterClient  clientset.Interface
 }
 
 // NewManagedClusterCreatingController creates a new managedClusterCreatingController on the managed cluster.
 func NewManagedClusterCreatingController(
-	clusterName string, spokeExternalServerURLs []string, annotations map[string]string,
-	spokeCABundle []byte,
+	clusterName string,
+	decorators []ManagedClusterDecorator,
 	hubClusterClient clientset.Interface,
 	recorder events.Recorder) factory.Controller {
 
 	c := &managedClusterCreatingController{
-		clusterName:             clusterName,
-		spokeExternalServerURLs: spokeExternalServerURLs,
-		spokeCABundle:           spokeCABundle,
-		clusterAnnotations:      commonhelpers.FilterClusterAnnotations(annotations),
-		hubClusterClient:        hubClusterClient,
+		clusterName:       clusterName,
+		hubClusterClient:  hubClusterClient,
+		clusterDecorators: decorators,
 	}
 
 	return factory.New().
@@ -69,20 +67,12 @@ func (c *managedClusterCreatingController) sync(ctx context.Context, syncCtx fac
 	if errors.IsNotFound(err) {
 		managedCluster := &clusterv1.ManagedCluster{
 			ObjectMeta: metav1.ObjectMeta{
-				Name:        c.clusterName,
-				Annotations: c.clusterAnnotations,
+				Name: c.clusterName,
 			},
 		}
 
-		if len(c.spokeExternalServerURLs) != 0 {
-			var managedClusterClientConfigs []clusterv1.ClientConfig
-			for _, serverURL := range c.spokeExternalServerURLs {
-				managedClusterClientConfigs = append(managedClusterClientConfigs, clusterv1.ClientConfig{
-					URL:      serverURL,
-					CABundle: c.spokeCABundle,
-				})
-			}
-			managedCluster.Spec.ManagedClusterClientConfigs = managedClusterClientConfigs
+		for _, decorator := range c.clusterDecorators {
+			managedCluster = decorator(managedCluster)
 		}
 
 		_, err = c.hubClusterClient.ClusterV1().ManagedClusters().Create(ctx, managedCluster, metav1.CreateOptions{})
@@ -94,37 +84,17 @@ func (c *managedClusterCreatingController) sync(ctx context.Context, syncCtx fac
 		return nil
 	}
 
-	// do not update ManagedClusterClientConfigs in ManagedCluster if spokeExternalServerURLs is empty
-	if len(c.spokeExternalServerURLs) == 0 {
-		return nil
+	managedCluster := existingCluster.DeepCopy()
+	for _, decorator := range c.clusterDecorators {
+		managedCluster = decorator(managedCluster)
 	}
 
-	// merge ClientConfig
-	managedClusterClientConfigs := existingCluster.Spec.ManagedClusterClientConfigs
-	for _, serverURL := range c.spokeExternalServerURLs {
-		isIncludeByExisting := false
-		for _, existingClientConfig := range existingCluster.Spec.ManagedClusterClientConfigs {
-			if serverURL == existingClientConfig.URL {
-				isIncludeByExisting = true
-				break
-			}
-		}
-
-		if !isIncludeByExisting {
-			managedClusterClientConfigs = append(managedClusterClientConfigs, clusterv1.ClientConfig{
-				URL:      serverURL,
-				CABundle: c.spokeCABundle,
-			})
-		}
-	}
-	if len(existingCluster.Spec.ManagedClusterClientConfigs) == len(managedClusterClientConfigs) {
+	if len(existingCluster.Spec.ManagedClusterClientConfigs) == len(managedCluster.Spec.ManagedClusterClientConfigs) {
 		return nil
 	}
 
-	// update ManagedClusterClientConfigs in ManagedCluster
-	clusterCopy := existingCluster.DeepCopy()
-	clusterCopy.Spec.ManagedClusterClientConfigs = managedClusterClientConfigs
-	_, err = c.hubClusterClient.ClusterV1().ManagedClusters().Update(ctx, clusterCopy, metav1.UpdateOptions{})
+	_, err = c.hubClusterClient.ClusterV1().ManagedClusters().Update(ctx, managedCluster, metav1.UpdateOptions{})
 	// ManagedClusterClientConfigs in ManagedCluster is only allowed updated during bootstrap.
 	// After bootstrap secret expired, an unauthorized error will be got, skip it
 	if skipUnauthorizedError(err) != nil {
@@ -141,3 +111,40 @@ func skipUnauthorizedError(err error) error {
 
 	return err
 }
+
+func AnnotationDecorator(annotations map[string]string) ManagedClusterDecorator {
+	return func(cluster *clusterv1.ManagedCluster) *clusterv1.ManagedCluster {
+		filteredAnnotations := commonhelpers.FilterClusterAnnotations(annotations)
+		if cluster.Annotations == nil {
+			cluster.Annotations = make(map[string]string)
+		}
+		for key, value := range filteredAnnotations {
+			cluster.Annotations[key] = value
+		}
+		return cluster
+	}
+}
+
+// ClientConfigDecorator merge ClientConfig
+func ClientConfigDecorator(externalServerURLs []string, caBundle []byte) ManagedClusterDecorator {
+	return func(cluster *clusterv1.ManagedCluster) *clusterv1.ManagedCluster {
+		for _, serverURL := range externalServerURLs {
+			isIncludeByExisting := false
+			for _, existingClientConfig := range cluster.Spec.ManagedClusterClientConfigs {
+				if serverURL == existingClientConfig.URL {
+					isIncludeByExisting = true
+					break
+				}
+			}
+
+			if !isIncludeByExisting {
+				cluster.Spec.ManagedClusterClientConfigs = append(
+					cluster.Spec.ManagedClusterClientConfigs, clusterv1.ClientConfig{
+						URL:      serverURL,
+						CABundle: caBundle,
+					})
+			}
+		}
+		return cluster
+	}
+}
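The two helpers above reproduce the controller's old annotation-filtering and ClientConfig-merging behavior as composable decorators. A usage sketch of the chain as the controller applies it; the registration package import path is assumed, and the values echo the tests in this commit:

	package main

	import (
		"fmt"

		clusterv1 "open-cluster-management.io/api/cluster/v1"

		"open-cluster-management.io/ocm/pkg/registration/spoke/registration"
	)

	func main() {
		decorators := []registration.ManagedClusterDecorator{
			// Only keys under the agent.open-cluster-management.io/ prefix
			// survive FilterClusterAnnotations; the bare "foo" key is dropped
			// (see the integration test below).
			registration.AnnotationDecorator(map[string]string{
				"agent.open-cluster-management.io/foo": "bar",
				"foo":                                  "bar",
			}),
			// Appends a ClientConfig only when the URL is not already present.
			registration.ClientConfigDecorator(
				[]string{"https://spoke.example.com:6443"}, []byte("ca-bundle")),
		}
		cluster := &clusterv1.ManagedCluster{}
		for _, decorate := range decorators {
			cluster = decorate(cluster)
		}
		fmt.Println(cluster.Annotations, cluster.Spec.ManagedClusterClientConfigs)
	}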
@@ -59,13 +59,14 @@ func TestCreateSpokeCluster(t *testing.T) {
 		t.Run(c.name, func(t *testing.T) {
 			clusterClient := clusterfake.NewSimpleClientset(c.startingObjects...)
 			ctrl := managedClusterCreatingController{
-				clusterName:             testinghelpers.TestManagedClusterName,
-				spokeExternalServerURLs: []string{testSpokeExternalServerUrl},
-				spokeCABundle:           []byte("testcabundle"),
-				hubClusterClient:        clusterClient,
-				clusterAnnotations: map[string]string{
-					"agent.open-cluster-management.io/test": "true",
+				clusterName: testinghelpers.TestManagedClusterName,
+				clusterDecorators: []ManagedClusterDecorator{
+					AnnotationDecorator(map[string]string{
+						"agent.open-cluster-management.io/test": "true",
+					}),
+					ClientConfigDecorator([]string{testSpokeExternalServerUrl}, []byte("testcabundle")),
 				},
+				hubClusterClient: clusterClient,
 			}
 
 			syncErr := ctrl.sync(context.TODO(), testingcommon.NewFakeSyncContext(t, ""))
@@ -26,7 +26,6 @@ import (
 	clusterv1informers "open-cluster-management.io/api/client/cluster/informers/externalversions"
 	clusterv1 "open-cluster-management.io/api/cluster/v1"
 	ocmfeature "open-cluster-management.io/api/feature"
-	operatorv1 "open-cluster-management.io/api/operator/v1"
 
 	"open-cluster-management.io/ocm/pkg/common/helpers"
 	commonoptions "open-cluster-management.io/ocm/pkg/common/options"
@@ -191,20 +190,9 @@ func (o *SpokeAgentConfig) RunSpokeAgentWithSpokeInformers(ctx context.Context,
 
 	// initiate registration driver
 	var registerDriver register.RegisterDriver
-	if o.registrationOption.RegistrationAuth == AwsIrsaAuthType {
-		// TODO: may consider add additional validations
-		if o.registrationOption.HubClusterArn != "" {
-			registerDriver = awsIrsa.NewAWSIRSADriver()
-			if o.registrationOption.ClusterAnnotations == nil {
-				o.registrationOption.ClusterAnnotations = map[string]string{}
-			}
-			o.registrationOption.ClusterAnnotations[operatorv1.ClusterAnnotationsKeyPrefix+"/managed-cluster-arn"] = o.registrationOption.ManagedClusterArn
-			o.registrationOption.ClusterAnnotations[operatorv1.ClusterAnnotationsKeyPrefix+"/managed-cluster-iam-role-suffix"] =
-				o.registrationOption.ManagedClusterRoleSuffix
-
-		} else {
-			panic("A valid EKS Hub Cluster ARN is required with awsirsa based authentication")
-		}
+	var registrationOption = o.registrationOption
+	if registrationOption.RegistrationAuth == AwsIrsaAuthType {
+		registerDriver = awsIrsa.NewAWSIRSADriver(o.registrationOption.ManagedClusterArn, o.registrationOption.ManagedClusterRoleSuffix)
 	} else {
 		registerDriver = csr.NewCSRDriver()
 	}
@@ -254,8 +242,12 @@ func (o *SpokeAgentConfig) RunSpokeAgentWithSpokeInformers(ctx context.Context,
 
 	// start a SpokeClusterCreatingController to make sure there is a spoke cluster on hub cluster
 	spokeClusterCreatingController := registration.NewManagedClusterCreatingController(
-		o.agentOptions.SpokeClusterName, o.registrationOption.SpokeExternalServerURLs, o.registrationOption.ClusterAnnotations,
-		spokeClusterCABundle,
+		o.agentOptions.SpokeClusterName,
+		[]registration.ManagedClusterDecorator{
+			registration.AnnotationDecorator(o.registrationOption.ClusterAnnotations),
+			registration.ClientConfigDecorator(o.registrationOption.SpokeExternalServerURLs, spokeClusterCABundle),
+			o.driver.ManagedClusterDecorator,
+		},
 		bootstrapClusterClient,
 		recorder,
 	)
@@ -45,6 +45,10 @@ func init() {
 func TestValidate(t *testing.T) {
 	defaultCompletedOptions := NewSpokeAgentOptions()
 	defaultCompletedOptions.BootstrapKubeconfig = "/spoke/bootstrap/kubeconfig"
+	awsCompletedOptionsHubArnMissing := *defaultCompletedOptions
+	awsCompletedOptionsHubArnMissing.RegistrationAuth = AwsIrsaAuthType
+	awsDefaultCompletedOptions := awsCompletedOptionsHubArnMissing
+	awsDefaultCompletedOptions.HubClusterArn = "arn:aws:eks:us-west-2:123456789012:cluster/hub-cluster1"
 
 	cases := []struct {
 		name             string
@@ -78,6 +82,16 @@
 			options:     defaultCompletedOptions,
 			expectedErr: "",
 		},
+		{
+			name:        "default completed options for aws flow",
+			options:     &awsDefaultCompletedOptions,
+			expectedErr: "",
+		},
+		{
+			name:        "default completed options without HubClusterArn for aws flow",
+			options:     &awsCompletedOptionsHubArnMissing,
+			expectedErr: "EksHubClusterArn cannot be empty if RegistrationAuth is awsirsa",
+		},
 		{
 			name:    "default completed options",
 			options: &SpokeAgentOptions{
test/integration/registration/clusterannotations_aws_test.go (new file, 72 lines)
@@ -0,0 +1,72 @@
+package registration_test
+
+import (
+	"fmt"
+	"path"
+	"time"
+
+	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
+
+	operatorv1 "open-cluster-management.io/api/operator/v1"
+
+	commonoptions "open-cluster-management.io/ocm/pkg/common/options"
+	"open-cluster-management.io/ocm/pkg/registration/register/aws_irsa"
+	"open-cluster-management.io/ocm/pkg/registration/spoke"
+	"open-cluster-management.io/ocm/test/integration/util"
+)
+
+var _ = ginkgo.Describe("Cluster Annotations for aws", func() {
+	ginkgo.It("Cluster Annotations for aws flow should be created on the managed cluster", func() {
+		managedClusterName := "clusterannotations-spokecluster-aws"
+		//#nosec G101
+		hubKubeconfigSecret := "clusterannotations-hub-kubeconfig-secret"
+		hubKubeconfigDir := path.Join(util.TestDir, "clusterannotations", "hub-kubeconfig")
+
+		managedClusterArn := "arn:aws:eks:us-west-2:123456789012:cluster/managed-cluster1"
+		managedClusterRoleSuffix := "7f8141296c75f2871e3d030f85c35692"
+		hubClusterArn := "arn:aws:eks:us-west-2:123456789012:cluster/hub-cluster1"
+		agentOptions := &spoke.SpokeAgentOptions{
+			RegistrationAuth:         spoke.AwsIrsaAuthType,
+			HubClusterArn:            hubClusterArn,
+			ManagedClusterArn:        managedClusterArn,
+			ManagedClusterRoleSuffix: managedClusterRoleSuffix,
+			BootstrapKubeconfig:      bootstrapKubeConfigFile,
+			HubKubeconfigSecret:      hubKubeconfigSecret,
+			ClusterHealthCheckPeriod: 1 * time.Minute,
+			ClusterAnnotations: map[string]string{
+				"agent.open-cluster-management.io/foo": "bar",
+				"foo":                                  "bar", // this annotation should be filtered out
+			},
+		}
+
+		commOptions := commonoptions.NewAgentOptions()
+		commOptions.HubKubeconfigDir = hubKubeconfigDir
+		commOptions.SpokeClusterName = managedClusterName
+
+		// run registration agent
+		cancel := runAgent("rotationtest", agentOptions, commOptions, spokeCfg)
+		defer cancel()
+
+		// after bootstrap the spokecluster and csr should be created
+		gomega.Eventually(func() error {
+			mc, err := util.GetManagedCluster(clusterClient, managedClusterName)
+			if err != nil {
+				return err
+			}
+
+			if mc.Annotations[operatorv1.ClusterAnnotationsKeyPrefix+"/"+aws_irsa.ManagedClusterArn] != managedClusterArn {
+				return fmt.Errorf("expected annotation "+operatorv1.ClusterAnnotationsKeyPrefix+"/"+aws_irsa.ManagedClusterArn+" to be "+
+					""+managedClusterArn+", got %s", mc.Annotations[operatorv1.ClusterAnnotationsKeyPrefix+"/"+aws_irsa.ManagedClusterArn])
+			}
+
+			if mc.Annotations[operatorv1.ClusterAnnotationsKeyPrefix+"/"+aws_irsa.ManagedClusterIAMRoleSuffix] != managedClusterRoleSuffix {
+				return fmt.Errorf("expected annotation "+operatorv1.ClusterAnnotationsKeyPrefix+"/"+aws_irsa.ManagedClusterIAMRoleSuffix+" "+
+					"to be "+managedClusterRoleSuffix+", got %s", mc.Annotations[operatorv1.ClusterAnnotationsKeyPrefix+"/"+aws_irsa.ManagedClusterIAMRoleSuffix])
+			}
+
+			return nil
+		}, eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed())
+
+	})
+})
@@ -8,7 +8,10 @@ import (
 	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 
+	operatorv1 "open-cluster-management.io/api/operator/v1"
+
 	commonoptions "open-cluster-management.io/ocm/pkg/common/options"
+	"open-cluster-management.io/ocm/pkg/registration/register/aws_irsa"
 	"open-cluster-management.io/ocm/pkg/registration/spoke"
 	"open-cluster-management.io/ocm/test/integration/util"
 )
@@ -49,6 +52,14 @@ var _ = ginkgo.Describe("Cluster Annotations", func() {
 			return fmt.Errorf("unexpected annotations %v", mc.Annotations)
 		}
 
+		if _, ok := mc.Annotations[operatorv1.ClusterAnnotationsKeyPrefix+"/"+aws_irsa.ManagedClusterArn]; ok {
+			return fmt.Errorf("unexpected annotations %v", mc.Annotations)
+		}
+
+		if _, ok := mc.Annotations[operatorv1.ClusterAnnotationsKeyPrefix+"/"+aws_irsa.ManagedClusterIAMRoleSuffix]; ok {
+			return fmt.Errorf("unexpected annotations %v", mc.Annotations)
+		}
+
 		if mc.Annotations["agent.open-cluster-management.io/foo"] != "bar" {
 			return fmt.Errorf("expected annotation agent.open-cluster-management.io/foo to be bar, got %s", mc.Annotations["agent.open-cluster-management.io/foo"])
 		}
test/integration/registration/spokecluster_aws_joining_test.go (new file, 119 lines)
@@ -0,0 +1,119 @@
+package registration_test
+
+import (
+	"fmt"
+	"path"
+	"time"
+
+	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
+	"k8s.io/apimachinery/pkg/util/rand"
+
+	commonoptions "open-cluster-management.io/ocm/pkg/common/options"
+	"open-cluster-management.io/ocm/pkg/registration/spoke"
+	"open-cluster-management.io/ocm/test/integration/util"
+)
+
+var _ = ginkgo.Describe("Joining Process for aws flow", func() {
+	var bootstrapKubeconfig string
+	var managedClusterName string
+	var hubKubeconfigSecret string
+	var hubKubeconfigDir string
+
+	ginkgo.BeforeEach(func() {
+		postfix := rand.String(5)
+		managedClusterName = fmt.Sprintf("joiningtest-managedcluster-%s", postfix)
+		hubKubeconfigSecret = fmt.Sprintf("joiningtest-hub-kubeconfig-secret-%s", postfix)
+		hubKubeconfigDir = path.Join(util.TestDir, fmt.Sprintf("joiningtest-%s", postfix), "hub-kubeconfig")
+	})
+
+	assertJoiningSucceed := func() {
+		ginkgo.It("managedcluster should join successfully for aws flow", func() {
+			var err error
+
+			managedClusterArn := "arn:aws:eks:us-west-2:123456789012:cluster/managed-cluster1"
+			managedClusterRoleSuffix := "7f8141296c75f2871e3d030f85c35692"
+			hubClusterArn := "arn:aws:eks:us-west-2:123456789012:cluster/hub-cluster1"
+
+			// run registration agent
+			agentOptions := &spoke.SpokeAgentOptions{
+				RegistrationAuth:         spoke.AwsIrsaAuthType,
+				HubClusterArn:            hubClusterArn,
+				ManagedClusterArn:        managedClusterArn,
+				ManagedClusterRoleSuffix: managedClusterRoleSuffix,
+				BootstrapKubeconfig:      bootstrapKubeconfig,
+				HubKubeconfigSecret:      hubKubeconfigSecret,
+				ClusterHealthCheckPeriod: 1 * time.Minute,
+			}
+			commOptions := commonoptions.NewAgentOptions()
+			commOptions.HubKubeconfigDir = hubKubeconfigDir
+			commOptions.SpokeClusterName = managedClusterName
+
+			cancel := runAgent("joiningtest", agentOptions, commOptions, spokeCfg)
+			defer cancel()
+
+			// the ManagedCluster CR should be created after bootstrap
+			gomega.Eventually(func() error {
+				if _, err := util.GetManagedCluster(clusterClient, managedClusterName); err != nil {
+					return err
+				}
+				return nil
+			}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
+
+			// the csr should not be created for aws flow after bootstrap
+			gomega.Eventually(func() error {
+				if _, err := util.FindUnapprovedSpokeCSR(kubeClient, managedClusterName); err != nil {
+					return err
+				}
+				return nil
+			}, eventuallyTimeout, eventuallyInterval).Should(gomega.HaveOccurred())
+
+			// simulate hub cluster admin to accept the managedcluster
+			err = util.AcceptManagedCluster(clusterClient, managedClusterName)
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+			err = authn.ApproveSpokeClusterCSR(kubeClient, managedClusterName, time.Hour*24)
+			gomega.Expect(err).To(gomega.HaveOccurred())
+
+			// the hub kubeconfig secret should be filled after the ManagedCluster is accepted
+			// TODO: Revisit while implementing slice 3
+			//gomega.Eventually(func() error {
+			//	secret, err := util.GetFilledHubKubeConfigSecret(kubeClient, testNamespace, hubKubeconfigSecret)
+			//	if err != nil {
+			//		return err
+			//	}
+			//
+			//	// check if the proxyURL is set correctly
+			//	proxyURL, err := getProxyURLFromKubeconfigData(secret.Data["kubeconfig"])
+			//	if err != nil {
+			//		return err
+			//	}
+			//	if proxyURL != expectedProxyURL {
+			//		return fmt.Errorf("expected proxy url %q, but got %q", expectedProxyURL, proxyURL)
+			//	}
+			//	return nil
+			//}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
+
+			// the spoke cluster should have joined condition finally
+			// TODO: Revisit while implementing slice 3
+			//gomega.Eventually(func() error {
+			//	spokeCluster, err := util.GetManagedCluster(clusterClient, managedClusterName)
+			//	if err != nil {
+			//		return err
+			//	}
+			//	if !meta.IsStatusConditionTrue(spokeCluster.Status.Conditions, clusterv1.ManagedClusterConditionJoined) {
+			//		return fmt.Errorf("cluster should be joined")
+			//	}
+			//	return nil
+			//}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
+		})
+	}
+
+	ginkgo.Context("without proxy", func() {
+		ginkgo.BeforeEach(func() {
+			bootstrapKubeconfig = bootstrapKubeConfigFile
+		})
+		assertJoiningSucceed()
+	})
+
+})