mirror of
https://github.com/open-cluster-management-io/ocm.git
synced 2026-02-14 18:09:57 +00:00
✨ Completing aws registration on spoke (#788)
Some checks failed
Scorecard supply-chain security / Scorecard analysis (push) Failing after 40s
Post / images (amd64) (push) Failing after 5m45s
Post / images (arm64) (push) Failing after 5m45s
Post / image manifest (push) Has been skipped
Post / trigger clusteradm e2e (push) Has been skipped
Post / coverage (push) Failing after 26m38s
Close stale issues and PRs / stale (push) Successful in 6s
* Completing aws registration on spoke
Signed-off-by: suvaanshkumar <suvaanshkumar@gmail.com>

* adding new function to overcome gci errors on slices
Signed-off-by: suvaanshkumar <suvaanshkumar@gmail.com>

* Refactoring array contains function
Signed-off-by: Gaurav Jaswal <jaswalkiranavtar@gmail.com>

---------

Signed-off-by: suvaanshkumar <suvaanshkumar@gmail.com>
Signed-off-by: Gaurav Jaswal <jaswalkiranavtar@gmail.com>
Co-authored-by: suvaanshkumar <suvaanshkumar@gmail.com>
@@ -130,7 +130,7 @@ type ManagedClusterIamRole struct {
}

func (managedClusterIamRole *ManagedClusterIamRole) arn() string {
    managedClusterAccountId, _ := getAwsAccountIdAndClusterName(managedClusterIamRole.AwsIrsa.ManagedClusterArn)
    managedClusterAccountId, _ := GetAwsAccountIdAndClusterName(managedClusterIamRole.AwsIrsa.ManagedClusterArn)
    md5HashUniqueIdentifier := managedClusterIamRole.md5HashSuffix()

    //arn:aws:iam::<managed-cluster-account-id>:role/ocm-managed-cluster-<md5-hash-unique-identifier>
@@ -138,8 +138,8 @@ func (managedClusterIamRole *ManagedClusterIamRole) arn() string {
}

func (managedClusterIamRole *ManagedClusterIamRole) md5HashSuffix() string {
    hubClusterAccountId, hubClusterName := getAwsAccountIdAndClusterName(managedClusterIamRole.AwsIrsa.HubClusterArn)
    managedClusterAccountId, managedClusterName := getAwsAccountIdAndClusterName(managedClusterIamRole.AwsIrsa.ManagedClusterArn)
    hubClusterAccountId, hubClusterName := GetAwsAccountIdAndClusterName(managedClusterIamRole.AwsIrsa.HubClusterArn)
    managedClusterAccountId, managedClusterName := GetAwsAccountIdAndClusterName(managedClusterIamRole.AwsIrsa.ManagedClusterArn)

    hash := md5.Sum([]byte(strings.Join([]string{hubClusterAccountId, hubClusterName, managedClusterAccountId, managedClusterName}, "#"))) // #nosec G401
    return hex.EncodeToString(hash[:])
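For reference, a minimal standalone sketch (not part of the commit) of how this suffix is derived: the hub and managed cluster account IDs and names are joined with "#" and md5-hashed, so the role name stays stable for a given hub/spoke pair. The account IDs and cluster names below are hypothetical.

package main

// Standalone sketch of the md5HashSuffix logic above; all values are made up.
import (
    "crypto/md5"
    "encoding/hex"
    "fmt"
    "strings"
)

func main() {
    // Hypothetical identity parts parsed from the hub and managed cluster EKS ARNs.
    hubAccountId, hubName := "123456789012", "hub-cluster1"
    managedAccountId, managedName := "210987654321", "spoke-cluster1"

    hash := md5.Sum([]byte(strings.Join([]string{hubAccountId, hubName, managedAccountId, managedName}, "#"))) // #nosec G401
    suffix := hex.EncodeToString(hash[:])

    // The suffix then appears in role names such as ocm-managed-cluster-<suffix> and ocm-hub-<suffix>.
    fmt.Println(suffix)
}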
@@ -574,9 +574,14 @@ func serviceAccountName(suffix string, klusterlet *operatorapiv1.Klusterlet) str
    return fmt.Sprintf("%s-%s", klusterlet.Name, suffix)
}

func getAwsAccountIdAndClusterName(clusterArn string) (string, string) {
func GetAwsAccountIdAndClusterName(clusterArn string) (string, string) {
    clusterStringParts := strings.Split(clusterArn, ":")
    clusterName := strings.Split(clusterStringParts[5], "/")[1]
    awsAccountId := clusterStringParts[4]
    return awsAccountId, clusterName
}

func GetAwsRegion(clusterArn string) string {
    clusterStringParts := strings.Split(clusterArn, ":")
    return clusterStringParts[3]
}
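For context, a minimal standalone sketch of how these helpers decompose an EKS cluster ARN; the ARN value below is a hypothetical example, not taken from the commit.

package main

// Standalone sketch mirroring GetAwsAccountIdAndClusterName and GetAwsRegion
// for a hypothetical EKS cluster ARN.
import (
    "fmt"
    "strings"
)

func main() {
    clusterArn := "arn:aws:eks:us-west-2:123456789012:cluster/hub-cluster1" // hypothetical

    parts := strings.Split(clusterArn, ":")
    awsRegion := parts[3]                          // "us-west-2"
    awsAccountId := parts[4]                       // "123456789012"
    clusterName := strings.Split(parts[5], "/")[1] // "hub-cluster1"

    fmt.Println(awsRegion, awsAccountId, clusterName)
}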
@@ -1,12 +1,19 @@
package aws_irsa

import (
    "context"
    "fmt"

    apierrors "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/api/meta"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/tools/cache"

    cluster "open-cluster-management.io/api/client/cluster/clientset/versioned"
    managedclusterv1client "open-cluster-management.io/api/client/cluster/clientset/versioned/typed/cluster/v1"
    managedclusterinformers "open-cluster-management.io/api/client/cluster/informers/externalversions/cluster"
    managedclusterv1lister "open-cluster-management.io/api/client/cluster/listers/cluster/v1"
    v1 "open-cluster-management.io/api/cluster/v1"
)

type AWSIRSAControl interface {
@@ -26,14 +33,23 @@ type v1AWSIRSAControl struct {
}

func (v *v1AWSIRSAControl) isApproved(name string) (bool, error) {
    // TODO: check if the managedclusuter cr on hub has required condition and is approved
    managedcluster, err := v.get(name)
    if err != nil {
        return false, err
    }
    v1Managedcluster := managedcluster.(*v1.ManagedCluster)
    approved := false

    condition := meta.FindStatusCondition(v1Managedcluster.Status.Conditions, v1.ManagedClusterConditionHubAccepted)
    if condition != nil {
        approved = true
    } else {
        return false, nil
    }
    return approved, nil
}

func (v *v1AWSIRSAControl) generateEKSKubeConfig(name string) ([]byte, error) {
    // TODO: generate and return kubeconfig
    // TODO: generate and return kubeconfig, remove this if not needed
    return nil, nil
}
@@ -41,8 +57,6 @@ func (v *v1AWSIRSAControl) Informer() cache.SharedIndexInformer {
    return v.hubManagedClusterInformer
}

//TODO: Uncomment the below once required in the aws irsa authentication implementation
/*
func (v *v1AWSIRSAControl) get(name string) (metav1.Object, error) {
    managedcluster, err := v.hubManagedClusterLister.Get(name)
    switch {
@@ -57,7 +71,6 @@ func (v *v1AWSIRSAControl) get(name string) (metav1.Object, error) {
    }
    return managedcluster, nil
}
*/

func NewAWSIRSAControl(hubManagedClusterInformer managedclusterinformers.Interface, hubManagedClusterClient cluster.Interface) (AWSIRSAControl, error) {
    return &v1AWSIRSAControl{
@@ -11,11 +11,11 @@ import (
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    "k8s.io/client-go/tools/cache"
    clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
    "k8s.io/klog/v2"

    clusterv1 "open-cluster-management.io/api/cluster/v1"
    operatorv1 "open-cluster-management.io/api/operator/v1"

    "open-cluster-management.io/ocm/pkg/operator/operators/klusterlet/controllers/klusterletcontroller"
    "open-cluster-management.io/ocm/pkg/registration/register"
)
@@ -33,20 +33,19 @@ const (
type AWSIRSADriver struct {
    name string
    managedClusterArn string
    hubClusterArn string
    managedClusterRoleSuffix string
}

func (c *AWSIRSADriver) Process(
    ctx context.Context, controllerName string, secret *corev1.Secret, additionalSecretData map[string][]byte,
    recorder events.Recorder, opt any) (*corev1.Secret, *metav1.Condition, error) {
    logger := klog.FromContext(ctx)

    awsOption, ok := opt.(*AWSOption)
    if !ok {
        return nil, nil, fmt.Errorf("option type is not correct")
    }

    // TODO: skip if registration request is not accepted yet, that is the required condition is missing on ManagedCluster CR
    isApproved, err := awsOption.AWSIRSAControl.isApproved(c.name)
    if err != nil {
        return nil, nil, err
@@ -55,37 +54,31 @@ func (c *AWSIRSADriver) Process(
        return nil, nil, nil
    }

    // TODO: Generate kubeconfig if the request is accepted
    eksKubeConfigData, err := awsOption.AWSIRSAControl.generateEKSKubeConfig(c.name)
    if err != nil {
        return nil, nil, err
    }
    if len(eksKubeConfigData) == 0 {
        return nil, nil, nil
    }

    secret.Data["kubeconfig"] = eksKubeConfigData
    logger.Info("Store kubeconfig into the secret.")

    recorder.Eventf("EKSHubKubeconfigCreated", "A new eks hub Kubeconfig for %s is available", controllerName)
    //return secret, cond, err
    return secret, nil, err
    recorder.Eventf("EKSRegistrationRequestApproved", "An EKS registration request is approved for %s", controllerName)
    return secret, nil, nil
}

//TODO: Uncomment the below once required in the aws irsa authentication implementation

/*
func (c *AWSIRSADriver) reset() {
    c.name = ""
}
*/

func (c *AWSIRSADriver) BuildKubeConfigFromTemplate(kubeConfig *clientcmdapi.Config) *clientcmdapi.Config {
    hubClusterAccountId, hubClusterName := klusterletcontroller.GetAwsAccountIdAndClusterName(c.hubClusterArn)
    awsRegion := klusterletcontroller.GetAwsRegion(c.hubClusterArn)
    kubeConfig.AuthInfos = map[string]*clientcmdapi.AuthInfo{register.DefaultKubeConfigAuth: {
        ClientCertificate: TLSCertFile,
        ClientKey: TLSKeyFile,
        Exec: &clientcmdapi.ExecConfig{
            APIVersion: "client.authentication.k8s.io/v1beta1",
            Command: "aws",
            Args: []string{
                "--region",
                awsRegion,
                "eks",
                "get-token",
                "--cluster-name",
                hubClusterName,
                "--output",
                "json",
                "--role",
                fmt.Sprintf("arn:aws:iam::%s:role/ocm-hub-%s", hubClusterAccountId, c.managedClusterRoleSuffix),
            },
        },
    }}

    return kubeConfig
}
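For context, a minimal standalone sketch (not part of the commit) of the exec credential entry that BuildKubeConfigFromTemplate produces. The hub ARN and role suffix below are hypothetical values, and the ARN parsing helpers are inlined rather than imported.

package main

// Standalone sketch of the exec-plugin auth entry built above; all inputs are made up.
import (
    "fmt"
    "strings"

    clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

func main() {
    hubClusterArn := "arn:aws:eks:us-west-2:123456789012:cluster/hub-cluster1" // hypothetical
    roleSuffix := "7f1e1f3d2c9b4a5e8d6c0b1a2f3e4d5c"                           // hypothetical md5 suffix

    parts := strings.Split(hubClusterArn, ":")
    awsRegion, accountId := parts[3], parts[4]
    hubClusterName := strings.Split(parts[5], "/")[1]

    exec := &clientcmdapi.ExecConfig{
        APIVersion: "client.authentication.k8s.io/v1beta1",
        Command:    "aws",
        Args: []string{
            "--region", awsRegion,
            "eks", "get-token",
            "--cluster-name", hubClusterName,
            "--output", "json",
            "--role", fmt.Sprintf("arn:aws:iam::%s:role/ocm-hub-%s", accountId, roleSuffix),
        },
    }

    // The registration agent's hub kubeconfig runs this command to mint a hub token.
    fmt.Println(exec.Command, strings.Join(exec.Args, " "))
}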
@@ -111,9 +104,11 @@ func (c *AWSIRSADriver) ManagedClusterDecorator(cluster *clusterv1.ManagedCluste
    return cluster
}

func NewAWSIRSADriver(managedClusterArn string, managedClusterRoleSuffix string) register.RegisterDriver {
func NewAWSIRSADriver(managedClusterArn string, managedClusterRoleSuffix string, hubClusterArn string, name string) register.RegisterDriver {
    return &AWSIRSADriver{
        managedClusterArn: managedClusterArn,
        managedClusterRoleSuffix: managedClusterRoleSuffix,
        hubClusterArn: hubClusterArn,
        name: name,
    }
}
@@ -230,7 +230,7 @@ func TestIsHubKubeConfigValidFunc(t *testing.T) {
    }
    for _, c := range cases {
        t.Run(c.name, func(t *testing.T) {
            driver := NewAWSIRSADriver("", "")
            driver := NewAWSIRSADriver("", "", "", "")
            secretOption := register.SecretOption{
                ClusterName: c.clusterName,
                AgentName: c.agentName,
@@ -1,6 +1,7 @@
package aws_irsa

import (
    "fmt"
    "reflect"
    "testing"
@@ -12,6 +13,7 @@ import (
    managedclusterv1lister "open-cluster-management.io/api/client/cluster/listers/cluster/v1"

    "open-cluster-management.io/ocm/pkg/registration/register"
    "open-cluster-management.io/ocm/test/integration/util"
)

func TestIsCRApproved(t *testing.T) {
@@ -49,21 +51,27 @@ func TestBuildKubeconfig(t *testing.T) {
        caData []byte
        clientCertFile string
        clientKeyFile string
        AuthInfoExec *clientcmdapi.ExecConfig
    }{
        {
            name: "without proxy",
            server: "https://127.0.0.1:6443",
            caData: []byte("fake-ca-bundle"),
            clientCertFile: "tls.crt",
            clientKeyFile: "tls.key",
        },
        {
            name: "with proxy",
            server: "https://127.0.0.1:6443",
            caData: []byte("fake-ca-bundle-with-proxy-ca"),
            proxyURL: "https://127.0.0.1:3129",
            clientCertFile: "tls.crt",
            clientKeyFile: "tls.key",
            name: "without proxy",
            server: "https://127.0.0.1:6443",
            AuthInfoExec: &clientcmdapi.ExecConfig{
                APIVersion: "client.authentication.k8s.io/v1beta1",
                Command: "aws",
                Args: []string{
                    "--region",
                    "us-west-2",
                    "eks",
                    "get-token",
                    "--cluster-name",
                    "hub-cluster1",
                    "--output",
                    "json",
                    "--role",
                    fmt.Sprintf("arn:aws:iam::123456789012:role/ocm-hub-%s", ManagedClusterIAMRoleSuffix),
                },
            },
        },
    }
    for _, c := range cases {
@@ -71,10 +79,8 @@ func TestBuildKubeconfig(t *testing.T) {
            bootstrapKubeconfig := &clientcmdapi.Config{
                Clusters: map[string]*clientcmdapi.Cluster{
                    "default-cluster": {
                        Server: c.server,
                        InsecureSkipTLSVerify: false,
                        CertificateAuthorityData: c.caData,
                        ProxyURL: c.proxyURL,
                        Server: c.server,
                        InsecureSkipTLSVerify: false,
                    }},
                // Define a context that connects the auth info and cluster, and set it as the default
                Contexts: map[string]*clientcmdapi.Context{register.DefaultKubeConfigContext: {
@@ -92,6 +98,8 @@ func TestBuildKubeconfig(t *testing.T) {
            }

            registerImpl := &AWSIRSADriver{}
            registerImpl.hubClusterArn = util.HubClusterArn
            registerImpl.managedClusterRoleSuffix = ManagedClusterIAMRoleSuffix
            kubeconfig := registerImpl.BuildKubeConfigFromTemplate(bootstrapKubeconfig)
            currentContext, ok := kubeconfig.Contexts[kubeconfig.CurrentContext]
            if !ok {
@@ -107,26 +115,23 @@ func TestBuildKubeconfig(t *testing.T) {
                t.Errorf("expected server %q, but got %q", c.server, cluster.Server)
            }

            if cluster.ProxyURL != c.proxyURL {
                t.Errorf("expected proxy URL %q, but got %q", c.proxyURL, cluster.ProxyURL)
            }

            if !reflect.DeepEqual(cluster.CertificateAuthorityData, c.caData) {
                t.Errorf("expected ca data %v, but got %v", c.caData, cluster.CertificateAuthorityData)
            }

            authInfo, ok := kubeconfig.AuthInfos[currentContext.AuthInfo]
            if !ok {
                t.Errorf("auth info %q not found: %v", currentContext.AuthInfo, kubeconfig)
            }

            if authInfo.ClientCertificate != c.clientCertFile {
                t.Errorf("expected client certificate %q, but got %q", c.clientCertFile, authInfo.ClientCertificate)
            if authInfo.Exec.APIVersion != c.AuthInfoExec.APIVersion {
                t.Errorf("The value of api version is %s but is expected to be %s", authInfo.Exec.APIVersion, c.AuthInfoExec.APIVersion)
            }

            if authInfo.ClientKey != c.clientKeyFile {
                t.Errorf("expected client key %q, but got %q", c.clientKeyFile, authInfo.ClientKey)
            if authInfo.Exec.Command != c.AuthInfoExec.Command {
                t.Errorf("Value of AuthInfo.Exec.Command is expected to be %s but got %s", authInfo.Exec.Command, c.AuthInfoExec.Command)
            }

            if !reflect.DeepEqual(authInfo.Exec.Args, c.AuthInfoExec.Args) {
                t.Errorf("Value of AuthInfo.Exec.Args is expected to be %s but got %s", authInfo.Exec.Args, c.AuthInfoExec.Args)
            }

        })
    }
}
@@ -2,7 +2,6 @@ package aws_irsa

import (
    "fmt"
    "strings"

    "github.com/openshift/library-go/pkg/controller/factory"
    "k8s.io/apimachinery/pkg/api/meta"
@@ -10,7 +9,6 @@ import (
    addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
    hubclusterclientset "open-cluster-management.io/api/client/cluster/clientset/versioned"
    managedclusterinformers "open-cluster-management.io/api/client/cluster/informers/externalversions/cluster"
    clusterv1 "open-cluster-management.io/api/cluster/v1"

    "open-cluster-management.io/ocm/pkg/registration/register"
)
@@ -18,8 +16,7 @@ import (
// AWSOption includes options that is used to monitor ManagedClusters
type AWSOption struct {
    EventFilterFunc factory.EventFilterFunc

    AWSIRSAControl AWSIRSAControl
    AWSIRSAControl AWSIRSAControl
}

func NewAWSOption(
@@ -35,16 +32,11 @@ func NewAWSOption(
    }
    return &AWSOption{
        EventFilterFunc: func(obj interface{}) bool {
            // TODO: implement EventFilterFunc and update below
            accessor, err := meta.Accessor(obj)
            if err != nil {
                return false
            }
            labels := accessor.GetLabels()
            // only enqueue csr from a specific managed cluster
            if labels[clusterv1.ClusterNameLabelKey] != secretOption.ClusterName {
                return false
            }

            // should not contain addon key
            _, ok := labels[addonv1alpha1.AddonLabelKey]
@@ -53,7 +45,7 @@
            }

            // only enqueue csr whose name starts with the cluster name
            return strings.HasPrefix(accessor.GetName(), fmt.Sprintf("%s-", secretOption.ClusterName))
            return accessor.GetName() == secretOption.ClusterName
        },
        AWSIRSAControl: awsIrsaControl,
    }, nil
@@ -190,9 +190,11 @@ func (o *SpokeAgentConfig) RunSpokeAgentWithSpokeInformers(ctx context.Context,

    // initiate registration driver
    var registerDriver register.RegisterDriver
    var registrationOption = o.registrationOption
    if registrationOption.RegistrationAuth == AwsIrsaAuthType {
        registerDriver = awsIrsa.NewAWSIRSADriver(o.registrationOption.ManagedClusterArn, o.registrationOption.ManagedClusterRoleSuffix)
    if o.registrationOption.RegistrationAuth == AwsIrsaAuthType {
        registerDriver = awsIrsa.NewAWSIRSADriver(o.registrationOption.ManagedClusterArn,
            o.registrationOption.ManagedClusterRoleSuffix,
            o.registrationOption.HubClusterArn,
            o.agentOptions.SpokeClusterName)
    } else {
        registerDriver = csr.NewCSRDriver()
    }
@@ -331,6 +333,7 @@ func (o *SpokeAgentConfig) RunSpokeAgentWithSpokeInformers(ctx context.Context,
        secretOption, registrationAuthOption, o.driver, register.GenerateBootstrapStatusUpdater(), recorder, controllerName)

    go bootstrapInformerFactory.Start(bootstrapCtx.Done())
    go bootstrapClusterInformerFactory.Start(bootstrapCtx.Done())
    go secretController.Run(bootstrapCtx, 1)

    // Wait for the hub client config is ready.
@@ -1,6 +1,7 @@
package registration_test

import (
    "bytes"
    "fmt"
    "path"
    "time"
@@ -8,8 +9,12 @@ import (
    "github.com/onsi/ginkgo/v2"
    "github.com/onsi/gomega"
    "k8s.io/apimachinery/pkg/util/rand"
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/client-go/tools/clientcmd"

    commonoptions "open-cluster-management.io/ocm/pkg/common/options"
    "open-cluster-management.io/ocm/pkg/operator/operators/klusterlet/controllers/klusterletcontroller"
    "open-cluster-management.io/ocm/pkg/registration/register"
    "open-cluster-management.io/ocm/pkg/registration/spoke"
    "open-cluster-management.io/ocm/test/integration/util"
)
@@ -75,37 +80,84 @@ var _ = ginkgo.Describe("Joining Process for aws flow", func() {
        err = authn.ApproveSpokeClusterCSR(kubeClient, managedClusterName, time.Hour*24)
        gomega.Expect(err).To(gomega.HaveOccurred())

        // the hub kubeconfig secret should be filled after the ManagedCluster is accepted
        // TODO: Revisit while implementing slice 3
        //gomega.Eventually(func() error {
        // secret, err := util.GetFilledHubKubeConfigSecret(kubeClient, testNamespace, hubKubeconfigSecret)
        // if err != nil {
        // return err
        // }
        //
        // // check if the proxyURL is set correctly
        // proxyURL, err := getProxyURLFromKubeconfigData(secret.Data["kubeconfig"])
        // if err != nil {
        // return err
        // }
        // if proxyURL != expectedProxyURL {
        // return fmt.Errorf("expected proxy url %q, but got %q", expectedProxyURL, proxyURL)
        // }
        // return nil
        //}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
        // Kubeconfig secret in integration test for AWS won't be able to connect to hub server, since it is not in the eks environment
        // So we only ensure that generated hub-kubeconfig-secret has a correct format

        gomega.Eventually(func() error {
            secret, err := util.GetHubKubeConfigFromSecret(kubeClient, testNamespace, hubKubeconfigSecret)
            if err != nil {
                return err
            }

            hubKubeConfig, err := clientcmd.Load(secret.Data["kubeconfig"])
            if err != nil {
                return err
            }
            hubCurrentContext, ok := hubKubeConfig.Contexts[hubKubeConfig.CurrentContext]
            if !ok {
                return fmt.Errorf("context pointed to by the current-context property is missing")
            }
            hubCluster, ok := hubKubeConfig.Clusters[hubCurrentContext.Cluster]
            if !ok {
                return fmt.Errorf("cluster pointed to by the current-context is missing")
            }

            hubUser, ok := hubKubeConfig.AuthInfos[hubCurrentContext.AuthInfo]
            if !ok {
                return fmt.Errorf("user pointed to by the current-context is missing")
            }
            if hubUser.Exec.APIVersion != "client.authentication.k8s.io/v1beta1" {
                return fmt.Errorf("user exec plugun apiVersion is invalid")
            }
            if hubUser.Exec.Command != "aws" {
                return fmt.Errorf("user exec plugun command is invalid")
            }

            hubClusterAccountId, hubClusterName := klusterletcontroller.GetAwsAccountIdAndClusterName(hubClusterArn)
            awsRegion := klusterletcontroller.GetAwsRegion(hubClusterArn)

            if !contains(hubUser.Exec.Args, fmt.Sprintf("arn:aws:iam::%s:role/ocm-hub-%s", hubClusterAccountId, managedClusterRoleSuffix)) ||
                !contains(hubUser.Exec.Args, hubClusterName) ||
                !contains(hubUser.Exec.Args, awsRegion) {
                return fmt.Errorf("aws get-token command is not well formed")
            }

            bootstrapKubeConfig, err := clientcmd.LoadFromFile(agentOptions.BootstrapKubeconfig)
            if err != nil {
                return err
            }
            bootstrapCurrentContext, ok := bootstrapKubeConfig.Contexts[bootstrapKubeConfig.CurrentContext]
            if !ok {
                return fmt.Errorf("context pointed to by the current-context property is missing")
            }
            bootstrapCluster, ok := bootstrapKubeConfig.Clusters[bootstrapCurrentContext.Cluster]
            if !ok {
                return fmt.Errorf("cluster pointed to by the current-context is missing")
            }

            if bootstrapCurrentContext.Cluster != hubCurrentContext.Cluster {
                return fmt.Errorf("cluster name mismatch in hub kubeconfig and bootstrap kubeconfig")
            }
            if hubCluster.Server == "" {
                return fmt.Errorf("serverUrl missing in hub kubeconfig")
            }
            if hubCluster.Server != bootstrapCluster.Server {
                return fmt.Errorf("serverUrl mismatch in hub kubeconfig and bootstrap kubeconfig")
            }
            if hubCluster.CertificateAuthorityData == nil {
                return fmt.Errorf("certificateAuthorityData missing in hub kubeconfig")
            }
            if !bytes.Equal(hubCluster.CertificateAuthorityData, bootstrapCluster.CertificateAuthorityData) {
                return fmt.Errorf("certificateAuthorityData mismatch in hub kubeconfig and bootstrap kubeconfig")
            }

            if string(secret.Data[register.ClusterNameFile]) != managedClusterName {
                return fmt.Errorf("invalid clustername in hub-kubeconfig-secret")
            }

            return nil
        }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())

        // the spoke cluster should have joined condition finally
        // TODO: Revisit while implementing slice 3
        //gomega.Eventually(func() error {
        // spokeCluster, err := util.GetManagedCluster(clusterClient, managedClusterName)
        // if err != nil {
        // return err
        // }
        // if !meta.IsStatusConditionTrue(spokeCluster.Status.Conditions, clusterv1.ManagedClusterConditionJoined) {
        // return fmt.Errorf("cluster should be joined")
        // }
        // return nil
        //}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
    })
}
@@ -117,3 +169,8 @@ var _ = ginkgo.Describe("Joining Process for aws flow", func() {
    })

})

func contains(slice []string, value string) bool {
    stringSet := sets.New[string](slice...)
    return stringSet.Has(value)
}
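As a side note, this helper is the "array contains" refactor mentioned in the commit message: it builds a set from the slice and checks membership. A minimal standalone usage sketch follows; the argument values are illustrative, not taken from the test.

package main

// Standalone sketch of the contains helper above, using the generic
// k8s.io/apimachinery sets package.
import (
    "fmt"

    "k8s.io/apimachinery/pkg/util/sets"
)

func contains(slice []string, value string) bool {
    return sets.New[string](slice...).Has(value)
}

func main() {
    args := []string{"--region", "us-west-2", "eks", "get-token"} // illustrative args
    fmt.Println(contains(args, "us-west-2"), contains(args, "eu-central-1"))
}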
@@ -516,6 +516,21 @@ func PrepareSpokeAgentNamespace(kubeClient kubernetes.Interface, namespace strin
}

func GetFilledHubKubeConfigSecret(kubeClient kubernetes.Interface, secretNamespace, secretName string) (*corev1.Secret, error) {
    secret, err := GetHubKubeConfigFromSecret(kubeClient, secretNamespace, secretName)
    if err != nil {
        return nil, err
    }
    if _, existed := secret.Data["tls.crt"]; !existed {
        return nil, fmt.Errorf("tls.crt is not found")
    }

    if _, existed := secret.Data["tls.key"]; !existed {
        return nil, fmt.Errorf("tls.key is not found")
    }
    return secret, nil
}

func GetHubKubeConfigFromSecret(kubeClient kubernetes.Interface, secretNamespace, secretName string) (*corev1.Secret, error) {
    secret, err := kubeClient.CoreV1().Secrets(secretNamespace).Get(context.TODO(), secretName, metav1.GetOptions{})
    if err != nil {
        return nil, err
@@ -531,14 +546,6 @@ func GetFilledHubKubeConfigSecret(kubeClient kubernetes.Interface, secretNamespa
    if _, existed := secret.Data["kubeconfig"]; !existed {
        return nil, fmt.Errorf("kubeconfig is not found")
    }

    if _, existed := secret.Data["tls.crt"]; !existed {
        return nil, fmt.Errorf("tls.crt is not found")
    }

    if _, existed := secret.Data["tls.key"]; !existed {
        return nil, fmt.Errorf("tls.key is not found")
    }
    return secret, nil
}