Mirror of https://github.com/open-cluster-management-io/ocm.git, synced 2026-05-10 03:07:59 +00:00
Merge pull request #83 from skeeey/bootstrap-secret
reconcile klusterlet bootstrap secret
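In short, the new controller makes the klusterlet re-bootstrap whenever the hub kube-apiserver endpoint or CA bundle in the bootstrap kubeconfig secret no longer matches the hub kubeconfig the agent is currently using. A minimal standalone sketch of that decision follows; it is not the PR's code, and the package and helper names (bootstrapcheck, currentCluster, needRebootstrap) are illustrative only:

package bootstrapcheck // illustrative package name, not part of the PR

import (
	"bytes"
	"fmt"

	"k8s.io/client-go/tools/clientcmd"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

// currentCluster returns the cluster entry referenced by the kubeconfig's current context.
func currentCluster(raw []byte) (*clientcmdapi.Cluster, error) {
	config, err := clientcmd.Load(raw)
	if err != nil {
		return nil, err
	}
	ctx, ok := config.Contexts[config.CurrentContext]
	if !ok {
		return nil, fmt.Errorf("kubeconfig has no current context")
	}
	cluster, ok := config.Clusters[ctx.Cluster]
	if !ok {
		return nil, fmt.Errorf("kubeconfig has no cluster %q", ctx.Cluster)
	}
	return cluster, nil
}

// needRebootstrap reports whether the hub apiserver endpoint or CA bundle in the
// bootstrap kubeconfig differs from the hub kubeconfig currently used by the agent.
func needRebootstrap(bootstrapKubeconfig, hubKubeconfig []byte) (bool, error) {
	bootstrap, err := currentCluster(bootstrapKubeconfig)
	if err != nil {
		return false, err
	}
	hub, err := currentCluster(hubKubeconfig)
	if err != nil {
		return false, err
	}
	return bootstrap.Server != hub.Server ||
		!bytes.Equal(bootstrap.CertificateAuthorityData, hub.CertificateAuthorityData), nil
}

When this check reports a change, the controller in the diff below deletes the hub kubeconfig secret and the agent deployments so that they are recreated against the new hub.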
@@ -31,6 +31,23 @@ func AssertGet(t *testing.T, actual clienttesting.Action, group, version, resour
	}
}

func AssertDelete(t *testing.T, actual clienttesting.Action, resource, namespace, name string) {
	t.Helper()
	deleteAction, ok := actual.(clienttesting.DeleteAction)
	if !ok {
		t.Error(spew.Sdump(actual))
	}
	if deleteAction.GetResource().Resource != resource {
		t.Error(spew.Sdump(actual))
	}
	if deleteAction.GetNamespace() != namespace {
		t.Error(spew.Sdump(actual))
	}
	if deleteAction.GetName() != name {
		t.Error(spew.Sdump(actual))
	}
}

func NamedCondition(name, reason string, status metav1.ConditionStatus) metav1.Condition {
	return metav1.Condition{Type: name, Status: status, Reason: reason}
}
@@ -0,0 +1,188 @@
package bootstrapcontroller

import (
	"bytes"
	"context"
	"fmt"
	"strings"

	operatorinformer "github.com/open-cluster-management/api/client/operator/informers/externalversions/operator/v1"
	operatorlister "github.com/open-cluster-management/api/client/operator/listers/operator/v1"
	"github.com/open-cluster-management/registration-operator/pkg/helpers"

	"github.com/openshift/library-go/pkg/controller/factory"
	"github.com/openshift/library-go/pkg/operator/events"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	coreinformer "k8s.io/client-go/informers/core/v1"
	"k8s.io/client-go/kubernetes"
	corelister "k8s.io/client-go/listers/core/v1"
	"k8s.io/client-go/tools/clientcmd"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
	"k8s.io/klog/v2"
)

// bootstrapController watches the bootstrap hub kubeconfig secret. If the hub kube-apiserver CA bundle or
// endpoint recorded in that secret changes, the controller makes the klusterlet re-bootstrap to fetch a new
// hub kubeconfig from the hub cluster, by deleting the current hub kubeconfig secret and restarting the
// klusterlet agents.
type bootstrapController struct {
	kubeClient       kubernetes.Interface
	klusterletLister operatorlister.KlusterletLister
	secretLister     corelister.SecretLister
}

// NewBootstrapController returns a bootstrapController
func NewBootstrapController(
	kubeClient kubernetes.Interface,
	klusterletInformer operatorinformer.KlusterletInformer,
	secretInformer coreinformer.SecretInformer,
	recorder events.Recorder) factory.Controller {
	controller := &bootstrapController{
		kubeClient:       kubeClient,
		klusterletLister: klusterletInformer.Lister(),
		secretLister:     secretInformer.Lister(),
	}
	return factory.New().WithSync(controller.sync).
		WithInformersQueueKeyFunc(bootstrapSecretQueueKeyFunc(controller.klusterletLister), secretInformer.Informer()).
		ToController("BootstrapController", recorder)
}

func (k *bootstrapController) sync(ctx context.Context, controllerContext factory.SyncContext) error {
	queueKey := controllerContext.QueueKey()
	if queueKey == "" {
		return nil
	}

	keys := strings.Split(queueKey, "/")
	if len(keys) != 2 {
		// this should not happen, do nothing
		return nil
	}
	klusterletNamespace := keys[0]
	klusterletName := keys[1]

	klog.V(4).Infof("Reconciling bootstrap hub kubeconfig secret %q", klusterletNamespace+"/"+helpers.BootstrapHubKubeConfig)

	bootstrapHubKubeconfigSecret, err := k.secretLister.Secrets(klusterletNamespace).Get(helpers.BootstrapHubKubeConfig)
	switch {
	case errors.IsNotFound(err):
		// the bootstrap hub kubeconfig secret was not found, do nothing
		return nil
	case err != nil:
		return err
	}

	bootstrapKubeconfig, err := k.loadKubeConfig(bootstrapHubKubeconfigSecret)
	if err != nil {
		// a bad bootstrap secret, ignore it
		controllerContext.Recorder().Warningf("BadBootstrapSecret",
			fmt.Sprintf("unable to load hub kubeconfig from secret %q: %v", klusterletNamespace+"/"+helpers.BootstrapHubKubeConfig, err))
		return nil
	}

	hubKubeconfigSecret, err := k.secretLister.Secrets(klusterletNamespace).Get(helpers.HubKubeConfig)
	switch {
	case errors.IsNotFound(err):
		// the hub kubeconfig secret was not found, the klusterlet may not have bootstrapped yet, do nothing
		return nil
	case err != nil:
		return err
	}

	hubKubeconfig, err := k.loadKubeConfig(hubKubeconfigSecret)
	if err != nil {
		// the hub kubeconfig secret is invalid, do nothing
		controllerContext.Recorder().Warningf("BadHubKubeConfigSecret",
			fmt.Sprintf("unable to load hub kubeconfig from secret %q: %v", klusterletNamespace+"/"+helpers.HubKubeConfig, err))
		return nil
	}

	// the CA and server in the bootstrap kubeconfig secret are unchanged, ignore this change
	if bootstrapKubeconfig.Server == hubKubeconfig.Server &&
		bytes.Equal(bootstrapKubeconfig.CertificateAuthorityData, hubKubeconfig.CertificateAuthorityData) {
		return nil
	}

	// the bootstrap kubeconfig secret changed, reload the klusterlet agents
	return k.reloadAgents(ctx, controllerContext, klusterletNamespace, klusterletName)
}

// reloadAgents reloads the klusterlet agents by
// 1. making the registration agent re-bootstrap by deleting the current hub kubeconfig secret
// 2. restarting the registration and work agents to reload the new hub CA by deleting the agent deployments
func (k *bootstrapController) reloadAgents(ctx context.Context, ctrlContext factory.SyncContext, namespace, klusterletName string) error {
	if err := k.kubeClient.CoreV1().Secrets(namespace).Delete(ctx, helpers.HubKubeConfig, metav1.DeleteOptions{}); err != nil {
		return err
	}
	ctrlContext.Recorder().Eventf("HubKubeconfigSecretDeleted",
		fmt.Sprintf("the hub kubeconfig secret %q is deleted because the bootstrap secret %q changed",
			namespace+"/"+helpers.HubKubeConfig, namespace+"/"+helpers.BootstrapHubKubeConfig))

	registrationName := fmt.Sprintf("%s-registration-agent", klusterletName)
	if err := k.kubeClient.AppsV1().Deployments(namespace).Delete(ctx, registrationName, metav1.DeleteOptions{}); err != nil {
		return err
	}
	ctrlContext.Recorder().Eventf("KlusterletAgentDeploymentDeleted",
		fmt.Sprintf("the deployment %q is deleted because the bootstrap secret %q changed",
			namespace+"/"+registrationName, namespace+"/"+helpers.BootstrapHubKubeConfig))

	workName := fmt.Sprintf("%s-work-agent", klusterletName)
	if err := k.kubeClient.AppsV1().Deployments(namespace).Delete(ctx, workName, metav1.DeleteOptions{}); err != nil {
		return err
	}
	ctrlContext.Recorder().Eventf("KlusterletAgentDeploymentDeleted",
		fmt.Sprintf("the deployment %q is deleted because the bootstrap secret %q changed",
			namespace+"/"+workName, namespace+"/"+helpers.BootstrapHubKubeConfig))

	return nil
}

func (k *bootstrapController) loadKubeConfig(secret *corev1.Secret) (*clientcmdapi.Cluster, error) {
	kubeconfig, ok := secret.Data["kubeconfig"]
	if !ok {
		return nil, fmt.Errorf("unable to get kubeconfig in secret")
	}
	config, err := clientcmd.Load(kubeconfig)
	if err != nil {
		return nil, err
	}
	currentContext, ok := config.Contexts[config.CurrentContext]
	if !ok {
		return nil, fmt.Errorf("unable to get current-context in kubeconfig")
	}
	cluster, ok := config.Clusters[currentContext.Cluster]
	if !ok {
		return nil, fmt.Errorf("unable to get current cluster %q in kubeconfig", currentContext.Cluster)
	}
	return cluster, nil
}

func bootstrapSecretQueueKeyFunc(klusterletLister operatorlister.KlusterletLister) factory.ObjectQueueKeyFunc {
	return func(obj runtime.Object) string {
		accessor, err := meta.Accessor(obj)
		if err != nil {
			return ""
		}
		name := accessor.GetName()
		if name != helpers.BootstrapHubKubeConfig {
			return ""
		}

		namespace := accessor.GetNamespace()
		klusterlets, err := klusterletLister.List(labels.Everything())
		if err != nil {
			return ""
		}

		if klusterlet := helpers.FindKlusterletByNamespace(klusterlets, namespace); klusterlet != nil {
			return namespace + "/" + klusterlet.Name
		}

		return ""
	}
}
@@ -0,0 +1,200 @@
package bootstrapcontroller

import (
	"context"
	"testing"
	"time"

	fakeoperatorclient "github.com/open-cluster-management/api/client/operator/clientset/versioned/fake"
	operatorinformers "github.com/open-cluster-management/api/client/operator/informers/externalversions"
	operatorapiv1 "github.com/open-cluster-management/api/operator/v1"
	testinghelper "github.com/open-cluster-management/registration-operator/pkg/helpers/testing"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	kubeinformers "k8s.io/client-go/informers"
	fakekube "k8s.io/client-go/kubernetes/fake"
	clienttesting "k8s.io/client-go/testing"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
	clientcmdlatest "k8s.io/client-go/tools/clientcmd/api/latest"
)

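// TestSync drives the bootstrap controller sync with fake clients and verifies
// which delete actions it issues for each bootstrap/hub secret combination.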
func TestSync(t *testing.T) {
	cases := []struct {
		name            string
		queueKey        string
		objects         []runtime.Object
		validateActions func(t *testing.T, actions []clienttesting.Action)
	}{
		{
			name:    "the changed secret is not the bootstrap secret",
			objects: []runtime.Object{},
			validateActions: func(t *testing.T, actions []clienttesting.Action) {
				if len(actions) != 0 {
					t.Errorf("expected no actions, but got %#v", actions)
				}
			},
		},
		{
			name:     "the bootstrap is not started",
			queueKey: "test/test",
			objects:  []runtime.Object{newSecret("bootstrap-hub-kubeconfig", "test", newKubeConfig("https://10.0.118.47:6443"))},
			validateActions: func(t *testing.T, actions []clienttesting.Action) {
				if len(actions) != 0 {
					t.Errorf("expected no actions, but got %#v", actions)
				}
			},
		},
		{
			name:     "the bootstrap secret is not changed",
			queueKey: "test/test",
			objects: []runtime.Object{
				newSecret("bootstrap-hub-kubeconfig", "test", newKubeConfig("https://10.0.118.47:6443")),
				newSecret("hub-kubeconfig-secret", "test", newKubeConfig("https://10.0.118.47:6443")),
			},
			validateActions: func(t *testing.T, actions []clienttesting.Action) {
				if len(actions) != 0 {
					t.Errorf("expected no actions, but got %#v", actions)
				}
			},
		},
		{
			name:     "the bootstrap secret is changed",
			queueKey: "test/test",
			objects: []runtime.Object{
				newSecret("bootstrap-hub-kubeconfig", "test", newKubeConfig("https://10.0.118.47:6443")),
				newSecret("hub-kubeconfig-secret", "test", newKubeConfig("https://10.0.118.48:6443")),
				newDeployment("test-registration-agent", "test"),
				newDeployment("test-work-agent", "test"),
			},
			validateActions: func(t *testing.T, actions []clienttesting.Action) {
				testinghelper.AssertDelete(t, actions[0], "secrets", "test", "hub-kubeconfig-secret")
				testinghelper.AssertDelete(t, actions[1], "deployments", "test", "test-registration-agent")
				testinghelper.AssertDelete(t, actions[2], "deployments", "test", "test-work-agent")
			},
		},
	}

	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			fakeKubeClient := fakekube.NewSimpleClientset(c.objects...)
			kubeInformers := kubeinformers.NewSharedInformerFactory(fakeKubeClient, 5*time.Minute)
			secretStore := kubeInformers.Core().V1().Secrets().Informer().GetStore()
			for _, object := range c.objects {
				switch object.(type) {
				case *corev1.Secret:
					secretStore.Add(object)
				}
			}

			fakeOperatorClient := fakeoperatorclient.NewSimpleClientset()
			operatorInformers := operatorinformers.NewSharedInformerFactory(fakeOperatorClient, 5*time.Minute)
			operatorStore := operatorInformers.Operator().V1().Klusterlets().Informer().GetStore()
			operatorStore.Add(newKlusterlet("test", "test"))

			controller := &bootstrapController{
				kubeClient:       fakeKubeClient,
				klusterletLister: operatorInformers.Operator().V1().Klusterlets().Lister(),
				secretLister:     kubeInformers.Core().V1().Secrets().Lister(),
			}

			syncContext := testinghelper.NewFakeSyncContext(t, c.queueKey)
			if err := controller.sync(context.TODO(), syncContext); err != nil {
				t.Errorf("Expected no errors, but got %v", err)
			}

			c.validateActions(t, fakeKubeClient.Actions())
		})
	}
}

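// TestBootstrapSecretQueueKeyFunc verifies that only the bootstrap secret in a
// klusterlet's namespace is mapped to a "namespace/klusterlet-name" queue key.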
func TestBootstrapSecretQueueKeyFunc(t *testing.T) {
	cases := []struct {
		name        string
		object      runtime.Object
		klusterlet  *operatorapiv1.Klusterlet
		expectedKey string
	}{
		{
			name:        "key by bootstrap secret",
			object:      newSecret("bootstrap-hub-kubeconfig", "test", []byte{}),
			klusterlet:  newKlusterlet("testklusterlet", "test"),
			expectedKey: "test/testklusterlet",
		},
		{
			name:        "key by wrong secret",
			object:      newSecret("dummy", "test", []byte{}),
			klusterlet:  newKlusterlet("testklusterlet", "test"),
			expectedKey: "",
		},
		{
			name:        "key by klusterlet with empty namespace",
			object:      newSecret("bootstrap-hub-kubeconfig", "open-cluster-management-agent", []byte{}),
			klusterlet:  newKlusterlet("testklusterlet", ""),
			expectedKey: "open-cluster-management-agent/testklusterlet",
		},
	}

	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			fakeOperatorClient := fakeoperatorclient.NewSimpleClientset(c.klusterlet)
			operatorInformers := operatorinformers.NewSharedInformerFactory(fakeOperatorClient, 5*time.Minute)
			store := operatorInformers.Operator().V1().Klusterlets().Informer().GetStore()
			store.Add(c.klusterlet)
			keyFunc := bootstrapSecretQueueKeyFunc(operatorInformers.Operator().V1().Klusterlets().Lister())
			actualKey := keyFunc(c.object)
			if actualKey != c.expectedKey {
				t.Errorf("Queued key is not correct: actual %s, expected %s", actualKey, c.expectedKey)
			}
		})
	}
}

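// Test fixtures below: minimal Klusterlet, Secret, kubeconfig, and Deployment objects used by the tests above.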
func newKlusterlet(name, namespace string) *operatorapiv1.Klusterlet {
	return &operatorapiv1.Klusterlet{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
		},
		Spec: operatorapiv1.KlusterletSpec{
			Namespace: namespace,
		},
	}
}

func newSecret(name, namespace string, kubeConfig []byte) *corev1.Secret {
	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		Data: map[string][]byte{},
	}
	secret.Data["kubeconfig"] = kubeConfig
	return secret
}

func newKubeConfig(host string) []byte {
	configData, _ := runtime.Encode(clientcmdlatest.Codec, &clientcmdapi.Config{
		Clusters: map[string]*clientcmdapi.Cluster{"default-cluster": {
			Server:                host,
			InsecureSkipTLSVerify: true,
		}},
		Contexts: map[string]*clientcmdapi.Context{"default-context": {
			Cluster: "default-cluster",
		}},
		CurrentContext: "default-context",
	})
	return configData
}

func newDeployment(name, namespace string) *appsv1.Deployment {
	return &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		Spec: appsv1.DeploymentSpec{},
	}
}
@@ -18,6 +18,7 @@ import (
	"github.com/open-cluster-management/registration-operator/pkg/helpers"
	"github.com/open-cluster-management/registration-operator/pkg/operators/clustermanager/controllers/clustermanagercontroller"
	clustermanagerstatuscontroller "github.com/open-cluster-management/registration-operator/pkg/operators/clustermanager/controllers/statuscontroller"
	"github.com/open-cluster-management/registration-operator/pkg/operators/klusterlet/controllers/bootstrapcontroller"
	"github.com/open-cluster-management/registration-operator/pkg/operators/klusterlet/controllers/klusterletcontroller"
	"github.com/open-cluster-management/registration-operator/pkg/operators/klusterlet/controllers/statuscontroller"
)
@@ -119,6 +120,7 @@ func RunKlusterletOperator(ctx context.Context, controllerContext *controllercmd
		kubeVersion,
		operatorNamespace,
		controllerContext.EventRecorder)

	statusController := statuscontroller.NewKlusterletStatusController(
		kubeClient,
		operatorClient.OperatorV1().Klusterlets(),
@@ -128,10 +130,19 @@ func RunKlusterletOperator(ctx context.Context, controllerContext *controllercmd
		controllerContext.EventRecorder,
	)

	bootstrapController := bootstrapcontroller.NewBootstrapController(
		kubeClient,
		operatorInformer.Operator().V1().Klusterlets(),
		kubeInformer.Core().V1().Secrets(),
		controllerContext.EventRecorder,
	)

	go operatorInformer.Start(ctx.Done())
	go kubeInformer.Start(ctx.Done())
	go klusterletController.Run(ctx, 1)
	go statusController.Run(ctx, 1)
	go bootstrapController.Run(ctx, 1)

	<-ctx.Done()
	return nil
}

@@ -365,6 +365,9 @@ var _ = ginkgo.Describe("Klusterlet", func() {
			registrationDeploymentName = fmt.Sprintf("%s-registration-agent", klusterlet.Name)
			workDeploymentName = fmt.Sprintf("%s-work-agent", klusterlet.Name)
		})
		ginkgo.AfterEach(func() {
			operatorClient.OperatorV1().Klusterlets().Delete(context.Background(), klusterlet.Name, metav1.DeleteOptions{})
		})
		ginkgo.It("should have correct degraded conditions", func() {
			_, err := operatorClient.OperatorV1().Klusterlets().Create(context.Background(), klusterlet, metav1.CreateOptions{})
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
@@ -420,4 +423,85 @@ var _ = ginkgo.Describe("Klusterlet", func() {
			util.AssertKlusterletCondition(klusterlet.Name, operatorClient, "KlusterletWorkDegraded", "WorkFunctional", metav1.ConditionFalse)
		})
	})

	ginkgo.Context("bootstrap reconciliation", func() {
		ginkgo.BeforeEach(func() {
			registrationDeploymentName = fmt.Sprintf("%s-registration-agent", klusterlet.Name)
			workDeploymentName = fmt.Sprintf("%s-work-agent", klusterlet.Name)
		})
		ginkgo.AfterEach(func() {
			operatorClient.OperatorV1().Klusterlets().Delete(context.Background(), klusterlet.Name, metav1.DeleteOptions{})
		})
		ginkgo.It("should reload the klusterlet after the bootstrap secret is changed", func() {
			_, err := operatorClient.OperatorV1().Klusterlets().Create(context.Background(), klusterlet, metav1.CreateOptions{})
			gomega.Expect(err).NotTo(gomega.HaveOccurred())

			// Create a bootstrap secret
			bootStrapSecret := &corev1.Secret{
				ObjectMeta: metav1.ObjectMeta{
					Name:      helpers.BootstrapHubKubeConfig,
					Namespace: klusterletNamespace,
				},
				Data: map[string][]byte{
					"kubeconfig": util.NewKubeConfig(restConfig.Host),
				},
			}
			_, err = kubeClient.CoreV1().Secrets(klusterletNamespace).Create(context.Background(), bootStrapSecret, metav1.CreateOptions{})
			gomega.Expect(err).ToNot(gomega.HaveOccurred())

			// Update the hub secret so that it matches the bootstrap secret
			gomega.Eventually(func() bool {
				hubSecret, err := kubeClient.CoreV1().Secrets(klusterletNamespace).Get(context.Background(), helpers.HubKubeConfig, metav1.GetOptions{})
				if err != nil {
					return false
				}
				hubSecret.Data["cluster-name"] = []byte("testcluster")
				hubSecret.Data["kubeconfig"] = util.NewKubeConfig(restConfig.Host)
				if _, err = kubeClient.CoreV1().Secrets(klusterletNamespace).Update(context.Background(), hubSecret, metav1.UpdateOptions{}); err != nil {
					return false
				}
				return true
			}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())

			// Get the deployments
			var registrationDeployment *appsv1.Deployment
			var workDeployment *appsv1.Deployment
			gomega.Eventually(func() bool {
				if registrationDeployment, err = kubeClient.AppsV1().Deployments(klusterletNamespace).Get(context.Background(), registrationDeploymentName, metav1.GetOptions{}); err != nil {
					return false
				}
				if workDeployment, err = kubeClient.AppsV1().Deployments(klusterletNamespace).Get(context.Background(), workDeploymentName, metav1.GetOptions{}); err != nil {
					return false
				}
				return true
			}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())

			// Change the bootstrap secret server address
			bootStrapSecret, err = kubeClient.CoreV1().Secrets(klusterletNamespace).Get(context.Background(), helpers.BootstrapHubKubeConfig, metav1.GetOptions{})
			gomega.Expect(err).ToNot(gomega.HaveOccurred())

			bootStrapSecret.Data["kubeconfig"] = util.NewKubeConfig("https://127.0.0.10:33934")
			_, err = kubeClient.CoreV1().Secrets(klusterletNamespace).Update(context.Background(), bootStrapSecret, metav1.UpdateOptions{})
			gomega.Expect(err).ToNot(gomega.HaveOccurred())

			// Make sure the deployments are deleted and recreated
			gomega.Eventually(func() bool {
				lastRegistrationDeployment, err := kubeClient.AppsV1().Deployments(klusterletNamespace).Get(context.Background(), registrationDeploymentName, metav1.GetOptions{})
				if err != nil {
					return false
				}
				lastWorkDeployment, err := kubeClient.AppsV1().Deployments(klusterletNamespace).Get(context.Background(), workDeploymentName, metav1.GetOptions{})
				if err != nil {
					return false
				}
				if registrationDeployment.CreationTimestamp.Time.Equal(lastRegistrationDeployment.CreationTimestamp.Time) {
					return false
				}
				if workDeployment.CreationTimestamp.Time.Equal(lastWorkDeployment.CreationTimestamp.Time) {
					return false
				}
				return true
			}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
		})
	})
})

@@ -55,7 +55,6 @@ func (r *IntegrationTestEventRecorder) Shutdown() {
func HasCondition(conditions []metav1.Condition, expectedType, expectedReason string, expectedStatus metav1.ConditionStatus) bool {
	found := false
	for _, condition := range conditions {
		fmt.Printf(">>> %v \n", condition)
		if condition.Type != expectedType {
			continue
		}