Merge pull request #36 from skeeey/deploy-status

Check the status of the managed cluster agent secrets (bootstrap and hub kubeconfig)
This commit is contained in:
OpenShift Merge Robot
2020-06-25 10:27:54 -04:00
committed by GitHub
5 changed files with 434 additions and 34 deletions

View File

@@ -3,7 +3,12 @@ package statuscontroller
import (
"context"
"fmt"
"io/ioutil"
"os"
"path"
authorizationv1 "k8s.io/api/authorization/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -13,6 +18,7 @@ import (
"k8s.io/client-go/kubernetes"
appslister "k8s.io/client-go/listers/apps/v1"
corelister "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/klog"
"github.com/openshift/library-go/pkg/controller/factory"
@@ -99,7 +105,7 @@ func (k *klusterletStatusController) sync(ctx context.Context, controllerContext
}
// Check if bootstrap secret exists
_, err = k.kubeClient.CoreV1().Secrets(klusterletNS).Get(ctx, helpers.BootstrapHubKubeConfigSecret, metav1.GetOptions{})
bootstrapSecret, err := k.kubeClient.CoreV1().Secrets(klusterletNS).Get(ctx, helpers.BootstrapHubKubeConfigSecret, metav1.GetOptions{})
if err != nil {
registrationDegradedCondition.Message = fmt.Sprintf("Failed to get bootstrap secret %q %q: %v", klusterletNS, helpers.BootstrapHubKubeConfigSecret, err)
registrationDegradedCondition.Status = metav1.ConditionTrue
@@ -109,7 +115,39 @@ func (k *klusterletStatusController) sync(ctx context.Context, controllerContext
)
return err
}
// TODO verify if bootstrap secret works
// Check if bootstrap secret works by building kube client
bootstrapClient, err := buildKubeClientWithSecret(bootstrapSecret)
if err != nil {
registrationDegradedCondition.Message = fmt.Sprintf("Failed to build kube client with bootstrap secret %q %q: %v", klusterletNS, helpers.BootstrapHubKubeConfigSecret, err)
registrationDegradedCondition.Status = metav1.ConditionTrue
registrationDegradedCondition.Reason = "BootstrapSecretError"
_, _, err := helpers.UpdateKlusterletStatus(ctx, k.klusterletClient, klusterletName,
helpers.UpdateKlusterletConditionFn(registrationDegradedCondition),
)
return err
}
// Check the bootstrap client permissions by creating SelfSubjectAccessReviews
allowed, failedReview, err := createSelfSubjectAccessReviews(ctx, bootstrapClient, getBootstrapSelfSubjectAccessReviews())
if err != nil {
registrationDegradedCondition.Message = fmt.Sprintf("Failed to create %+v with bootstrap secret %q %q: %v", failedReview, klusterletNS, helpers.BootstrapHubKubeConfigSecret, err)
registrationDegradedCondition.Status = metav1.ConditionTrue
registrationDegradedCondition.Reason = "BootstrapSecretError"
_, _, err := helpers.UpdateKlusterletStatus(ctx, k.klusterletClient, klusterletName,
helpers.UpdateKlusterletConditionFn(registrationDegradedCondition),
)
return err
}
if !allowed {
registrationDegradedCondition.Message = fmt.Sprintf("Operation for resource %+v is not allowed with bootstrap secret %q %q", failedReview.Spec.ResourceAttributes, klusterletNS, helpers.BootstrapHubKubeConfigSecret)
registrationDegradedCondition.Status = metav1.ConditionTrue
registrationDegradedCondition.Reason = "BootstrapSecretUnauthorized"
_, _, err := helpers.UpdateKlusterletStatus(ctx, k.klusterletClient, klusterletName,
helpers.UpdateKlusterletConditionFn(registrationDegradedCondition),
)
return err
}
// Check if hub kubeconfig secret exists
hubConfigSecret, err := k.kubeClient.CoreV1().Secrets(klusterletNS).Get(ctx, helpers.HubKubeConfigSecret, metav1.GetOptions{})
@@ -130,9 +168,9 @@ func (k *klusterletStatusController) sync(ctx context.Context, controllerContext
}
// If cluster name is empty, read cluster name from hub config secret
if klusterlet.Spec.ClusterName == "" {
clusterName := hubConfigSecret.Data["cluster-name"]
if clusterName == nil {
clusterName := klusterlet.Spec.ClusterName
if clusterName == "" {
if hubConfigSecret.Data["cluster-name"] == nil {
registrationDegradedCondition.Message = fmt.Sprintf(
"Failed to get cluster name from `kubectl get secret -n %q %q -ojsonpath='{.data.cluster-name}`. This is set by the klusterlet registration deployment.", hubConfigSecret.Namespace, hubConfigSecret.Name)
registrationDegradedCondition.Status = metav1.ConditionTrue
@@ -148,6 +186,7 @@ func (k *klusterletStatusController) sync(ctx context.Context, controllerContext
)
return err
}
clusterName = string(hubConfigSecret.Data["cluster-name"])
}
// If hub kubeconfig does not exist, return err.
@@ -167,7 +206,65 @@ func (k *klusterletStatusController) sync(ctx context.Context, controllerContext
)
return err
}
// TODO it is possible to verify the kubeconfig actually works.
// Check if hub config secret works by building kube client with its kubeconfig
hubClient, err := buildKubeClientWithSecret(hubConfigSecret)
if err != nil {
registrationDegradedCondition.Message = fmt.Sprintf("Failed to build kube client with hub config secret %q %q: %v", hubConfigSecret.Namespace, hubConfigSecret.Name, err)
registrationDegradedCondition.Status = metav1.ConditionTrue
registrationDegradedCondition.Reason = "HubConfigSecretError"
// Work condition will be the same as registration
workDegradedCondition.Message = registrationDegradedCondition.Message
workDegradedCondition.Status = registrationDegradedCondition.Status
workDegradedCondition.Reason = registrationDegradedCondition.Reason
_, _, err := helpers.UpdateKlusterletStatus(ctx, k.klusterletClient, klusterletName,
helpers.UpdateKlusterletConditionFn(registrationDegradedCondition),
helpers.UpdateKlusterletConditionFn(workDegradedCondition),
)
return err
}
// Check the hub client (registration and work) permissions by creating SelfSubjectAccessReviews
allowed, failedReview, err = createSelfSubjectAccessReviews(ctx, hubClient, getRegistrationSelfSubjectAccessReviews(clusterName))
if err != nil {
registrationDegradedCondition.Message = fmt.Sprintf("Failed to create %+v with hub config secret %q %q: %v", failedReview, klusterletNS, helpers.BootstrapHubKubeConfigSecret, err)
registrationDegradedCondition.Status = metav1.ConditionTrue
registrationDegradedCondition.Reason = "HubConfigSecretError"
_, _, err := helpers.UpdateKlusterletStatus(ctx, k.klusterletClient, klusterletName,
helpers.UpdateKlusterletConditionFn(registrationDegradedCondition),
)
return err
}
if !allowed {
registrationDegradedCondition.Message = fmt.Sprintf("Operation for resource %+v is not allowed with hub config secret %q %q", failedReview.Spec.ResourceAttributes, klusterletNS, helpers.BootstrapHubKubeConfigSecret)
registrationDegradedCondition.Status = metav1.ConditionTrue
registrationDegradedCondition.Reason = "HubConfigSecretUnauthorized"
_, _, err := helpers.UpdateKlusterletStatus(ctx, k.klusterletClient, klusterletName,
helpers.UpdateKlusterletConditionFn(registrationDegradedCondition),
)
return err
}
allowed, failedReview, err = createSelfSubjectAccessReviews(ctx, hubClient, getWorkSelfSubjectAccessReviews(clusterName))
if err != nil {
workDegradedCondition.Message = fmt.Sprintf("Failed to create %+v with hub config secret %q %q: %v", failedReview, klusterletNS, helpers.BootstrapHubKubeConfigSecret, err)
workDegradedCondition.Status = metav1.ConditionTrue
workDegradedCondition.Reason = "HubConfigSecretError"
_, _, err := helpers.UpdateKlusterletStatus(ctx, k.klusterletClient, klusterletName,
helpers.UpdateKlusterletConditionFn(workDegradedCondition),
)
return err
}
if !allowed {
workDegradedCondition.Message = fmt.Sprintf("Operation for resource %+v is not allowed with hub config secret %q %q", failedReview.Spec.ResourceAttributes, klusterletNS, helpers.BootstrapHubKubeConfigSecret)
workDegradedCondition.Status = metav1.ConditionTrue
workDegradedCondition.Reason = "HubConfigSecretUnauthorized"
_, _, err := helpers.UpdateKlusterletStatus(ctx, k.klusterletClient, klusterletName,
helpers.UpdateKlusterletConditionFn(workDegradedCondition),
)
return err
}
// Check deployment status
registrationDeploymentName := fmt.Sprintf("%s-registration-agent", klusterlet.Name)
@@ -200,3 +297,144 @@ func (k *klusterletStatusController) sync(ctx context.Context, controllerContext
)
return nil
}
// buildKubeClientWithSecret builds a Kubernetes client from the kubeconfig
// stored in the given secret. All secret data entries are written into a
// temporary directory so that relative file references inside the kubeconfig
// (e.g. to sibling cert/key entries) resolve correctly; the directory is
// removed before returning.
func buildKubeClientWithSecret(secret *corev1.Secret) (kubernetes.Interface, error) {
	// Fail fast with a clear message if the secret cannot possibly work.
	if _, ok := secret.Data["kubeconfig"]; !ok {
		return nil, fmt.Errorf("secret %q %q has no kubeconfig data", secret.Namespace, secret.Name)
	}
	tempdir, err := ioutil.TempDir("", "kube")
	if err != nil {
		return nil, err
	}
	defer os.RemoveAll(tempdir)
	for key, data := range secret.Data {
		// 0600: these files hold credentials; never make them world-readable.
		if err := ioutil.WriteFile(path.Join(tempdir, key), data, 0600); err != nil {
			return nil, err
		}
	}
	restConfig, err := clientcmd.BuildConfigFromFlags("", path.Join(tempdir, "kubeconfig"))
	if err != nil {
		return nil, err
	}
	return kubernetes.NewForConfig(restConfig)
}
// createSelfSubjectAccessReviews submits each of the given
// SelfSubjectAccessReviews with the provided client. It returns false plus the
// first review that was denied, or a non-nil error plus the review whose
// creation failed. When every review is allowed it returns (true, nil, nil).
func createSelfSubjectAccessReviews(
	ctx context.Context,
	kubeClient kubernetes.Interface,
	selfSubjectAccessReviews []authorizationv1.SelfSubjectAccessReview) (bool, *authorizationv1.SelfSubjectAccessReview, error) {
	for idx := range selfSubjectAccessReviews {
		review := selfSubjectAccessReviews[idx]
		result, createErr := kubeClient.AuthorizationV1().SelfSubjectAccessReviews().Create(ctx, &review, metav1.CreateOptions{})
		if createErr != nil {
			return false, &review, createErr
		}
		if !result.Status.Allowed {
			return false, &review, nil
		}
	}
	return true, nil, nil
}
// getBootstrapSelfSubjectAccessReviews returns the access reviews needed to
// verify a bootstrap kubeconfig: creating/getting managedclusters and the full
// CSR lifecycle (create/get/list/watch certificatesigningrequests).
func getBootstrapSelfSubjectAccessReviews() []authorizationv1.SelfSubjectAccessReview {
	var reviews []authorizationv1.SelfSubjectAccessReview
	reviews = append(reviews, generateSelfSubjectAccessReviews(authorizationv1.ResourceAttributes{
		Group:    "cluster.open-cluster-management.io",
		Resource: "managedclusters",
	}, "create", "get")...)
	reviews = append(reviews, generateSelfSubjectAccessReviews(authorizationv1.ResourceAttributes{
		Group:    "certificates.k8s.io",
		Resource: "certificatesigningrequests",
	}, "create", "get", "list", "watch")...)
	return reviews
}
// getRegistrationSelfSubjectAccessReviews returns the access reviews the
// registration agent needs on the hub for the given managed cluster: CSR
// read access, managedcluster (and its status subresource) updates, client
// certificate renewal, and the cluster lease used for heartbeats.
func getRegistrationSelfSubjectAccessReviews(clusterName string) []authorizationv1.SelfSubjectAccessReview {
	var reviews []authorizationv1.SelfSubjectAccessReview
	reviews = append(reviews, generateSelfSubjectAccessReviews(authorizationv1.ResourceAttributes{
		Group:    "certificates.k8s.io",
		Resource: "certificatesigningrequests",
	}, "get", "list", "watch")...)
	reviews = append(reviews, generateSelfSubjectAccessReviews(authorizationv1.ResourceAttributes{
		Group:    "cluster.open-cluster-management.io",
		Resource: "managedclusters",
		Name:     clusterName,
	}, "get", "list", "update", "watch")...)
	reviews = append(reviews, generateSelfSubjectAccessReviews(authorizationv1.ResourceAttributes{
		Group:       "cluster.open-cluster-management.io",
		Resource:    "managedclusters",
		Subresource: "status",
		Name:        clusterName,
	}, "patch", "update")...)
	reviews = append(reviews, generateSelfSubjectAccessReviews(authorizationv1.ResourceAttributes{
		Group:       "register.open-cluster-management.io",
		Resource:    "managedclusters",
		Subresource: "clientcertificates",
	}, "renew")...)
	// The registration agent keeps a per-cluster lease alive in the cluster
	// namespace on the hub.
	reviews = append(reviews, generateSelfSubjectAccessReviews(authorizationv1.ResourceAttributes{
		Group:     "coordination.k8s.io",
		Resource:  "leases",
		Name:      fmt.Sprintf("cluster-lease-%s", clusterName),
		Namespace: clusterName,
	}, "get", "update")...)
	return reviews
}
// getWorkSelfSubjectAccessReviews returns the access reviews the work agent
// needs on the hub for the given managed cluster: emitting events (both core
// and events.k8s.io groups) and reading/updating manifestworks plus their
// status subresource in the cluster namespace.
func getWorkSelfSubjectAccessReviews(clusterName string) []authorizationv1.SelfSubjectAccessReview {
	var reviews []authorizationv1.SelfSubjectAccessReview
	// Events are recorded via both the legacy core group and events.k8s.io.
	reviews = append(reviews, generateSelfSubjectAccessReviews(authorizationv1.ResourceAttributes{
		Resource:  "events",
		Namespace: clusterName,
	}, "create", "patch", "update")...)
	reviews = append(reviews, generateSelfSubjectAccessReviews(authorizationv1.ResourceAttributes{
		Group:     "events.k8s.io",
		Resource:  "events",
		Namespace: clusterName,
	}, "create", "patch", "update")...)
	reviews = append(reviews, generateSelfSubjectAccessReviews(authorizationv1.ResourceAttributes{
		Group:     "work.open-cluster-management.io",
		Resource:  "manifestworks",
		Namespace: clusterName,
	}, "get", "list", "watch", "update")...)
	reviews = append(reviews, generateSelfSubjectAccessReviews(authorizationv1.ResourceAttributes{
		Group:       "work.open-cluster-management.io",
		Resource:    "manifestworks",
		Subresource: "status",
		Namespace:   clusterName,
	}, "patch", "update")...)
	return reviews
}
// generateSelfSubjectAccessReviews expands one set of resource attributes into
// a SelfSubjectAccessReview per verb, so every verb can be checked
// independently against the API server.
func generateSelfSubjectAccessReviews(resource authorizationv1.ResourceAttributes, verbs ...string) []authorizationv1.SelfSubjectAccessReview {
	// One review per verb; pre-size to avoid repeated slice growth.
	reviews := make([]authorizationv1.SelfSubjectAccessReview, 0, len(verbs))
	for _, verb := range verbs {
		// Copy the shared attributes by value so each review owns its own
		// ResourceAttributes, differing only in Verb. (The struct copy also
		// carries fields such as Version, which a field-by-field copy would
		// silently drop.)
		attrs := resource
		attrs.Verb = verb
		reviews = append(reviews, authorizationv1.SelfSubjectAccessReview{
			Spec: authorizationv1.SelfSubjectAccessReviewSpec{
				ResourceAttributes: &attrs,
			},
		})
	}
	return reviews
}

View File

@@ -1,16 +1,24 @@
package statuscontroller
import (
"context"
"encoding/json"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
"time"
appsv1 "k8s.io/api/apps/v1"
authorizationv1 "k8s.io/api/authorization/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
kubeinformers "k8s.io/client-go/informers"
fakekube "k8s.io/client-go/kubernetes/fake"
clienttesting "k8s.io/client-go/testing"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
clientcmdlatest "k8s.io/client-go/tools/clientcmd/api/latest"
fakeoperatorclient "github.com/open-cluster-management/api/client/operator/clientset/versioned/fake"
operatorinformers "github.com/open-cluster-management/api/client/operator/informers/externalversions"
@@ -24,6 +32,12 @@ type testController struct {
operatorClient *fakeoperatorclient.Clientset
}
// serverResponse controls what the fake authorization API server answers for
// SelfSubjectAccessReview requests in tests; each field toggles whether the
// corresponding operation is reported as allowed.
type serverResponse struct {
	allowToOperateManagedClusters      bool // managedclusters reviews (non-status)
	allowToOperateManagedClusterStatus bool // managedclusters/status reviews
	allowToOperateManifestWorks        bool // manifestworks reviews
}
func newSecret(name, namespace string) *corev1.Secret {
return &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
@@ -34,9 +48,23 @@ func newSecret(name, namespace string) *corev1.Secret {
}
}
func newSecretWithKubeConfig(name, namespace string) *corev1.Secret {
func newKubeConfig(host string) []byte {
configData, _ := runtime.Encode(clientcmdlatest.Codec, &clientcmdapi.Config{
Clusters: map[string]*clientcmdapi.Cluster{"default-cluster": {
Server: host,
InsecureSkipTLSVerify: true,
}},
Contexts: map[string]*clientcmdapi.Context{"default-context": {
Cluster: "default-cluster",
}},
CurrentContext: "default-context",
})
return configData
}
// newSecretWithKubeConfig returns a test secret whose data holds the given
// kubeconfig bytes under the "kubeconfig" key.
func newSecretWithKubeConfig(name, namespace string, kubeConfig []byte) *corev1.Secret {
	s := newSecret(name, namespace)
	s.Data["kubeconfig"] = kubeConfig
	return s
}
@@ -94,11 +122,48 @@ func newTestController(klusterlet *operatorapiv1.Klusterlet, objects ...runtime.
}
func TestSync(t *testing.T) {
response := &serverResponse{}
apiServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
if req.URL.Path != "/apis/authorization.k8s.io/v1/selfsubjectaccessreviews" {
w.WriteHeader(http.StatusNotFound)
return
}
data, err := ioutil.ReadAll(req.Body)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
return
}
ssar := &authorizationv1.SelfSubjectAccessReview{}
json.Unmarshal(data, ssar)
if ssar.Spec.ResourceAttributes.Resource == "managedclusters" {
if ssar.Spec.ResourceAttributes.Subresource == "status" {
ssar.Status.Allowed = response.allowToOperateManagedClusterStatus
} else {
ssar.Status.Allowed = response.allowToOperateManagedClusters
}
} else if ssar.Spec.ResourceAttributes.Resource == "manifestworks" {
ssar.Status.Allowed = response.allowToOperateManifestWorks
} else {
ssar.Status.Allowed = true
}
w.Header().Set("Content-type", "application/json")
w.WriteHeader(http.StatusCreated)
json.NewEncoder(w).Encode(ssar)
}))
defer apiServer.Close()
apiServerHost := apiServer.URL
cases := []struct {
name string
object []runtime.Object
klusterlet *operatorapiv1.Klusterlet
expectedConditions []operatorapiv1.StatusCondition
name string
object []runtime.Object
klusterlet *operatorapiv1.Klusterlet
allowToOperateManagedClusters bool
allowToOperateManagedClusterStatus bool
allowToOperateManifestWorks bool
expectedConditions []operatorapiv1.StatusCondition
}{
{
name: "No bootstrap secret",
@@ -109,41 +174,113 @@ func TestSync(t *testing.T) {
},
},
{
name: "No hubconfig secret",
object: []runtime.Object{newSecret(helpers.BootstrapHubKubeConfigSecret, "test")},
name: "Bad bootstrap secret",
object: []runtime.Object{
newSecretWithKubeConfig(helpers.BootstrapHubKubeConfigSecret, "test", []byte("badsecret")),
},
klusterlet: newKlusterlet("testklusterlet", "test", ""),
expectedConditions: []operatorapiv1.StatusCondition{
testinghelper.NamedCondition(klusterletRegistrationDegraded, "BootstrapSecretError", metav1.ConditionTrue),
},
},
{
name: "Unauthorized bootstrap secret",
object: []runtime.Object{
newSecretWithKubeConfig(helpers.BootstrapHubKubeConfigSecret, "test", newKubeConfig(apiServerHost)),
},
klusterlet: newKlusterlet("testklusterlet", "test", ""),
expectedConditions: []operatorapiv1.StatusCondition{
testinghelper.NamedCondition(klusterletRegistrationDegraded, "BootstrapSecretUnauthorized", metav1.ConditionTrue),
},
},
{
name: "No hubconfig secret",
object: []runtime.Object{
newSecretWithKubeConfig(helpers.BootstrapHubKubeConfigSecret, "test", newKubeConfig(apiServerHost)),
},
klusterlet: newKlusterlet("testklusterlet", "test", ""),
allowToOperateManagedClusters: true,
expectedConditions: []operatorapiv1.StatusCondition{
testinghelper.NamedCondition(klusterletRegistrationDegraded, "HubKubeConfigSecretMissing", metav1.ConditionTrue),
testinghelper.NamedCondition(klusterletWorKDegraded, "HubKubeConfigSecretMissing", metav1.ConditionTrue),
},
},
{
name: "No cluster name secret",
object: []runtime.Object{newSecret(helpers.BootstrapHubKubeConfigSecret, "test"), newSecret(helpers.HubKubeConfigSecret, "test")},
klusterlet: newKlusterlet("testklusterlet", "test", ""),
name: "No cluster name secret",
object: []runtime.Object{
newSecretWithKubeConfig(helpers.BootstrapHubKubeConfigSecret, "test", newKubeConfig(apiServerHost)),
newSecret(helpers.HubKubeConfigSecret, "test"),
},
allowToOperateManagedClusters: true,
klusterlet: newKlusterlet("testklusterlet", "test", ""),
expectedConditions: []operatorapiv1.StatusCondition{
testinghelper.NamedCondition(klusterletRegistrationDegraded, "ClusterNameMissing", metav1.ConditionTrue),
testinghelper.NamedCondition(klusterletWorKDegraded, "ClusterNameMissing", metav1.ConditionTrue),
},
},
{
name: "No kubeconfig secret",
object: []runtime.Object{newSecret(helpers.BootstrapHubKubeConfigSecret, "test"), newSecret(helpers.HubKubeConfigSecret, "test")},
klusterlet: newKlusterlet("testklusterlet", "test", "cluster1"),
name: "No kubeconfig secret",
object: []runtime.Object{
newSecretWithKubeConfig(helpers.BootstrapHubKubeConfigSecret, "test", newKubeConfig(apiServerHost)),
newSecret(helpers.HubKubeConfigSecret, "test"),
},
allowToOperateManagedClusters: true,
klusterlet: newKlusterlet("testklusterlet", "test", "cluster1"),
expectedConditions: []operatorapiv1.StatusCondition{
testinghelper.NamedCondition(klusterletRegistrationDegraded, "KubeConfigMissing", metav1.ConditionTrue),
testinghelper.NamedCondition(klusterletWorKDegraded, "KubeConfigMissing", metav1.ConditionTrue),
},
},
{
name: "Bad hub config secret",
object: []runtime.Object{
newSecretWithKubeConfig(helpers.BootstrapHubKubeConfigSecret, "test", newKubeConfig(apiServerHost)),
newSecretWithKubeConfig(helpers.HubKubeConfigSecret, "test", []byte("badkubeconfig")),
},
allowToOperateManagedClusters: true,
klusterlet: newKlusterlet("testklusterlet", "test", "cluster1"),
expectedConditions: []operatorapiv1.StatusCondition{
testinghelper.NamedCondition(klusterletRegistrationDegraded, "HubConfigSecretError", metav1.ConditionTrue),
testinghelper.NamedCondition(klusterletWorKDegraded, "HubConfigSecretError", metav1.ConditionTrue),
},
},
{
name: "Unauthorized hub config secret (registration)",
object: []runtime.Object{
newSecretWithKubeConfig(helpers.BootstrapHubKubeConfigSecret, "test", newKubeConfig(apiServerHost)),
newSecretWithKubeConfig(helpers.HubKubeConfigSecret, "test", newKubeConfig(apiServerHost)),
},
allowToOperateManagedClusters: true,
klusterlet: newKlusterlet("testklusterlet", "test", "cluster1"),
expectedConditions: []operatorapiv1.StatusCondition{
testinghelper.NamedCondition(klusterletRegistrationDegraded, "HubConfigSecretUnauthorized", metav1.ConditionTrue),
},
},
{
name: "Unauthorized hub config secret (work)",
object: []runtime.Object{
newSecretWithKubeConfig(helpers.BootstrapHubKubeConfigSecret, "test", newKubeConfig(apiServerHost)),
newSecretWithKubeConfig(helpers.HubKubeConfigSecret, "test", newKubeConfig(apiServerHost)),
},
allowToOperateManagedClusters: true,
allowToOperateManagedClusterStatus: true,
klusterlet: newKlusterlet("testklusterlet", "test", "cluster1"),
expectedConditions: []operatorapiv1.StatusCondition{
testinghelper.NamedCondition(klusterletWorKDegraded, "HubConfigSecretUnauthorized", metav1.ConditionTrue),
},
},
{
name: "Unavailable pod in deployments",
object: []runtime.Object{
newSecret(helpers.BootstrapHubKubeConfigSecret, "test"),
newSecretWithKubeConfig(helpers.HubKubeConfigSecret, "test"),
newSecretWithKubeConfig(helpers.BootstrapHubKubeConfigSecret, "test", newKubeConfig(apiServerHost)),
newSecretWithKubeConfig(helpers.HubKubeConfigSecret, "test", newKubeConfig(apiServerHost)),
newDeployment("testklusterlet-registration-agent", "test", 3, 0),
newDeployment("testklusterlet-work-agent", "test", 3, 0),
},
klusterlet: newKlusterlet("testklusterlet", "test", "cluster1"),
allowToOperateManagedClusters: true,
allowToOperateManagedClusterStatus: true,
allowToOperateManifestWorks: true,
klusterlet: newKlusterlet("testklusterlet", "test", "cluster1"),
expectedConditions: []operatorapiv1.StatusCondition{
testinghelper.NamedCondition(klusterletRegistrationDegraded, "UnavailableRegistrationPod", metav1.ConditionTrue),
testinghelper.NamedCondition(klusterletWorKDegraded, "UnavailableWorkPod", metav1.ConditionTrue),
@@ -152,12 +289,15 @@ func TestSync(t *testing.T) {
{
name: "Operator functional",
object: []runtime.Object{
newSecret(helpers.BootstrapHubKubeConfigSecret, "test"),
newSecretWithKubeConfig(helpers.HubKubeConfigSecret, "test"),
newSecretWithKubeConfig(helpers.BootstrapHubKubeConfigSecret, "test", newKubeConfig(apiServerHost)),
newSecretWithKubeConfig(helpers.HubKubeConfigSecret, "test", newKubeConfig(apiServerHost)),
newDeployment("testklusterlet-registration-agent", "test", 3, 3),
newDeployment("testklusterlet-work-agent", "test", 3, 3),
},
klusterlet: newKlusterlet("testklusterlet", "test", "cluster1"),
allowToOperateManagedClusters: true,
allowToOperateManagedClusterStatus: true,
allowToOperateManifestWorks: true,
klusterlet: newKlusterlet("testklusterlet", "test", "cluster1"),
expectedConditions: []operatorapiv1.StatusCondition{
testinghelper.NamedCondition(klusterletRegistrationDegraded, "RegistrationFunctional", metav1.ConditionFalse),
testinghelper.NamedCondition(klusterletWorKDegraded, "WorkFunctional", metav1.ConditionFalse),
@@ -169,18 +309,21 @@ func TestSync(t *testing.T) {
t.Run(c.name, func(t *testing.T) {
controller := newTestController(c.klusterlet, c.object...)
syncContext := testinghelper.NewFakeSyncContext(t, c.klusterlet.Name)
err := controller.controller.sync(nil, syncContext)
response.allowToOperateManagedClusters = c.allowToOperateManagedClusters
response.allowToOperateManagedClusterStatus = c.allowToOperateManagedClusterStatus
response.allowToOperateManifestWorks = c.allowToOperateManifestWorks
err := controller.controller.sync(context.TODO(), syncContext)
if err != nil {
t.Errorf("Expected no error when update status: %v", err)
}
operatorActions := controller.operatorClient.Actions()
testinghelper.AssertEqualNumber(t, len(operatorActions), 2)
testinghelper.AssertGet(t, operatorActions[0], "operator.open-cluster-management.io", "v1", "klusterlets")
testinghelper.AssertAction(t, operatorActions[1], "update")
testinghelper.AssertOnlyConditions(
t, operatorActions[1].(clienttesting.UpdateActionImpl).Object, c.expectedConditions...)
testinghelper.AssertOnlyConditions(t, operatorActions[1].(clienttesting.UpdateActionImpl).Object, c.expectedConditions...)
})
}
}

View File

@@ -53,6 +53,7 @@ var _ = ginkgo.BeforeSuite(func(done ginkgo.Done) {
},
}
cfg, err := testEnv.Start()
gomega.Expect(err).ToNot(gomega.HaveOccurred())
gomega.Expect(cfg).ToNot(gomega.BeNil())

View File

@@ -371,14 +371,14 @@ var _ = ginkgo.Describe("Klusterlet", func() {
util.AssertKlusterletCondition(klusterlet.Name, operatorClient, "KlusterletRegistrationDegraded", "BootStrapSecretMissing", metav1.ConditionTrue)
// Create a dummy bootstrap secret
// Create a bootstrap secret and make sure the kubeconfig can work
bootStrapSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: helpers.BootstrapHubKubeConfigSecret,
Namespace: klusterletNamespace,
},
Data: map[string][]byte{
"kubeconfig": []byte("dummy"),
"kubeconfig": util.NewKubeConfig(restConfig.Host),
},
}
_, err = kubeClient.CoreV1().Secrets(klusterletNamespace).Create(context.Background(), bootStrapSecret, metav1.CreateOptions{})
@@ -390,9 +390,9 @@ var _ = ginkgo.Describe("Klusterlet", func() {
hubSecret, err := kubeClient.CoreV1().Secrets(klusterletNamespace).Get(context.Background(), helpers.HubKubeConfigSecret, metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
// Update hub secret
// Update hub secret and make sure the kubeconfig can work
hubSecret.Data["cluster-name"] = []byte("testcluster")
hubSecret.Data["kubeconfig"] = []byte("dummy")
hubSecret.Data["kubeconfig"] = util.NewKubeConfig(restConfig.Host)
_, err = kubeClient.CoreV1().Secrets(klusterletNamespace).Update(context.Background(), hubSecret, metav1.UpdateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

View File

@@ -7,6 +7,9 @@ import (
"github.com/openshift/library-go/pkg/operator/events"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
clientcmdlatest "k8s.io/client-go/tools/clientcmd/api/latest"
operatorapiv1 "github.com/open-cluster-management/api/operator/v1"
)
@@ -54,6 +57,7 @@ func (r *IntegrationTestEventRecorder) Shutdown() {
func HasCondition(conditions []operatorapiv1.StatusCondition, expectedType, expectedReason string, expectedStatus metav1.ConditionStatus) bool {
found := false
for _, condition := range conditions {
fmt.Printf(">>> %v \n", condition)
if condition.Type != expectedType {
continue
}
@@ -72,3 +76,17 @@ func HasCondition(conditions []operatorapiv1.StatusCondition, expectedType, expe
return found
}
// NewKubeConfig serializes a minimal kubeconfig pointing at the given host
// with TLS verification disabled. It is intended for integration tests only;
// it panics if the static config cannot be encoded, which would indicate a
// programming error rather than a runtime condition worth propagating.
func NewKubeConfig(host string) []byte {
	configData, err := runtime.Encode(clientcmdlatest.Codec, &clientcmdapi.Config{
		Clusters: map[string]*clientcmdapi.Cluster{"test-cluster": {
			Server:                host,
			InsecureSkipTLSVerify: true,
		}},
		Contexts: map[string]*clientcmdapi.Context{"test-context": {
			Cluster: "test-cluster",
		}},
		CurrentContext: "test-context",
	})
	if err != nil {
		// Do not silently return a nil kubeconfig to the test.
		panic(err)
	}
	return configData
}