Merge pull request #19 from deads2k/tweak-conditions

Minor tweaks as I used it.
This commit is contained in:
OpenShift Merge Robot
2020-05-28 23:54:27 -04:00
committed by GitHub
4 changed files with 154 additions and 112 deletions

View File

@@ -12,7 +12,7 @@ rules:
verbs: ["create"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get", "list", "watch"]
verbs: ["create", "get", "list", "watch"]
- apiGroups: ["", "events.k8s.io"]
resources: ["events"]
verbs: ["create", "patch", "update"]
@@ -25,7 +25,7 @@ rules:
- apiGroups: ["rbac.authorization.k8s.io"]
resources: ["clusterroles", "roles"]
verbs: ["create", "get", "list", "update", "watch", "patch", "delete", "escalate", "bind"]
# Allow nuclues to manage nucleus apis.
# Allow nucleus to manage nucleus apis.
- apiGroups: ["nucleus.open-cluster-management.io"]
resources: ["spokecores"]
verbs: ["get", "list", "watch", "update", "patch", "delete"]

1
go.mod
View File

@@ -3,6 +3,7 @@ module github.com/open-cluster-management/nucleus
go 1.13
require (
github.com/davecgh/go-spew v1.1.1
github.com/jteeuwen/go-bindata v3.0.8-0.20151023091102-a0ff2567cfb7+incompatible
github.com/onsi/ginkgo v1.11.0
github.com/onsi/gomega v1.8.1

View File

@@ -39,6 +39,7 @@ const (
hubKubeConfigSecret = "hub-kubeconfig-secret"
nucleusSpokeCoreNamespace = "open-cluster-management-spoke"
spokeCoreApplied = "Applied"
spokeRegistrationDegraded = "SpokeRegistrationDegraded"
)
var (
@@ -157,15 +158,23 @@ func (n *nucleusSpokeController) sync(ctx context.Context, controllerContext fac
// Start deploy spoke core components
// Check if namespace exists
_, err = n.kubeClient.CoreV1().Namespaces().Get(ctx, config.SpokeCoreNamespace, metav1.GetOptions{})
if err != nil {
helpers.SetNucleusCondition(&spokeCore.Status.Conditions, nucleusapiv1.StatusCondition{
Type: spokeCoreApplied,
Status: metav1.ConditionFalse,
Reason: "SpokeCoreApplyFailed",
Message: fmt.Sprintf("Failed to get namespace %q", config.SpokeCoreNamespace),
})
helpers.UpdateNucleusSpokeStatus(
ctx, n.nucleusClient, spokeCoreName, helpers.UpdateNucleusSpokeConditionFn(spokeCore.Status.Conditions...))
switch {
case errors.IsNotFound(err):
_, createErr := n.kubeClient.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{Name: config.SpokeCoreNamespace},
}, metav1.CreateOptions{})
if createErr != nil {
helpers.UpdateNucleusSpokeStatus(ctx, n.nucleusClient, spokeCoreName, helpers.UpdateNucleusSpokeConditionFn(nucleusapiv1.StatusCondition{
Type: spokeCoreApplied, Status: metav1.ConditionFalse, Reason: "SpokeCoreApplyFailed",
Message: fmt.Sprintf("Failed to create namespace %q: %v", config.SpokeCoreNamespace, createErr),
}))
return createErr
}
case err != nil:
helpers.UpdateNucleusSpokeStatus(ctx, n.nucleusClient, spokeCoreName, helpers.UpdateNucleusSpokeConditionFn(nucleusapiv1.StatusCondition{
Type: spokeCoreApplied, Status: metav1.ConditionFalse, Reason: "SpokeCoreApplyFailed",
Message: fmt.Sprintf("Failed to get namespace %q: %v", config.SpokeCoreNamespace, err),
}))
return err
}
@@ -173,14 +182,10 @@ func (n *nucleusSpokeController) sync(ctx context.Context, controllerContext fac
_, err = n.kubeClient.CoreV1().Secrets(config.SpokeCoreNamespace).Get(
ctx, config.BootStrapKubeConfigSecret, metav1.GetOptions{})
if err != nil {
helpers.SetNucleusCondition(&spokeCore.Status.Conditions, nucleusapiv1.StatusCondition{
Type: spokeCoreApplied,
Status: metav1.ConditionFalse,
Reason: "SpokeCoreApplyFailed",
Message: fmt.Sprintf("Failed to get bootstracp secret %s/%s", config.SpokeCoreNamespace, config.BootStrapKubeConfigSecret),
})
helpers.UpdateNucleusSpokeStatus(
ctx, n.nucleusClient, spokeCoreName, helpers.UpdateNucleusSpokeConditionFn(spokeCore.Status.Conditions...))
helpers.UpdateNucleusSpokeStatus(ctx, n.nucleusClient, spokeCoreName, helpers.UpdateNucleusSpokeConditionFn(nucleusapiv1.StatusCondition{
Type: spokeCoreApplied, Status: metav1.ConditionFalse, Reason: "SpokeCoreApplyFailed",
Message: fmt.Sprintf("Failed to get bootstrap secret -n %q %q: %v", config.SpokeCoreNamespace, config.BootStrapKubeConfigSecret, err),
}))
return err
}
@@ -203,22 +208,18 @@ func (n *nucleusSpokeController) sync(ctx context.Context, controllerContext fac
if len(errs) > 0 {
applyErrors := operatorhelpers.NewMultiLineAggregate(errs)
helpers.SetNucleusCondition(&spokeCore.Status.Conditions, nucleusapiv1.StatusCondition{
Type: spokeCoreApplied,
Status: metav1.ConditionFalse,
Reason: "SpokeCoreApplyFailed",
helpers.UpdateNucleusSpokeStatus(ctx, n.nucleusClient, spokeCoreName, helpers.UpdateNucleusSpokeConditionFn(nucleusapiv1.StatusCondition{
Type: spokeCoreApplied, Status: metav1.ConditionFalse, Reason: "SpokeCoreApplyFailed",
Message: applyErrors.Error(),
})
helpers.UpdateNucleusSpokeStatus(
ctx, n.nucleusClient, spokeCoreName, helpers.UpdateNucleusSpokeConditionFn(spokeCore.Status.Conditions...))
}))
return applyErrors
}
// Create hub config secret
hubSecret, err := n.kubeClient.CoreV1().Secrets(config.SpokeCoreNamespace).Get(
ctx, hubKubeConfigSecret, metav1.GetOptions{})
if errors.IsNotFound(err) {
// Craete an empty secret with placeholder
hubSecret, err := n.kubeClient.CoreV1().Secrets(config.SpokeCoreNamespace).Get(ctx, hubKubeConfigSecret, metav1.GetOptions{})
switch {
case errors.IsNotFound(err):
// Create an empty secret with placeholder
hubSecret = &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: hubKubeConfigSecret,
@@ -228,50 +229,64 @@ func (n *nucleusSpokeController) sync(ctx context.Context, controllerContext fac
}
hubSecret, err = n.kubeClient.CoreV1().Secrets(config.SpokeCoreNamespace).Create(ctx, hubSecret, metav1.CreateOptions{})
if err != nil {
helpers.UpdateNucleusSpokeStatus(ctx, n.nucleusClient, spokeCoreName, helpers.UpdateNucleusSpokeConditionFn(nucleusapiv1.StatusCondition{
Type: spokeCoreApplied, Status: metav1.ConditionFalse, Reason: "SpokeCoreApplyFailed",
Message: fmt.Sprintf("Failed to create hub kubeconfig secret -n %q %q: %v", hubSecret.Namespace, hubSecret.Name, err),
}))
return err
}
}
if err != nil {
helpers.SetNucleusCondition(&spokeCore.Status.Conditions, nucleusapiv1.StatusCondition{
Type: spokeCoreApplied,
Status: metav1.ConditionFalse,
Reason: "SpokeCoreApplyFailed",
case err != nil:
helpers.UpdateNucleusSpokeStatus(ctx, n.nucleusClient, spokeCoreName, helpers.UpdateNucleusSpokeConditionFn(nucleusapiv1.StatusCondition{
Type: spokeCoreApplied, Status: metav1.ConditionFalse, Reason: "SpokeCoreApplyFailed",
Message: fmt.Sprintf("Failed to get hub kubeconfig secret with error %v", err),
})
helpers.UpdateNucleusSpokeStatus(
ctx, n.nucleusClient, spokeCoreName, helpers.UpdateNucleusSpokeConditionFn(spokeCore.Status.Conditions...))
}))
return err
}
// Deploy registration agent
generation, err := n.applyDeployment(
config, "manifests/spoke/spoke-registration-deployment.yaml", n.registrationGeneration, controllerContext)
generation, err := n.applyDeployment(config, "manifests/spoke/spoke-registration-deployment.yaml", n.registrationGeneration, controllerContext)
if err != nil {
helpers.SetNucleusCondition(&spokeCore.Status.Conditions, nucleusapiv1.StatusCondition{
Type: spokeCoreApplied,
Status: metav1.ConditionFalse,
Reason: "SpokeCoreApplyFailed",
helpers.UpdateNucleusSpokeStatus(ctx, n.nucleusClient, spokeCoreName, helpers.UpdateNucleusSpokeConditionFn(nucleusapiv1.StatusCondition{
Type: spokeCoreApplied, Status: metav1.ConditionFalse, Reason: "SpokeCoreApplyFailed",
Message: fmt.Sprintf("Failed to deploy registration deployment with error %v", err),
})
helpers.UpdateNucleusSpokeStatus(
ctx, n.nucleusClient, spokeCoreName, helpers.UpdateNucleusSpokeConditionFn(spokeCore.Status.Conditions...))
}))
return err
}
// TODO store this in the status of the spokecore itself
n.registrationGeneration = generation
// Deploy work agent
generation, err = n.applyDeployment(
config, "manifests/spoke/spoke-work-deployment.yaml", n.workGeneration, controllerContext)
if err != nil {
helpers.UpdateNucleusSpokeStatus(ctx, n.nucleusClient, spokeCoreName, helpers.UpdateNucleusSpokeConditionFn(nucleusapiv1.StatusCondition{
Type: spokeCoreApplied, Status: metav1.ConditionFalse, Reason: "SpokeCoreApplyFailed",
Message: fmt.Sprintf("Failed to deploy work deployment with error %v", err),
}))
return err
}
// TODO store this in the status of the spokecore itself
n.workGeneration = generation
// if we get here, we have successfully applied everything and should indicate that
helpers.UpdateNucleusSpokeStatus(ctx, n.nucleusClient, spokeCoreName, helpers.UpdateNucleusSpokeConditionFn(nucleusapiv1.StatusCondition{
Type: spokeCoreApplied, Status: metav1.ConditionTrue, Reason: "SpokeCoreApplied",
Message: "Spoke Core Component Applied",
}))
// now that we have applied all of our logic, we can check to see if the data we expect to have present as indications of
// proper functioning of registration controller is working
// TODO this should be moved into a separate loop since it is independent of the application of the eventually consistent
// resources above
// If cluster name is empty, read cluster name from hub config secret
if config.ClusterName == "" {
clusterName := hubSecret.Data["cluster-name"]
if clusterName == nil {
helpers.SetNucleusCondition(&spokeCore.Status.Conditions, nucleusapiv1.StatusCondition{
Type: spokeCoreApplied,
Status: metav1.ConditionFalse,
Reason: "SpokeCoreApplyFailed",
Message: fmt.Sprintf("Failed to get cluster name"),
})
helpers.UpdateNucleusSpokeStatus(
ctx, n.nucleusClient, spokeCoreName, helpers.UpdateNucleusSpokeConditionFn(spokeCore.Status.Conditions...))
helpers.UpdateNucleusSpokeStatus(ctx, n.nucleusClient, spokeCoreName, helpers.UpdateNucleusSpokeConditionFn(nucleusapiv1.StatusCondition{
Type: spokeRegistrationDegraded, Status: metav1.ConditionTrue, Reason: "ClusterNameMissing",
Message: fmt.Sprintf("Failed to get cluster name from `kubectl get secret -n %q %q -ojsonpath='{.data.cluster-name}'`. This is set by the spoke registration deployment.", hubSecret.Namespace, hubSecret.Name),
}))
return fmt.Errorf("Failed to get cluster name")
}
config.ClusterName = string(clusterName)
@@ -279,35 +294,19 @@ func (n *nucleusSpokeController) sync(ctx context.Context, controllerContext fac
// If hub kubeconfig does not exist, return err.
if hubSecret.Data["kubeconfig"] == nil {
helpers.UpdateNucleusSpokeStatus(ctx, n.nucleusClient, spokeCoreName, helpers.UpdateNucleusSpokeConditionFn(nucleusapiv1.StatusCondition{
Type: spokeRegistrationDegraded, Status: metav1.ConditionTrue, Reason: "HubKubeconfigMissing",
Message: fmt.Sprintf("Failed to get kubeconfig from `kubectl get secret -n %q %q -ojsonpath='{.data.kubeconfig}'`. This is set by the spoke registration deployment, but the CSR must be approved by the cluster-admin on the hub.", hubSecret.Namespace, hubSecret.Name),
}))
return fmt.Errorf("Failed to get kubeconfig from hub kubeconfig secret")
}
// TODO it is possible to verify the kubeconfig actually works.
// Deploy work agent
generation, err = n.applyDeployment(
config, "manifests/spoke/spoke-work-deployment.yaml", n.workGeneration, controllerContext)
if err != nil {
helpers.SetNucleusCondition(&spokeCore.Status.Conditions, nucleusapiv1.StatusCondition{
Type: spokeCoreApplied,
Status: metav1.ConditionFalse,
Reason: "SpokeCoreApplyFailed",
Message: fmt.Sprintf("Failed to deploy work deployment with error %v", err),
})
helpers.UpdateNucleusSpokeStatus(
ctx, n.nucleusClient, spokeCoreName, helpers.UpdateNucleusSpokeConditionFn(spokeCore.Status.Conditions...))
return err
}
n.workGeneration = generation
// Update status
helpers.SetNucleusCondition(&spokeCore.Status.Conditions, nucleusapiv1.StatusCondition{
Type: spokeCoreApplied,
Status: metav1.ConditionTrue,
Reason: "SpokeCoreApplied",
Message: "Spoke Core Component Applied",
})
helpers.UpdateNucleusSpokeStatus(
ctx, n.nucleusClient, spokeCoreName, helpers.UpdateNucleusSpokeConditionFn(spokeCore.Status.Conditions...))
return err
helpers.UpdateNucleusSpokeStatus(ctx, n.nucleusClient, spokeCoreName, helpers.UpdateNucleusSpokeConditionFn(nucleusapiv1.StatusCondition{
Type: spokeRegistrationDegraded, Status: metav1.ConditionFalse, Reason: "RegistrationFunctional",
Message: "Registration is managing credentials",
}))
return nil
}
func (n *nucleusSpokeController) applyDeployment(

View File

@@ -6,9 +6,11 @@ import (
"testing"
"time"
"github.com/davecgh/go-spew/spew"
fakenucleusclient "github.com/open-cluster-management/api/client/nucleus/clientset/versioned/fake"
nucleusinformers "github.com/open-cluster-management/api/client/nucleus/informers/externalversions"
nucleusapiv1 "github.com/open-cluster-management/api/nucleus/v1"
"github.com/open-cluster-management/nucleus/pkg/helpers"
"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/library-go/pkg/operator/events/eventstesting"
appsv1 "k8s.io/api/apps/v1"
@@ -17,6 +19,7 @@ import (
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
fakekube "k8s.io/client-go/kubernetes/fake"
clienttesting "k8s.io/client-go/testing"
"k8s.io/client-go/util/workqueue"
@@ -46,6 +49,16 @@ func newFakeSyncContext(t *testing.T, key string) *fakeSyncContext {
}
}
// newSecret builds a minimal Secret fixture with the given name and
// namespace and an empty (non-nil) Data map, ready for tests to populate.
func newSecret(name, namespace string) *corev1.Secret {
	secret := &corev1.Secret{}
	secret.ObjectMeta = metav1.ObjectMeta{Name: name, Namespace: namespace}
	secret.Data = map[string][]byte{}
	return secret
}
func newSpokeCore(name, namespace, clustername string) *nucleusapiv1.SpokeCore {
return &nucleusapiv1.SpokeCore{
ObjectMeta: metav1.ObjectMeta{
@@ -62,16 +75,6 @@ func newSpokeCore(name, namespace, clustername string) *nucleusapiv1.SpokeCore {
}
}
func newSecret(name, namespace string) *corev1.Secret {
return &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Data: map[string][]byte{},
}
}
func newNamespace(name string) *corev1.Namespace {
return &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
@@ -107,18 +110,37 @@ func assertAction(t *testing.T, actual clienttesting.Action, expected string) {
}
}
func assertCondition(t *testing.T, actual runtime.Object, expectedCondition string, expectedStatus metav1.ConditionStatus) {
// assertGet fails the test when the recorded client action is not a "get"
// on the given group/version/resource. Unlike a bare dump of the action,
// the failure output states the expected verb and GVR so mismatches are
// easy to diagnose.
func assertGet(t *testing.T, actual clienttesting.Action, group, version, resource string) {
	t.Helper()
	if actual.GetVerb() != "get" {
		t.Errorf("expected verb %q but got %q: %s", "get", actual.GetVerb(), spew.Sdump(actual))
	}
	expectedGVR := schema.GroupVersionResource{Group: group, Version: version, Resource: resource}
	if actual.GetResource() != expectedGVR {
		t.Errorf("expected resource %v but got %v: %s", expectedGVR, actual.GetResource(), spew.Sdump(actual))
	}
}
// namedCondition returns a StatusCondition carrying only the given type and
// status; a shorthand for building expected conditions in assertions.
func namedCondition(name string, status metav1.ConditionStatus) nucleusapiv1.StatusCondition {
	var condition nucleusapiv1.StatusCondition
	condition.Type = name
	condition.Status = status
	return condition
}
func assertOnlyConditions(t *testing.T, actual runtime.Object, expectedConditions ...nucleusapiv1.StatusCondition) {
t.Helper()
spokeCore := actual.(*nucleusapiv1.SpokeCore)
conditions := spokeCore.Status.Conditions
if len(conditions) != 1 {
t.Errorf("expected 1 condition but got: %#v", conditions)
actualConditions := spokeCore.Status.Conditions
if len(actualConditions) != len(expectedConditions) {
t.Errorf("expected %v conditions but got: %v", len(expectedConditions), spew.Sdump(actualConditions))
}
condition := conditions[0]
if condition.Type != expectedCondition {
t.Errorf("expected %s but got: %s", expectedCondition, condition.Type)
}
if condition.Status != expectedStatus {
t.Errorf("expected %s but got: %s", expectedStatus, condition.Status)
for _, expectedCondition := range expectedConditions {
actual := helpers.FindNucleusCondition(actualConditions, expectedCondition.Type)
if actual == nil {
t.Errorf("missing %v in %v", spew.Sdump(expectedCondition), spew.Sdump(actual))
}
if actual.Status != expectedCondition.Status {
t.Errorf("wrong result for %v in %v", spew.Sdump(expectedCondition), spew.Sdump(actual))
}
}
}
@@ -193,12 +215,18 @@ func TestSyncDeploy(t *testing.T) {
}
nucleusAction := controller.nucleusClient.Actions()
if len(nucleusAction) != 2 {
t.Errorf("Expect 2 actions in the sync loop, actual %#v", nucleusAction)
if len(nucleusAction) != 4 {
t.Errorf("Expect 4 actions in the sync loop, actual %#v", nucleusAction)
}
assertGet(t, nucleusAction[0], "nucleus.open-cluster-management.io", "v1", "spokecores")
assertAction(t, nucleusAction[1], "update")
assertCondition(t, nucleusAction[1].(clienttesting.UpdateActionImpl).Object, spokeCoreApplied, metav1.ConditionTrue)
assertOnlyConditions(t, nucleusAction[1].(clienttesting.UpdateActionImpl).Object,
namedCondition(spokeCoreApplied, metav1.ConditionTrue))
assertGet(t, nucleusAction[2], "nucleus.open-cluster-management.io", "v1", "spokecores")
assertAction(t, nucleusAction[3], "update")
assertOnlyConditions(t, nucleusAction[3].(clienttesting.UpdateActionImpl).Object,
namedCondition(spokeCoreApplied, metav1.ConditionTrue), namedCondition(spokeRegistrationDegraded, metav1.ConditionFalse))
}
// TestSyncWithNoSecret test the scenario that bootstrap secret and hub config secret does not exist
@@ -220,9 +248,12 @@ func TestSyncWithNoSecret(t *testing.T) {
t.Errorf("Expect 2 actions in the sync loop, actual %#v", nucleusAction)
}
assertGet(t, nucleusAction[0], "nucleus.open-cluster-management.io", "v1", "spokecores")
assertAction(t, nucleusAction[1], "update")
assertCondition(t, nucleusAction[1].(clienttesting.UpdateActionImpl).Object, spokeCoreApplied, metav1.ConditionFalse)
assertOnlyConditions(t, nucleusAction[1].(clienttesting.UpdateActionImpl).Object, namedCondition(spokeCoreApplied, metav1.ConditionFalse))
// reset for round 2
controller.nucleusClient.ClearActions()
// Add bootstrap secret and sync again
controller.kubeClient.PrependReactor("get", "secrets", func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) {
if action.GetVerb() != "get" {
@@ -246,9 +277,17 @@ func TestSyncWithNoSecret(t *testing.T) {
t.Errorf("Expect 4 actions in the sync loop, actual %#v", nucleusAction)
}
assertGet(t, nucleusAction[0], "nucleus.open-cluster-management.io", "v1", "spokecores")
assertAction(t, nucleusAction[1], "update")
assertOnlyConditions(t, nucleusAction[1].(clienttesting.UpdateActionImpl).Object,
namedCondition(spokeCoreApplied, metav1.ConditionTrue))
assertGet(t, nucleusAction[2], "nucleus.open-cluster-management.io", "v1", "spokecores")
assertAction(t, nucleusAction[3], "update")
assertCondition(t, nucleusAction[3].(clienttesting.UpdateActionImpl).Object, spokeCoreApplied, metav1.ConditionFalse)
assertOnlyConditions(t, nucleusAction[3].(clienttesting.UpdateActionImpl).Object,
namedCondition(spokeCoreApplied, metav1.ConditionTrue), namedCondition(spokeRegistrationDegraded, metav1.ConditionTrue))
// reset for round 3
controller.nucleusClient.ClearActions()
// Add hub config secret and sync again
hubSecret.Data["kubeconfig"] = []byte("dummykubeconfig")
hubSecret.Data["cluster-name"] = []byte("cluster1")
@@ -269,12 +308,15 @@ func TestSyncWithNoSecret(t *testing.T) {
t.Errorf("Expected no error when sync: %v", err)
}
nucleusAction = controller.nucleusClient.Actions()
if len(nucleusAction) != 6 {
t.Errorf("Expect 6 actions in the sync loop, actual %#v", nucleusAction)
if len(nucleusAction) != 3 {
t.Errorf("Expect 3 actions in the sync loop, actual %#v", nucleusAction)
}
assertAction(t, nucleusAction[5], "update")
assertCondition(t, nucleusAction[5].(clienttesting.UpdateActionImpl).Object, spokeCoreApplied, metav1.ConditionTrue)
assertGet(t, nucleusAction[0], "nucleus.open-cluster-management.io", "v1", "spokecores")
assertGet(t, nucleusAction[1], "nucleus.open-cluster-management.io", "v1", "spokecores")
assertAction(t, nucleusAction[2], "update")
assertOnlyConditions(t, nucleusAction[2].(clienttesting.UpdateActionImpl).Object,
namedCondition(spokeCoreApplied, metav1.ConditionTrue), namedCondition(spokeRegistrationDegraded, metav1.ConditionFalse))
}
// TestSyncDelete test cleanup hub deploy