Merge pull request #39 from elgnay/available-status
Add controller to handle Available status
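For orientation: the controller reports availability through standard status conditions on the ManifestWork. Below is a hedged sketch of the condition it maintains; the Type, Reason, and Message values appear verbatim in the new controller further down, but the snippet itself is illustrative and not part of the commit.

```go
package main

import (
	"fmt"
	"time"

	workapiv1 "github.com/open-cluster-management/api/work/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// The Available condition the new controller maintains on a ManifestWork.
	cond := workapiv1.StatusCondition{
		Type:               string(workapiv1.WorkAvailable),
		Status:             metav1.ConditionTrue,
		Reason:             "ResourcesAvailable",
		Message:            "All resources are available",
		LastTransitionTime: metav1.NewTime(time.Now()),
	}
	fmt.Printf("%s=%s (%s)\n", cond.Type, cond.Status, cond.Reason)
}
```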
@@ -125,7 +125,7 @@ func MergeStatusConditions(conditions []workapiv1.StatusCondition, newConditions
 	for _, newCondition := range newConditions {
 		// merge two conditions if necessary
 		if condition, ok := cm[newCondition.Type]; ok {
-			merged = append(merged, mergeStatusCondition(condition, newCondition))
+			merged = append(merged, MergeStatusCondition(condition, newCondition))
 			continue
 		}
@@ -136,9 +136,9 @@ func MergeStatusConditions(conditions []workapiv1.StatusCondition, newConditions
 	return merged
 }
 
-// mergeStatusCondition returns a new status condition which merges the properties from the two input conditions.
+// MergeStatusCondition returns a new status condition which merges the properties from the two input conditions.
 // It assumes the two conditions have the same condition type.
-func mergeStatusCondition(condition, newCondition workapiv1.StatusCondition) workapiv1.StatusCondition {
+func MergeStatusCondition(condition, newCondition workapiv1.StatusCondition) workapiv1.StatusCondition {
 	merged := workapiv1.StatusCondition{
 		Type:   newCondition.Type,
 		Status: newCondition.Status,
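The rename above exports the single-condition merge so the new status controller can reuse it. A minimal sketch of merge semantics consistent with its callers in this diff, assuming the transition timestamp only moves when the status flips (an assumption about the helper, not its verified body; it reuses the workapiv1/metav1/time imports seen elsewhere in this diff):

```go
// mergeSketch is a hypothetical stand-in for helper.MergeStatusCondition:
// the new condition's fields win, but LastTransitionTime is carried over
// when the status is unchanged, so the timestamp reflects real transitions.
func mergeSketch(oldCond, newCond workapiv1.StatusCondition) workapiv1.StatusCondition {
	merged := newCond
	if oldCond.Status == newCond.Status {
		merged.LastTransitionTime = oldCond.LastTransitionTime // no transition
	} else {
		merged.LastTransitionTime = metav1.NewTime(time.Now()) // status flipped now
	}
	return merged
}
```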
@@ -152,10 +152,11 @@ func (m *ManifestWorkController) sync(ctx context.Context, controllerContext fac
 			errs = append(errs, result.Error)
 		}
 
-		resourceMeta, err := buildManifestResourceMeta(index, result.Result, m.restMapper)
+		resourceMeta, err := buildManifestResourceMeta(index, result.Result, manifestWork.Spec.Workload.Manifests[index], m.restMapper)
 		if err != nil {
 			errs = append(errs, err)
 		}
 
 		manifestCondition := workapiv1.ManifestCondition{
 			ResourceMeta: resourceMeta,
 			Conditions:   []workapiv1.StatusCondition{},
@@ -169,7 +170,6 @@ func (m *ManifestWorkController) sync(ctx context.Context, controllerContext fac
 	// merge the new manifest conditions with the existing manifest conditions
 	manifestWork.Status.ResourceStatus.Manifests = helper.MergeManifestConditions(manifestWork.Status.ResourceStatus.Manifests, newManifestConditions)
 
 	// Update work status
 	_, _, err = helper.UpdateManifestWorkStatus(
 		ctx, m.manifestWorkClient, manifestWork.Name, m.generateUpdateStatusFunc(manifestWork.Status.ResourceStatus))
@@ -378,7 +378,40 @@ func buildAppliedStatusCondition(err error) workapiv1.StatusCondition {
 	}
 }
 
-func buildManifestResourceMeta(index int, object runtime.Object, restMapper *resource.Mapper) (resourceMeta workapiv1.ManifestResourceMeta, err error) {
+// buildManifestResourceMeta returns the resource meta for a manifest. It first tries to get the
+// resource meta from the result object in the ApplyResult struct. If that resource meta is
+// incomplete, it falls back to the manifest template for the meta info.
+func buildManifestResourceMeta(index int, object runtime.Object, manifest workapiv1.Manifest, restMapper *resource.Mapper) (resourceMeta workapiv1.ManifestResourceMeta, err error) {
+	errs := []error{}
+
+	resourceMeta, err = buildResourceMeta(index, object, restMapper)
+	if err != nil {
+		errs = append(errs, err)
+	} else if len(resourceMeta.Kind) > 0 && len(resourceMeta.Version) > 0 && len(resourceMeta.Name) > 0 {
+		return resourceMeta, nil
+	}
+
+	// try to get resource meta from the manifest if the one from the apply result is incomplete
+	switch {
+	case manifest.Object != nil:
+		object = manifest.Object
+	default:
+		unstructuredObj := &unstructured.Unstructured{}
+		if err = unstructuredObj.UnmarshalJSON(manifest.Raw); err != nil {
+			errs = append(errs, err)
+			return resourceMeta, utilerrors.NewAggregate(errs)
+		}
+		object = unstructuredObj
+	}
+	resourceMeta, err = buildResourceMeta(index, object, restMapper)
+	if err == nil {
+		return resourceMeta, nil
+	}
+
+	return resourceMeta, utilerrors.NewAggregate(errs)
+}
+
+func buildResourceMeta(index int, object runtime.Object, restMapper *resource.Mapper) (resourceMeta workapiv1.ManifestResourceMeta, err error) {
 	resourceMeta = workapiv1.ManifestResourceMeta{
 		Ordinal: int32(index),
 	}
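The fallback path above recovers identifying metadata by decoding the raw manifest bytes into an Unstructured object. A self-contained sketch of just that decoding step (the Secret JSON is illustrative):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
	// Illustrative raw manifest, as it would appear in workapiv1.Manifest.Raw.
	raw := []byte(`{"apiVersion":"v1","kind":"Secret","metadata":{"name":"test2","namespace":"ns2"}}`)

	u := &unstructured.Unstructured{}
	if err := u.UnmarshalJSON(raw); err != nil {
		panic(err)
	}
	// The same fields buildResourceMeta needs: group/version/kind plus namespace/name.
	gvk := u.GroupVersionKind()
	fmt.Println(gvk.Group, gvk.Version, gvk.Kind, u.GetNamespace(), u.GetName())
}
```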
@@ -562,7 +562,7 @@ func TestAllInCondition(t *testing.T) {
 	}
 }
 
-func TestBuildManifestResourceMeta(t *testing.T) {
+func TestBuildResourceMeta(t *testing.T) {
 	var secret *corev1.Secret
 	var u *unstructured.Unstructured
 
@@ -613,7 +613,48 @@ func TestBuildManifestResourceMeta(t *testing.T) {
 
 	for _, c := range cases {
 		t.Run(c.name, func(t *testing.T) {
-			actual, err := buildManifestResourceMeta(0, c.object, c.restMapper)
+			actual, err := buildResourceMeta(0, c.object, c.restMapper)
 			if err != nil {
 				t.Errorf("Should be success with no err: %v", err)
 			}
 
 			actual.Ordinal = c.expected.Ordinal
 			if !equality.Semantic.DeepEqual(actual, c.expected) {
 				t.Errorf(diff.ObjectDiff(actual, c.expected))
 			}
 		})
 	}
 }
 
+func TestBuildManifestResourceMeta(t *testing.T) {
+	cases := []struct {
+		name           string
+		applyResult    runtime.Object
+		manifestObject runtime.Object
+		restMapper     *resource.Mapper
+		expected       workapiv1.ManifestResourceMeta
+	}{
+		{
+			name:        "extract meta from apply result",
+			applyResult: spoketesting.NewSecret("test1", "ns1", "value1"),
+			restMapper:  spoketesting.NewFakeRestMapper(),
+			expected:    workapiv1.ManifestResourceMeta{Version: "v1", Kind: "Secret", Resource: "secrets", Namespace: "ns1", Name: "test1"},
+		},
+		{
+			name:           "fall back to manifest",
+			manifestObject: spoketesting.NewSecret("test2", "ns2", "value2"),
+			restMapper:     spoketesting.NewFakeRestMapper(),
+			expected:       workapiv1.ManifestResourceMeta{Version: "v1", Kind: "Secret", Resource: "secrets", Namespace: "ns2", Name: "test2"},
+		},
+	}
+
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			manifest := workapiv1.Manifest{}
+			if c.manifestObject != nil {
+				manifest.Object = c.manifestObject
+			}
+			actual, err := buildManifestResourceMeta(0, c.applyResult, manifest, c.restMapper)
+			if err != nil {
+				t.Errorf("Should be success with no err: %v", err)
+			}
+
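The two test cases correspond to the two ways a workapiv1.Manifest carries its payload: a decoded Object or raw JSON bytes. A short sketch of constructing each variant (the Raw literal is illustrative; NewSecret is the spoketesting helper used above):

```go
// newManifests is a sketch of the two Manifest variants exercised above.
func newManifests() (objManifest, rawManifest workapiv1.Manifest) {
	// Object-backed: buildManifestResourceMeta reads meta directly from it.
	objManifest.Object = spoketesting.NewSecret("test2", "ns2", "value2")

	// Raw-backed: the fallback unmarshals the JSON into an Unstructured.
	rawManifest.Raw = []byte(`{"apiVersion":"v1","kind":"Secret","metadata":{"name":"test3","namespace":"ns3"}}`)
	return objManifest, rawManifest
}
```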
@@ -0,0 +1,260 @@
+package statuscontroller
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+	"time"
+
+	workv1client "github.com/open-cluster-management/api/client/work/clientset/versioned/typed/work/v1"
+	workinformer "github.com/open-cluster-management/api/client/work/informers/externalversions/work/v1"
+	worklister "github.com/open-cluster-management/api/client/work/listers/work/v1"
+	workapiv1 "github.com/open-cluster-management/api/work/v1"
+	"github.com/open-cluster-management/work/pkg/helper"
+	"github.com/openshift/library-go/pkg/controller/factory"
+	"github.com/openshift/library-go/pkg/operator/events"
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/client-go/dynamic"
+	"k8s.io/klog"
+)
+
+// ControllerReSyncInterval is exposed so that integration tests can crank up the controller resync speed.
+var ControllerReSyncInterval = 30 * time.Second
+
+// AvailableStatusController updates the available status conditions of both manifests and manifestworks.
+type AvailableStatusController struct {
+	manifestWorkClient workv1client.ManifestWorkInterface
+	manifestWorkLister worklister.ManifestWorkNamespaceLister
+	spokeDynamicClient dynamic.Interface
+}
+
+// NewAvailableStatusController returns an AvailableStatusController
+func NewAvailableStatusController(
+	recorder events.Recorder,
+	spokeDynamicClient dynamic.Interface,
+	manifestWorkClient workv1client.ManifestWorkInterface,
+	manifestWorkInformer workinformer.ManifestWorkInformer,
+	manifestWorkLister worklister.ManifestWorkNamespaceLister,
+) factory.Controller {
+	controller := &AvailableStatusController{
+		manifestWorkClient: manifestWorkClient,
+		manifestWorkLister: manifestWorkLister,
+		spokeDynamicClient: spokeDynamicClient,
+	}
+
+	return factory.New().
+		WithInformersQueueKeyFunc(func(obj runtime.Object) string {
+			accessor, _ := meta.Accessor(obj)
+			return accessor.GetName()
+		}, manifestWorkInformer.Informer()).
+		WithSync(controller.sync).ResyncEvery(ControllerReSyncInterval).ToController("AvailableStatusController", recorder)
+}
+
+func (c *AvailableStatusController) sync(ctx context.Context, controllerContext factory.SyncContext) error {
+	manifestWorkName := controllerContext.QueueKey()
+	// "key" is the default queue key the factory uses for periodic resyncs
+	if manifestWorkName != "key" {
+		// sync a particular manifestwork
+		manifestWork, err := c.manifestWorkLister.Get(manifestWorkName)
+		if errors.IsNotFound(err) {
+			// work not found, could have been deleted, do nothing.
+			return nil
+		}
+		if err != nil {
+			return fmt.Errorf("unable to fetch manifestwork %q: %w", manifestWorkName, err)
+		}
+
+		err = c.syncManifestWork(ctx, manifestWork)
+		if err != nil {
+			return fmt.Errorf("unable to sync manifestwork %q: %w", manifestWork.Name, err)
+		}
+		return nil
+	}
+
+	// resync all manifestworks
+	klog.V(4).Infof("Reconciling all ManifestWorks")
+	manifestWorks, err := c.manifestWorkLister.List(labels.Everything())
+	if err != nil {
+		return fmt.Errorf("unable to list manifestworks: %w", err)
+	}
+
+	errs := []error{}
+	for _, manifestWork := range manifestWorks {
+		err = c.syncManifestWork(ctx, manifestWork)
+		if err != nil {
+			errs = append(errs, fmt.Errorf("unable to sync manifestwork %q: %w", manifestWork.Name, err))
+		}
+	}
+	if len(errs) > 0 {
+		return fmt.Errorf("unable to resync manifestworks: %w", utilerrors.NewAggregate(errs))
+	}
+	return nil
+}
+
+func (c *AvailableStatusController) syncManifestWork(ctx context.Context, originalManifestWork *workapiv1.ManifestWork) error {
+	klog.V(4).Infof("Reconciling ManifestWork %q", originalManifestWork.Name)
+	manifestWork := originalManifestWork.DeepCopy()
+
+	// handle status condition of manifests
+	for index, manifest := range manifestWork.Status.ResourceStatus.Manifests {
+		availableStatusCondition := buildAvailableStatusCondition(manifest.ResourceMeta, c.spokeDynamicClient)
+		newConditions := mergeStatusConditions(manifest.Conditions, availableStatusCondition)
+		manifestWork.Status.ResourceStatus.Manifests[index].Conditions = newConditions
+	}
+
+	// handle status condition of manifestwork
+	var workStatusConditions []workapiv1.StatusCondition
+	switch {
+	case len(manifestWork.Status.ResourceStatus.Manifests) == 0:
+		// remove the condition with type Available if no manifest exists
+		for _, condition := range manifestWork.Status.Conditions {
+			if condition.Type != string(workapiv1.WorkAvailable) {
+				workStatusConditions = append(workStatusConditions, condition)
+			}
+		}
+	default:
+		// aggregate ManifestConditions and update the work status condition
+		workAvailableStatusCondition := aggregateManifestConditions(manifestWork.Status.ResourceStatus.Manifests)
+		workStatusConditions = mergeStatusConditions(manifestWork.Status.Conditions, workAvailableStatusCondition)
+	}
+	manifestWork.Status.Conditions = workStatusConditions
+
+	// nothing to do if the status of the manifestwork does not change
+	if reflect.DeepEqual(originalManifestWork.Status.Conditions, manifestWork.Status.Conditions) {
+		return nil
+	}
+
+	// update the status of the manifestwork. If this conflicts, try again later
+	_, err := c.manifestWorkClient.UpdateStatus(ctx, manifestWork, metav1.UpdateOptions{})
+	return err
+}
+
+// aggregateManifestConditions aggregates the status conditions of manifests and returns a status
+// condition for the manifestwork
+func aggregateManifestConditions(manifests []workapiv1.ManifestCondition) workapiv1.StatusCondition {
+	available, unavailable, unknown := 0, 0, 0
+	for _, manifest := range manifests {
+		for _, condition := range manifest.Conditions {
+			if condition.Type != string(workapiv1.ManifestAvailable) {
+				continue
+			}
+
+			switch condition.Status {
+			case metav1.ConditionTrue:
+				available += 1
+			case metav1.ConditionFalse:
+				unavailable += 1
+			case metav1.ConditionUnknown:
+				unknown += 1
+			}
+		}
+	}
+
+	switch {
+	case unavailable > 0:
+		return workapiv1.StatusCondition{
+			Type:    string(workapiv1.WorkAvailable),
+			Status:  metav1.ConditionFalse,
+			Reason:  "ResourcesNotAvailable",
+			Message: fmt.Sprintf("%d of %d resources are not available", unavailable, len(manifests)),
+		}
+	case unknown > 0:
+		return workapiv1.StatusCondition{
+			Type:    string(workapiv1.WorkAvailable),
+			Status:  metav1.ConditionUnknown,
+			Reason:  "ResourcesStatusUnknown",
+			Message: fmt.Sprintf("%d of %d resources have unknown status", unknown, len(manifests)),
+		}
+	default:
+		return workapiv1.StatusCondition{
+			Type:    string(workapiv1.WorkAvailable),
+			Status:  metav1.ConditionTrue,
+			Reason:  "ResourcesAvailable",
+			Message: "All resources are available",
+		}
+	}
+}
+
+// buildAvailableStatusCondition returns a StatusCondition with type Available for a given manifest resource
+func buildAvailableStatusCondition(resourceMeta workapiv1.ManifestResourceMeta, dynamicClient dynamic.Interface) workapiv1.StatusCondition {
+	conditionType := string(workapiv1.ManifestAvailable)
+
+	if len(resourceMeta.Resource) == 0 || len(resourceMeta.Version) == 0 || len(resourceMeta.Name) == 0 {
+		return workapiv1.StatusCondition{
+			Type:    conditionType,
+			Status:  metav1.ConditionUnknown,
+			Reason:  "IncompletedResourceMeta",
+			Message: "Resource meta is incomplete",
+		}
+	}
+
+	available, err := isResourceAvailable(resourceMeta.Namespace, resourceMeta.Name, schema.GroupVersionResource{
+		Group:    resourceMeta.Group,
+		Version:  resourceMeta.Version,
+		Resource: resourceMeta.Resource,
+	}, dynamicClient)
+	if err != nil {
+		return workapiv1.StatusCondition{
+			Type:    conditionType,
+			Status:  metav1.ConditionUnknown,
+			Reason:  "FetchingResourceFailed",
+			Message: fmt.Sprintf("Failed to fetch resource: %v", err),
+		}
+	}
+
+	if available {
+		return workapiv1.StatusCondition{
+			Type:    conditionType,
+			Status:  metav1.ConditionTrue,
+			Reason:  "ResourceAvailable",
+			Message: "Resource is available",
+		}
+	}
+
+	return workapiv1.StatusCondition{
+		Type:    conditionType,
+		Status:  metav1.ConditionFalse,
+		Reason:  "ResourceNotAvailable",
+		Message: "Resource is not available",
+	}
+}
+
+// isResourceAvailable checks whether the specific resource is available
+func isResourceAvailable(namespace, name string, gvr schema.GroupVersionResource, dynamicClient dynamic.Interface) (bool, error) {
+	_, err := dynamicClient.Resource(gvr).Namespace(namespace).Get(context.TODO(), name, metav1.GetOptions{})
+	if errors.IsNotFound(err) {
+		return false, nil
+	}
+	if err != nil {
+		return false, err
+	}
+	return true, nil
+}
+
+func mergeStatusConditions(conditions []workapiv1.StatusCondition, newCondition workapiv1.StatusCondition) []workapiv1.StatusCondition {
+	mergedConditions := []workapiv1.StatusCondition{}
+
+	merged := false
+	for _, condition := range conditions {
+		// merge two conditions if necessary
+		if condition.Type == newCondition.Type {
+			mergedConditions = append(mergedConditions, helper.MergeStatusCondition(condition, newCondition))
+			merged = true
+			continue
+		}
+
+		mergedConditions = append(mergedConditions, condition)
+	}
+
+	if !merged {
+		newCondition.LastTransitionTime = metav1.NewTime(time.Now())
+		mergedConditions = append(mergedConditions, newCondition)
+	}
+
+	return mergedConditions
+}
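Note the precedence in aggregateManifestConditions: any unavailable manifest marks the whole work unavailable, otherwise any unknown makes it unknown, otherwise the work is available. A standalone sketch of just that precedence rule:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// aggregate mirrors the precedence in aggregateManifestConditions:
// any False wins, then any Unknown, otherwise True.
func aggregate(statuses []metav1.ConditionStatus) metav1.ConditionStatus {
	unknown := false
	for _, s := range statuses {
		switch s {
		case metav1.ConditionFalse:
			return metav1.ConditionFalse
		case metav1.ConditionUnknown:
			unknown = true
		}
	}
	if unknown {
		return metav1.ConditionUnknown
	}
	return metav1.ConditionTrue
}

func main() {
	fmt.Println(aggregate([]metav1.ConditionStatus{
		metav1.ConditionTrue, metav1.ConditionUnknown, metav1.ConditionFalse,
	})) // prints "False"
}
```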
@@ -0,0 +1,179 @@
+package statuscontroller
+
+import (
+	"context"
+	"testing"
+
+	"github.com/davecgh/go-spew/spew"
+	fakeworkclient "github.com/open-cluster-management/api/client/work/clientset/versioned/fake"
+	workapiv1 "github.com/open-cluster-management/api/work/v1"
+	"github.com/open-cluster-management/work/pkg/spoke/spoketesting"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	fakedynamic "k8s.io/client-go/dynamic/fake"
+	clienttesting "k8s.io/client-go/testing"
+)
+
+func TestSyncManifestWork(t *testing.T) {
+	cases := []struct {
+		name              string
+		existingResources []runtime.Object
+		manifests         []workapiv1.ManifestCondition
+		workConditions    []workapiv1.StatusCondition
+		validateActions   func(t *testing.T, actions []clienttesting.Action)
+	}{
+		{
+			name: "remove available status from work whose manifests become empty",
+			workConditions: []workapiv1.StatusCondition{
+				{
+					Type: string(workapiv1.WorkAvailable),
+				},
+			},
+			validateActions: func(t *testing.T, actions []clienttesting.Action) {
+				if len(actions) != 1 {
+					t.Fatal(spew.Sdump(actions))
+				}
+
+				work := actions[0].(clienttesting.UpdateAction).GetObject().(*workapiv1.ManifestWork)
+				if len(work.Status.Conditions) != 0 {
+					t.Fatal(spew.Sdump(actions))
+				}
+			},
+		},
+		{
+			name: "build status with existing resource",
+			existingResources: []runtime.Object{
+				spoketesting.NewUnstructuredSecret("ns1", "n1", false, "ns1-n1"),
+			},
+			manifests: []workapiv1.ManifestCondition{
+				newManifest("", "v1", "secrets", "ns1", "n1"),
+			},
+			validateActions: func(t *testing.T, actions []clienttesting.Action) {
+				if len(actions) != 1 {
+					t.Fatal(spew.Sdump(actions))
+				}
+
+				work := actions[0].(clienttesting.UpdateAction).GetObject().(*workapiv1.ManifestWork)
+				if len(work.Status.ResourceStatus.Manifests) != 1 {
+					t.Fatal(spew.Sdump(work.Status.ResourceStatus.Manifests))
+				}
+				if !hasStatusCondition(work.Status.ResourceStatus.Manifests[0].Conditions, string(workapiv1.ManifestAvailable), metav1.ConditionTrue) {
+					t.Fatal(spew.Sdump(work.Status.ResourceStatus.Manifests[0].Conditions))
+				}
+
+				if !hasStatusCondition(work.Status.Conditions, string(workapiv1.WorkAvailable), metav1.ConditionTrue) {
+					t.Fatal(spew.Sdump(work.Status.Conditions))
+				}
+			},
+		},
+		{
+			name: "build status when one of the resources does not exist",
+			existingResources: []runtime.Object{
+				spoketesting.NewUnstructuredSecret("ns1", "n1", false, "ns1-n1"),
+			},
+			manifests: []workapiv1.ManifestCondition{
+				newManifest("", "v1", "secrets", "ns1", "n1"),
+				newManifest("", "v1", "secrets", "ns2", "n2"),
+			},
+			validateActions: func(t *testing.T, actions []clienttesting.Action) {
+				if len(actions) != 1 {
+					t.Fatal(spew.Sdump(actions))
+				}
+
+				work := actions[0].(clienttesting.UpdateAction).GetObject().(*workapiv1.ManifestWork)
+				if len(work.Status.ResourceStatus.Manifests) != 2 {
+					t.Fatal(spew.Sdump(work.Status.ResourceStatus.Manifests))
+				}
+				if !hasStatusCondition(work.Status.ResourceStatus.Manifests[0].Conditions, string(workapiv1.ManifestAvailable), metav1.ConditionTrue) {
+					t.Fatal(spew.Sdump(work.Status.ResourceStatus.Manifests[0].Conditions))
+				}
+				if !hasStatusCondition(work.Status.ResourceStatus.Manifests[1].Conditions, string(workapiv1.ManifestAvailable), metav1.ConditionFalse) {
+					t.Fatal(spew.Sdump(work.Status.ResourceStatus.Manifests[1].Conditions))
+				}
+
+				if !hasStatusCondition(work.Status.Conditions, string(workapiv1.WorkAvailable), metav1.ConditionFalse) {
+					t.Fatal(spew.Sdump(work.Status.Conditions))
+				}
+			},
+		},
+		{
+			name: "build status when one of the resources has incomplete meta",
+			existingResources: []runtime.Object{
+				spoketesting.NewUnstructuredSecret("ns1", "n1", false, "ns1-n1"),
+			},
+			manifests: []workapiv1.ManifestCondition{
+				newManifest("", "v1", "secrets", "ns1", "n1"),
+				newManifest("", "", "", "", ""),
+			},
+			validateActions: func(t *testing.T, actions []clienttesting.Action) {
+				if len(actions) != 1 {
+					t.Fatal(spew.Sdump(actions))
+				}
+
+				work := actions[0].(clienttesting.UpdateAction).GetObject().(*workapiv1.ManifestWork)
+				if len(work.Status.ResourceStatus.Manifests) != 2 {
+					t.Fatal(spew.Sdump(work.Status.ResourceStatus.Manifests))
+				}
+				if !hasStatusCondition(work.Status.ResourceStatus.Manifests[0].Conditions, string(workapiv1.ManifestAvailable), metav1.ConditionTrue) {
+					t.Fatal(spew.Sdump(work.Status.ResourceStatus.Manifests[0].Conditions))
+				}
+				if !hasStatusCondition(work.Status.ResourceStatus.Manifests[1].Conditions, string(workapiv1.ManifestAvailable), metav1.ConditionUnknown) {
+					t.Fatal(spew.Sdump(work.Status.ResourceStatus.Manifests[1].Conditions))
+				}
+
+				if !hasStatusCondition(work.Status.Conditions, string(workapiv1.WorkAvailable), metav1.ConditionUnknown) {
+					t.Fatal(spew.Sdump(work.Status.Conditions))
+				}
+			},
+		},
+	}
+
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			testingWork, _ := spoketesting.NewManifestWork(0)
+			testingWork.Status = workapiv1.ManifestWorkStatus{
+				Conditions: c.workConditions,
+				ResourceStatus: workapiv1.ManifestResourceStatus{
+					Manifests: c.manifests,
+				},
+			}
+
+			fakeClient := fakeworkclient.NewSimpleClientset(testingWork)
+			fakeDynamicClient := fakedynamic.NewSimpleDynamicClient(runtime.NewScheme(), c.existingResources...)
+			controller := AvailableStatusController{
+				manifestWorkClient: fakeClient.WorkV1().ManifestWorks(testingWork.Namespace),
+				spokeDynamicClient: fakeDynamicClient,
+			}
+
+			err := controller.syncManifestWork(context.TODO(), testingWork)
+			if err != nil {
+				t.Fatal(err)
+			}
+			c.validateActions(t, fakeClient.Actions())
+		})
+	}
+}
+
+func newManifest(group, version, resource, namespace, name string) workapiv1.ManifestCondition {
+	return workapiv1.ManifestCondition{
+		ResourceMeta: workapiv1.ManifestResourceMeta{
+			Group:     group,
+			Version:   version,
+			Resource:  resource,
+			Namespace: namespace,
+			Name:      name,
+		},
+	}
+}
+
+func hasStatusCondition(conditions []workapiv1.StatusCondition, conditionType string, status metav1.ConditionStatus) bool {
+	for _, condition := range conditions {
+		if condition.Type != conditionType {
+			continue
+		}
+
+		return condition.Status == status
+	}
+
+	return false
+}
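The cases above drive availability through a fake dynamic client. A minimal, self-contained sketch of the same Get-based existence check in isolation (the GVR and object are illustrative):

```go
package main

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	fakedynamic "k8s.io/client-go/dynamic/fake"
)

func main() {
	// Seed the fake client with one unstructured Secret, mirroring the tests above.
	secret := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "Secret",
		"metadata":   map[string]interface{}{"namespace": "ns1", "name": "n1"},
	}}
	client := fakedynamic.NewSimpleDynamicClient(runtime.NewScheme(), secret)

	// The same Get-by-GVR probe isResourceAvailable performs.
	gvr := schema.GroupVersionResource{Version: "v1", Resource: "secrets"}
	_, err := client.Resource(gvr).Namespace("ns1").Get(context.TODO(), "n1", metav1.GetOptions{})
	fmt.Println("available:", err == nil, "notFound:", errors.IsNotFound(err))
}
```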
@@ -8,6 +8,7 @@ import (
 	"github.com/open-cluster-management/work/pkg/spoke/controllers/appliedmanifestcontroller"
 	"github.com/open-cluster-management/work/pkg/spoke/controllers/finalizercontroller"
 	"github.com/open-cluster-management/work/pkg/spoke/controllers/manifestcontroller"
+	"github.com/open-cluster-management/work/pkg/spoke/controllers/statuscontroller"
 
 	"github.com/openshift/library-go/pkg/controller/controllercmd"
 	"github.com/spf13/cobra"
@@ -132,6 +133,13 @@ func (o *WorkloadAgentOptions) RunWorkloadAgent(ctx context.Context, controllerC
 		spokeWorkInformerFactory.Work().V1().AppliedManifestWorks(),
 		hubhash,
 	)
+	availableStatusController := statuscontroller.NewAvailableStatusController(
+		controllerContext.EventRecorder,
+		spokeDynamicClient,
+		hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName),
+		workInformerFactory.Work().V1().ManifestWorks(),
+		workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.SpokeClusterName),
+	)
 
 	go workInformerFactory.Start(ctx.Done())
 	go spokeWorkInformerFactory.Start(ctx.Done())
@@ -140,6 +148,7 @@ func (o *WorkloadAgentOptions) RunWorkloadAgent(ctx context.Context, controllerC
 	go appliedManifestWorkController.Run(ctx, 1)
 	go manifestWorkController.Run(ctx, 1)
 	go manifestWorkFinalizeController.Run(ctx, 1)
+	go availableStatusController.Run(ctx, 1)
 	<-ctx.Done()
 	return nil
 }
@@ -177,12 +177,16 @@ var _ = ginkgo.Describe("Work agent", func() {
 
 			// check manifest status conditions
 			expectedManifestStatuses := []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue}
-			if ok := haveManifestCondition(work.Status.ResourceStatus.Manifests, string(workapiv1.WorkApplied), expectedManifestStatuses); !ok {
+			if ok := haveManifestCondition(work.Status.ResourceStatus.Manifests, string(workapiv1.ManifestApplied), expectedManifestStatuses); !ok {
 				return false
 			}
+			if ok := haveManifestCondition(work.Status.ResourceStatus.Manifests, string(workapiv1.ManifestAvailable), expectedManifestStatuses); !ok {
+				return false
+			}
 
 			// check work status condition
-			return haveCondition(work.Status.Conditions, string(workapiv1.WorkApplied), metav1.ConditionTrue)
+			return haveCondition(work.Status.Conditions, string(workapiv1.WorkApplied), metav1.ConditionTrue) &&
+				haveCondition(work.Status.Conditions, string(workapiv1.WorkAvailable), metav1.ConditionTrue)
 		}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
 
 		// get the corresponding AppliedManifestWork
@@ -251,12 +255,16 @@ var _ = ginkgo.Describe("Work agent", func() {
 
 			// check manifest status conditions
 			expectedManifestStatuses := []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue}
-			if ok := haveManifestCondition(work.Status.ResourceStatus.Manifests, string(workapiv1.WorkApplied), expectedManifestStatuses); !ok {
+			if ok := haveManifestCondition(work.Status.ResourceStatus.Manifests, string(workapiv1.ManifestApplied), expectedManifestStatuses); !ok {
 				return false
 			}
+			if ok := haveManifestCondition(work.Status.ResourceStatus.Manifests, string(workapiv1.ManifestAvailable), expectedManifestStatuses); !ok {
+				return false
+			}
 
 			// check work status condition
-			return haveCondition(work.Status.Conditions, string(workapiv1.WorkApplied), metav1.ConditionTrue)
+			return haveCondition(work.Status.Conditions, string(workapiv1.WorkApplied), metav1.ConditionTrue) &&
+				haveCondition(work.Status.Conditions, string(workapiv1.WorkAvailable), metav1.ConditionTrue)
 		}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
 
 		// check if cm1 is deleted
@@ -370,12 +378,16 @@ var _ = ginkgo.Describe("Work agent", func() {
 
 			// check manifest status conditions
 			expectedManifestStatuses := []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue}
-			if ok := haveManifestCondition(work.Status.ResourceStatus.Manifests, string(workapiv1.WorkApplied), expectedManifestStatuses); !ok {
+			if ok := haveManifestCondition(work.Status.ResourceStatus.Manifests, string(workapiv1.ManifestApplied), expectedManifestStatuses); !ok {
 				return false
 			}
+			if ok := haveManifestCondition(work.Status.ResourceStatus.Manifests, string(workapiv1.ManifestAvailable), expectedManifestStatuses); !ok {
+				return false
+			}
 
 			// check work status condition
-			return haveCondition(work.Status.Conditions, string(workapiv1.WorkApplied), metav1.ConditionTrue)
+			return haveCondition(work.Status.Conditions, string(workapiv1.WorkApplied), metav1.ConditionTrue) &&
+				haveCondition(work.Status.Conditions, string(workapiv1.WorkAvailable), metav1.ConditionTrue)
 		}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
 	})
})
@@ -19,6 +19,7 @@ import (
 
 	workapiv1 "github.com/open-cluster-management/api/work/v1"
 	"github.com/open-cluster-management/work/pkg/spoke"
+	"github.com/open-cluster-management/work/pkg/spoke/controllers/statuscontroller"
 	"github.com/open-cluster-management/work/pkg/spoke/resource"
 	"github.com/open-cluster-management/work/test/integration/util"
 )
@@ -52,6 +53,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
 		gomega.Expect(err).ToNot(gomega.HaveOccurred())
 
 		resource.MapperRefreshInterval = 2 * time.Second
+		statuscontroller.ControllerReSyncInterval = 3 * time.Second
 
 		var ctx context.Context
 		ctx, cancel = context.WithCancel(context.Background())
@@ -88,11 +90,15 @@ var _ = ginkgo.Describe("ManifestWork", func() {
 
 			util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
 				[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
+			util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
+				[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
 		})
 
 		ginkgo.It("should update work and then apply it successfully", func() {
 			util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
 				[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
+			util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
+				[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
 
 			newManifests := []workapiv1.Manifest{
 				util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm2", map[string]string{"x": "y"}, nil)),
@@ -150,11 +156,15 @@ var _ = ginkgo.Describe("ManifestWork", func() {
 
 			util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionFalse,
 				[]metav1.ConditionStatus{metav1.ConditionFalse, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
+			util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionFalse,
+				[]metav1.ConditionStatus{metav1.ConditionFalse, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
 		})
 
 		ginkgo.It("should update work and then apply it successfully", func() {
 			util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionFalse,
 				[]metav1.ConditionStatus{metav1.ConditionFalse, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
+			util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionFalse,
+				[]metav1.ConditionStatus{metav1.ConditionFalse, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
 
 			newManifests := []workapiv1.Manifest{
 				util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm1", map[string]string{"a": "b"}, nil)),
@@ -170,6 +180,10 @@ var _ = ginkgo.Describe("ManifestWork", func() {
 
 			util.AssertExistenceOfConfigMaps(newManifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
 
+			// check if the Available status is updated
+			util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
+				[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
+
 			// check if resource created by stale manifest is deleted once it is removed from applied resource list
 			gomega.Eventually(func() bool {
 				appliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), appliedManifestWorkName, metav1.GetOptions{})
@@ -230,6 +244,8 @@ var _ = ginkgo.Describe("ManifestWork", func() {
 		ginkgo.It("should create CRD and CR successfully", func() {
 			util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
 				[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
+			util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
+				[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
 
 			var namespaces, names []string
 			for _, obj := range objects {
@@ -244,6 +260,8 @@ var _ = ginkgo.Describe("ManifestWork", func() {
 		ginkgo.It("should delete CRD and CR successfully", func() {
 			util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
 				[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
+			util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
+				[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
 
 			var namespaces, names []string
 			for _, obj := range objects {
@@ -312,6 +330,9 @@ var _ = ginkgo.Describe("ManifestWork", func() {
 			util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
 				[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue},
 				eventuallyTimeout, eventuallyInterval)
+			util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
+				[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue},
+				eventuallyTimeout, eventuallyInterval)
 
 			var namespaces, names []string
 			for _, obj := range objects {
@@ -328,6 +349,9 @@ var _ = ginkgo.Describe("ManifestWork", func() {
 			util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
 				[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue},
 				eventuallyTimeout, eventuallyInterval)
+			util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
+				[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue},
+				eventuallyTimeout, eventuallyInterval)
 
 			ginkgo.By("check existence of all maintained resources")
 			var namespaces, names []string
@@ -401,7 +425,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
 			}
 
 			for i := range work.Status.ResourceStatus.Manifests {
-				if len(work.Status.ResourceStatus.Manifests[i].Conditions) != 1 {
+				if len(work.Status.ResourceStatus.Manifests[i].Conditions) != 2 {
 					return false
 				}
 			}
@@ -442,6 +466,8 @@ var _ = ginkgo.Describe("ManifestWork", func() {
 
 			util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
 				[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
+			util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
+				[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
 
 			work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
 			gomega.Expect(err).ToNot(gomega.HaveOccurred())
@@ -490,6 +516,8 @@ var _ = ginkgo.Describe("ManifestWork", func() {
 
 			util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
 				[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
+			util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
+				[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
 
 			err := hubWorkClient.WorkV1().ManifestWorks(work.Namespace).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
 			gomega.Expect(err).ToNot(gomega.HaveOccurred())