Merge pull request #39 from elgnay/available-status

Add controller to handle Available status
OpenShift Merge Robot authored on 2020-08-07 00:29:42 -04:00, committed by GitHub
8 changed files with 577 additions and 15 deletions


@@ -177,12 +177,16 @@ var _ = ginkgo.Describe("Work agent", func() {
// check manifest status conditions
expectedManifestStatuses := []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue}
-if ok := haveManifestCondition(work.Status.ResourceStatus.Manifests, string(workapiv1.WorkApplied), expectedManifestStatuses); !ok {
+if ok := haveManifestCondition(work.Status.ResourceStatus.Manifests, string(workapiv1.ManifestApplied), expectedManifestStatuses); !ok {
return false
}
+if ok := haveManifestCondition(work.Status.ResourceStatus.Manifests, string(workapiv1.ManifestAvailable), expectedManifestStatuses); !ok {
+return false
+}
// check work status condition
-return haveCondition(work.Status.Conditions, string(workapiv1.WorkApplied), metav1.ConditionTrue)
+return haveCondition(work.Status.Conditions, string(workapiv1.WorkApplied), metav1.ConditionTrue) &&
+haveCondition(work.Status.Conditions, string(workapiv1.WorkAvailable), metav1.ConditionTrue)
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
// get the corresponding AppliedManifestWork
@@ -251,12 +255,16 @@ var _ = ginkgo.Describe("Work agent", func() {
// check manifest status conditions
expectedManifestStatuses := []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue}
-if ok := haveManifestCondition(work.Status.ResourceStatus.Manifests, string(workapiv1.WorkApplied), expectedManifestStatuses); !ok {
+if ok := haveManifestCondition(work.Status.ResourceStatus.Manifests, string(workapiv1.ManifestApplied), expectedManifestStatuses); !ok {
return false
}
+if ok := haveManifestCondition(work.Status.ResourceStatus.Manifests, string(workapiv1.ManifestAvailable), expectedManifestStatuses); !ok {
+return false
+}
// check work status condition
-return haveCondition(work.Status.Conditions, string(workapiv1.WorkApplied), metav1.ConditionTrue)
+return haveCondition(work.Status.Conditions, string(workapiv1.WorkApplied), metav1.ConditionTrue) &&
+haveCondition(work.Status.Conditions, string(workapiv1.WorkAvailable), metav1.ConditionTrue)
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
// check if cm1 is deleted
@@ -370,12 +378,16 @@ var _ = ginkgo.Describe("Work agent", func() {
// check manifest status conditions
expectedManifestStatuses := []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue}
-if ok := haveManifestCondition(work.Status.ResourceStatus.Manifests, string(workapiv1.WorkApplied), expectedManifestStatuses); !ok {
+if ok := haveManifestCondition(work.Status.ResourceStatus.Manifests, string(workapiv1.ManifestApplied), expectedManifestStatuses); !ok {
return false
}
+if ok := haveManifestCondition(work.Status.ResourceStatus.Manifests, string(workapiv1.ManifestAvailable), expectedManifestStatuses); !ok {
+return false
+}
// check work status condition
-return haveCondition(work.Status.Conditions, string(workapiv1.WorkApplied), metav1.ConditionTrue)
+return haveCondition(work.Status.Conditions, string(workapiv1.WorkApplied), metav1.ConditionTrue) &&
+haveCondition(work.Status.Conditions, string(workapiv1.WorkAvailable), metav1.ConditionTrue)
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
})
})
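For reference, the haveCondition and haveManifestCondition helpers used throughout these assertions are defined elsewhere in the test suite and are not part of this diff. A minimal sketch of what they could look like, assuming conditions are modeled as metav1.Condition (the repo's workapiv1 condition types may differ):

// Sketch only: the real helpers live elsewhere in the test suite; modeling
// manifest conditions as metav1.Condition is an assumption here.
package sketch

import (
	workapiv1 "github.com/open-cluster-management/api/work/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// haveCondition reports whether conditions contains a condition of the given
// type with the expected status.
func haveCondition(conditions []metav1.Condition, expectedType string, expectedStatus metav1.ConditionStatus) bool {
	for _, condition := range conditions {
		if condition.Type == expectedType {
			return condition.Status == expectedStatus
		}
	}
	return false
}

// haveManifestCondition reports whether every manifest carries a condition of
// the given type whose status matches the expected status at the same index.
func haveManifestCondition(manifests []workapiv1.ManifestCondition, expectedType string, expectedStatuses []metav1.ConditionStatus) bool {
	if len(manifests) != len(expectedStatuses) {
		return false
	}
	for i, manifest := range manifests {
		if !haveCondition(manifest.Conditions, expectedType, expectedStatuses[i]) {
			return false
		}
	}
	return true
}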


@@ -19,6 +19,7 @@ import (
workapiv1 "github.com/open-cluster-management/api/work/v1"
"github.com/open-cluster-management/work/pkg/spoke"
"github.com/open-cluster-management/work/pkg/spoke/controllers/statuscontroller"
"github.com/open-cluster-management/work/pkg/spoke/resource"
"github.com/open-cluster-management/work/test/integration/util"
)
@@ -52,6 +53,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
gomega.Expect(err).ToNot(gomega.HaveOccurred())
resource.MapperRefreshInterval = 2 * time.Second
+statuscontroller.ControllerReSyncInterval = 3 * time.Second
var ctx context.Context
ctx, cancel = context.WithCancel(context.Background())
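ControllerReSyncInterval is a package-level variable in statuscontroller, which is what lets this test setup shorten it to 3 seconds so the Available status is recomputed quickly. A minimal sketch of how a resync-driven controller might consume such a variable, assuming the openshift/library-go controller factory; the constructor name, default interval, and sync body are illustrative, not the repo's exact code:

package statuscontroller

import (
	"context"
	"time"

	"github.com/openshift/library-go/pkg/controller/factory"
	"github.com/openshift/library-go/pkg/operator/events"
)

// ControllerReSyncInterval is exported so tests can shorten the resync
// period. The 10s default here is illustrative.
var ControllerReSyncInterval = 10 * time.Second

// newAvailableStatusController (hypothetical name) wires a sync function
// that re-evaluates the Available status of applied resources on every
// resync tick.
func newAvailableStatusController(recorder events.Recorder) factory.Controller {
	return factory.New().
		WithSync(sync).
		ResyncEvery(ControllerReSyncInterval).
		ToController("AvailableStatusController", recorder)
}

func sync(ctx context.Context, syncCtx factory.SyncContext) error {
	// placeholder: fetch each applied resource and update the corresponding
	// ManifestAvailable / WorkAvailable conditions here
	return nil
}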
@@ -88,11 +90,15 @@ var _ = ginkgo.Describe("ManifestWork", func() {
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
+util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
+[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
})
ginkgo.It("should update work and then apply it successfully", func() {
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
+util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
+[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
newManifests := []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm2", map[string]string{"x": "y"}, nil)),
@@ -150,11 +156,15 @@ var _ = ginkgo.Describe("ManifestWork", func() {
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionFalse,
[]metav1.ConditionStatus{metav1.ConditionFalse, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
+util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionFalse,
+[]metav1.ConditionStatus{metav1.ConditionFalse, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
})
ginkgo.It("should update work and then apply it successfully", func() {
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionFalse,
[]metav1.ConditionStatus{metav1.ConditionFalse, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
+util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionFalse,
+[]metav1.ConditionStatus{metav1.ConditionFalse, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
newManifests := []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm1", map[string]string{"a": "b"}, nil)),
@@ -170,6 +180,10 @@ var _ = ginkgo.Describe("ManifestWork", func() {
util.AssertExistenceOfConfigMaps(newManifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
+// check if Available status is updated or not
+util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
+[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
// check if resource created by stale manifest is deleted once it is removed from applied resource list
gomega.Eventually(func() bool {
appliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), appliedManifestWorkName, metav1.GetOptions{})
@@ -230,6 +244,8 @@ var _ = ginkgo.Describe("ManifestWork", func() {
ginkgo.It("should create CRD and CR successfully", func() {
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
+util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
+[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
var namespaces, names []string
for _, obj := range objects {
@@ -244,6 +260,8 @@ var _ = ginkgo.Describe("ManifestWork", func() {
ginkgo.It("should delete CRD and CR successfully", func() {
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
+util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
+[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
var namespaces, names []string
for _, obj := range objects {
@@ -312,6 +330,9 @@ var _ = ginkgo.Describe("ManifestWork", func() {
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue},
eventuallyTimeout, eventuallyInterval)
+util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
+[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue},
+eventuallyTimeout, eventuallyInterval)
var namespaces, names []string
for _, obj := range objects {
@@ -328,6 +349,9 @@ var _ = ginkgo.Describe("ManifestWork", func() {
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue},
eventuallyTimeout, eventuallyInterval)
+util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
+[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue},
+eventuallyTimeout, eventuallyInterval)
ginkgo.By("check existence of all maintained resources")
var namespaces, names []string
@@ -401,7 +425,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
}
for i := range work.Status.ResourceStatus.Manifests {
-if len(work.Status.ResourceStatus.Manifests[i].Conditions) != 1 {
+if len(work.Status.ResourceStatus.Manifests[i].Conditions) != 2 {
return false
}
}
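The per-manifest condition count changes from 1 to 2 because each manifest status now carries an Available condition alongside the existing Applied condition. Illustratively, a healthy manifest's conditions after this change would look like the following (statuses hypothetical, and the concrete struct may be the repo's own condition type rather than metav1.Condition):

conditions := []metav1.Condition{
	{Type: string(workapiv1.ManifestApplied), Status: metav1.ConditionTrue},
	{Type: string(workapiv1.ManifestAvailable), Status: metav1.ConditionTrue},
}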
@@ -442,6 +466,8 @@ var _ = ginkgo.Describe("ManifestWork", func() {
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
+util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
+[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
@@ -490,6 +516,8 @@ var _ = ginkgo.Describe("ManifestWork", func() {
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
+util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
+[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
err := hubWorkClient.WorkV1().ManifestWorks(work.Namespace).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
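The WorkAvailable assertions above are driven by the new Available-status controller running on the spoke. A hedged sketch of the per-resource check such a controller performs, assuming a dynamic-client lookup; the function name and condition reasons are illustrative, not the repo's exact code:

package statuscontroller

import (
	"context"

	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
)

// availableCondition reports whether an applied resource still exists on the
// spoke cluster: found => Available=True, not found => Available=False, any
// other error => Available=Unknown.
func availableCondition(ctx context.Context, client dynamic.Interface, gvr schema.GroupVersionResource, namespace, name string) metav1.Condition {
	_, err := client.Resource(gvr).Namespace(namespace).Get(ctx, name, metav1.GetOptions{})
	switch {
	case errors.IsNotFound(err):
		return metav1.Condition{Type: "Available", Status: metav1.ConditionFalse, Reason: "ResourceNotAvailable"}
	case err != nil:
		return metav1.Condition{Type: "Available", Status: metav1.ConditionUnknown, Reason: "FetchingError"}
	default:
		return metav1.Condition{Type: "Available", Status: metav1.ConditionTrue, Reason: "ResourceAvailable"}
	}
}

Aggregating these per-manifest results (all true => WorkAvailable=True) yields the work-level condition the tests above assert.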