Requeue ssar check if only hubKubeConfigSecret is unauthorized (#1169) (#1164)

Signed-off-by: Jian Qiu <jqiu@redhat.com>
Jian Qiu, committed by GitHub
2025-09-08 15:11:44 +08:00
parent 7d42f5f9f6, commit b4b42aa0b5
20 changed files with 618 additions and 601 deletions
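The headline change of this commit is not visible in the hunks captured here: the agent's SelfSubjectAccessReview (SSAR) check against the hub is requeued when the hub rejects the credentials from hubKubeConfigSecret, instead of being treated as a terminal failure. A minimal sketch of that pattern, with illustrative names only (the actual controller code differs):

package main

import (
	"context"

	authorizationv1 "k8s.io/api/authorization/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// checkHubPermission is a hypothetical SSAR probe: it reports requeue=true
// when the hub rejects the agent's current credentials, so the caller can
// retry after hubKubeConfigSecret is refreshed rather than failing hard.
func checkHubPermission(ctx context.Context, hubClient kubernetes.Interface) (requeue bool, err error) {
	ssar := &authorizationv1.SelfSubjectAccessReview{
		Spec: authorizationv1.SelfSubjectAccessReviewSpec{
			ResourceAttributes: &authorizationv1.ResourceAttributes{
				// illustrative attributes; the real check covers the agent's hub resources
				Group:    "cluster.open-cluster-management.io",
				Resource: "managedclusters",
				Verb:     "get",
			},
		},
	}
	result, err := hubClient.AuthorizationV1().SelfSubjectAccessReviews().Create(ctx, ssar, metav1.CreateOptions{})
	if apierrors.IsUnauthorized(err) {
		return true, nil // stale token: requeue the check instead of erroring out
	}
	if err != nil {
		return false, err
	}
	return !result.Status.Allowed, nil // not yet allowed: check again later
}

func main() {}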

go.mod

@@ -41,7 +41,7 @@ require (
k8s.io/utils v0.0.0-20241210054802-24370beab758
open-cluster-management.io/addon-framework v1.0.1-0.20250811135502-4bae358d84c6
open-cluster-management.io/api v1.0.1-0.20250903073454-c6702adf44cc
open-cluster-management.io/sdk-go v1.0.1-0.20250901084824-d4c9f78c2e6a
open-cluster-management.io/sdk-go v1.0.1-0.20250905083121-3fc951c340cc
sigs.k8s.io/about-api v0.0.0-20250131010323-518069c31c03
sigs.k8s.io/cluster-inventory-api v0.0.0-20240730014211-ef0154379848
sigs.k8s.io/controller-runtime v0.21.0

go.sum

@@ -559,8 +559,8 @@ open-cluster-management.io/addon-framework v1.0.1-0.20250811135502-4bae358d84c6
open-cluster-management.io/addon-framework v1.0.1-0.20250811135502-4bae358d84c6/go.mod h1:fOPWaRyo6upgHFskcL18Al1kI2Ua9HzrS8uothWEe84=
open-cluster-management.io/api v1.0.1-0.20250903073454-c6702adf44cc h1:U8O6RhHjp088oWuQsGx6pwwFpOFgWo1gl9qhgIGgDpk=
open-cluster-management.io/api v1.0.1-0.20250903073454-c6702adf44cc/go.mod h1:lEc5Wkc9ON5ym/qAtIqNgrE7NW7IEOCOC611iQMlnKM=
open-cluster-management.io/sdk-go v1.0.1-0.20250901084824-d4c9f78c2e6a h1:3AUAQPzzQXqxs4Dk7BwAIIjsWaQEps9JsXp88HEXq3E=
open-cluster-management.io/sdk-go v1.0.1-0.20250901084824-d4c9f78c2e6a/go.mod h1:JVQupKu0xVcuVP4IUJF7hjvrXK8plZiwGPZcdqngjXk=
open-cluster-management.io/sdk-go v1.0.1-0.20250905083121-3fc951c340cc h1:wi6w+Gi7MhGepfDFNN71JGc5jIsWWJOtcXJFQfHripM=
open-cluster-management.io/sdk-go v1.0.1-0.20250905083121-3fc951c340cc/go.mod h1:JVQupKu0xVcuVP4IUJF7hjvrXK8plZiwGPZcdqngjXk=
sigs.k8s.io/about-api v0.0.0-20250131010323-518069c31c03 h1:1ShFiMjGQOR/8jTBkmZrk1gORxnvMwm1nOy2/DbHg4U=
sigs.k8s.io/about-api v0.0.0-20250131010323-518069c31c03/go.mod h1:F1pT4mK53U6F16/zuaPSYpBaR7x5Kjym6aKJJC0/DHU=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM=


@@ -147,7 +147,7 @@ func (m *unmanagedAppliedWorkController) evictAppliedManifestWork(ctx context.Co
if err != nil {
return err
}
m.recorder.Eventf("AppliedManifestWorkEvicted", appliedManifestWork.Name,
m.recorder.Eventf("AppliedManifestWorkEvicted",
"AppliedManifestWork %s evicted by agent %s after eviction grace period", appliedManifestWork.Name, m.agentID)
return nil
}
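The one-line fix above is an argument-shift bug: with a printf-style recorder (Eventf(reason, messageFmt string, args ...interface{}), the shape used by openshift/library-go's events package), the extra appliedManifestWork.Name landed in the messageFmt slot, so the human-readable message never rendered. A standalone demonstration, assuming that signature:

package main

import "fmt"

// eventf mimics a printf-style recorder: Eventf(reason, messageFmt, args...).
func eventf(reason, messageFmt string, args ...interface{}) {
	fmt.Printf("reason=%s message=%q\n", reason, fmt.Sprintf(messageFmt, args...))
}

func main() {
	name, agent := "work-1", "agent-a"

	// Before the fix: the work name slid into the messageFmt slot and the
	// intended message became a stray argument (output contains %!(EXTRA ...)).
	eventf("AppliedManifestWorkEvicted", name,
		"AppliedManifestWork %s evicted by agent %s after eviction grace period", name, agent)

	// After the fix: reason and format line up as intended.
	eventf("AppliedManifestWorkEvicted",
		"AppliedManifestWork %s evicted by agent %s after eviction grace period", name, agent)
}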


@@ -68,15 +68,10 @@ test-cloudevents-work-mqtt-integration: ensure-kubebuilder-tools build-work-inte
.PHONY: test-cloudevents-work-mqtt-integration
# In the cloud events scenario, skip the following tests
# - executor_test.go, this feature is not supported yet by cloud events work client
# - unmanaged_appliedwork_test.go, this test mainly focuses on switching the hub kube-apiserver
# - manifestworkreplicaset_test.go, this test needs to update the work status with the hub work client,
# cloud events work client does not support it. (TODO) may add e2e for mwrs.
# - executor_test.go, this feature is not supported yet since the executor field is not in the manifestbundle.
test-cloudevents-work-grpc-integration: ensure-kubebuilder-tools build-work-integration
./work-integration.test -ginkgo.slow-spec-threshold=15s -ginkgo.v -ginkgo.fail-fast \
-ginkgo.skip-file manifestworkreplicaset_test.go \
-ginkgo.skip-file executor_test.go \
-ginkgo.skip-file unmanaged_appliedwork_test.go \
-test.driver=grpc \
-v=4 ${ARGS}
.PHONY: test-cloudevents-work-grpc-integration


@@ -75,7 +75,7 @@ var _ = ginkgo.Describe("Registration using GRPC", ginkgo.Ordered, ginkgo.Label(
gomega.Expect(tempDir).ToNot(gomega.BeEmpty())
bootstrapGRPCConfigFile = path.Join(tempDir, "grpcconfig")
_, gRPCServerOptions, gRPCCAKeyFile, err = util.CreateGRPCConfigs(bootstrapGRPCConfigFile)
_, gRPCServerOptions, gRPCCAKeyFile, err = util.CreateGRPCConfigs(bootstrapGRPCConfigFile, "8090")
gomega.Expect(err).NotTo(gomega.HaveOccurred())
var grpcServerCtx context.Context


@@ -97,7 +97,7 @@ func (h *GRPCServerRegistrationHook) Run(ctx context.Context) {
go h.AddOnInformers.Start(ctx.Done())
}
func CreateGRPCConfigs(configFileName string) (string, *sdkgrpc.GRPCServerOptions, string, error) {
func CreateGRPCConfigs(configFileName string, port string) (string, *sdkgrpc.GRPCServerOptions, string, error) {
serverCertPairs, err := util.NewServerCertPairs()
if err != nil {
return "", nil, "", err
@@ -133,6 +133,7 @@ func CreateGRPCConfigs(configFileName string) (string, *sdkgrpc.GRPCServerOption
serverOptions.ClientCAFile = caFile
serverOptions.TLSCertFile = serverCertFile
serverOptions.TLSKeyFile = serverKeyFile
serverOptions.ServerBindPort = port
config := &grpc.GRPCConfig{
CertConfig: cert.CertConfig{
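CreateGRPCConfigs now takes the bind port explicitly and writes it into ServerBindPort (the option comes with the sdk-go bump above), presumably so suites running more than one gRPC server do not collide on the default port; the registration test above passes "8090". A usage sketch against the new signature:

package main

import (
	"log"
	"path"

	"open-cluster-management.io/ocm/test/integration/util"
)

func main() {
	configFile := path.Join("/tmp", "grpcconfig")
	// The port is now an explicit argument; the other return values are unchanged.
	_, serverOptions, caKeyFile, err := util.CreateGRPCConfigs(configFile, "8090")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("gRPC server binds on port %s (CA key at %s)", serverOptions.ServerBindPort, caKeyFile)
}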


@@ -285,7 +285,7 @@ func NewDeployment(namespace, name, sa string) (u *unstructured.Unstructured, gv
return u, gvr, nil
}
func NewDaesonSet(namespace, name string) (u *unstructured.Unstructured, gvr schema.GroupVersionResource, err error) {
func NewDaemonSet(namespace, name string) (u *unstructured.Unstructured, gvr schema.GroupVersionResource, err error) {
u, err = loadResourceFromJSON(daemonsetJson)
if err != nil {
return u, gvr, err


@@ -34,11 +34,6 @@ func NewWorkPatch(old, new *workapiv1.ManifestWork) ([]byte, error) {
return patchBytes, nil
}
func AppliedManifestWorkName(sourceDriver, hubHash string, work *workapiv1.ManifestWork) string {
if sourceDriver != KubeDriver {
// if the source is not kube, the uid will be used as the manifestwork name on the agent side
return fmt.Sprintf("%s-%s", hubHash, work.UID)
}
func AppliedManifestWorkName(hubHash string, work *workapiv1.ManifestWork) string {
return fmt.Sprintf("%s-%s", hubHash, work.Name)
}
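With the driver-specific branch removed, applied manifest work names are always hubHash-name; callers (see the delete-option suite below) drop the sourceDriver argument. A self-contained illustration of the simplified helper:

package main

import (
	"fmt"

	workapiv1 "open-cluster-management.io/api/work/v1"
)

// appliedName mirrors the simplified util.AppliedManifestWorkName: the result
// no longer depends on the source driver, so kube and cloud-events agents
// share one naming scheme.
func appliedName(hubHash string, work *workapiv1.ManifestWork) string {
	return fmt.Sprintf("%s-%s", hubHash, work.Name)
}

func main() {
	w := &workapiv1.ManifestWork{}
	w.Name = "example-work"
	fmt.Println(appliedName("3f2c9a", w)) // prints "3f2c9a-example-work"
}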


@@ -3,7 +3,6 @@ package work
import (
"context"
"fmt"
"time"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
@@ -15,15 +14,12 @@ import (
workapiv1 "open-cluster-management.io/api/work/v1"
commonoptions "open-cluster-management.io/ocm/pkg/common/options"
"open-cluster-management.io/ocm/pkg/work/spoke"
"open-cluster-management.io/ocm/test/integration/util"
)
var _ = ginkgo.Describe("ManifestWork Condition Rules", func() {
var o *spoke.WorkloadAgentOptions
var commOptions *commonoptions.AgentOptions
var cancel context.CancelFunc
var clusterName string
var workName string
var work *workapiv1.ManifestWork
@@ -33,22 +29,10 @@ var _ = ginkgo.Describe("ManifestWork Condition Rules", func() {
ginkgo.BeforeEach(func() {
workName = fmt.Sprintf("condition-rules-work-%s", rand.String(5))
clusterName := rand.String(5)
o = spoke.NewWorkloadAgentOptions()
o.StatusSyncInterval = 3 * time.Second
o.WorkloadSourceDriver = sourceDriver
o.WorkloadSourceConfig = sourceConfigFileName
if sourceDriver != util.KubeDriver {
o.CloudEventsClientID = fmt.Sprintf("%s-work-agent", clusterName)
o.CloudEventsClientCodecs = []string{"manifestbundle"}
}
commOptions = commonoptions.NewAgentOptions()
commOptions.SpokeClusterName = clusterName
clusterName = rand.String(5)
ns := &corev1.Namespace{}
ns.Name = commOptions.SpokeClusterName
ns.Name = clusterName
_, err = spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
@@ -57,24 +41,24 @@ var _ = ginkgo.Describe("ManifestWork Condition Rules", func() {
})
ginkgo.JustBeforeEach(func() {
work = util.NewManifestWork(commOptions.SpokeClusterName, workName, manifests)
work = util.NewManifestWork(clusterName, workName, manifests)
gomega.Expect(err).ToNot(gomega.HaveOccurred())
})
ginkgo.AfterEach(func() {
err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), commOptions.SpokeClusterName, metav1.DeleteOptions{})
err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), clusterName, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
})
ginkgo.Context("Job Condition Rules", func() {
ginkgo.BeforeEach(func() {
u, _, err := util.NewJob(commOptions.SpokeClusterName, "job1", "sa")
u, _, err := util.NewJob(clusterName, "job1", "sa")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
manifests = append(manifests, util.ToManifest(u))
var ctx context.Context
ctx, cancel = context.WithCancel(context.Background())
go startWorkAgent(ctx, o, commOptions)
go startWorkAgent(ctx, clusterName)
})
ginkgo.AfterEach(func() {
@@ -89,7 +73,7 @@ var _ = ginkgo.Describe("ManifestWork Condition Rules", func() {
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "batch",
Resource: "jobs",
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: "job1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
@@ -104,7 +88,7 @@ var _ = ginkgo.Describe("ManifestWork Condition Rules", func() {
},
}
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
@@ -114,7 +98,7 @@ var _ = ginkgo.Describe("ManifestWork Condition Rules", func() {
// Update Job status on spoke
gomega.Eventually(func() error {
job, err := spokeKubeClient.BatchV1().Jobs(commOptions.SpokeClusterName).Get(context.Background(), "job1", metav1.GetOptions{})
job, err := spokeKubeClient.BatchV1().Jobs(clusterName).Get(context.Background(), "job1", metav1.GetOptions{})
if err != nil {
return err
}
@@ -122,13 +106,13 @@ var _ = ginkgo.Describe("ManifestWork Condition Rules", func() {
job.Status.Active = 1
job.Status.Ready = ptr.To(int32(1))
_, err = spokeKubeClient.BatchV1().Jobs(commOptions.SpokeClusterName).UpdateStatus(context.Background(), job, metav1.UpdateOptions{})
_, err = spokeKubeClient.BatchV1().Jobs(clusterName).UpdateStatus(context.Background(), job, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// Check completed condition
gomega.Eventually(func() error {
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
@@ -146,7 +130,7 @@ var _ = ginkgo.Describe("ManifestWork Condition Rules", func() {
// Update complete condition on job
gomega.Eventually(func() error {
job, err := spokeKubeClient.BatchV1().Jobs(commOptions.SpokeClusterName).Get(context.Background(), "job1", metav1.GetOptions{})
job, err := spokeKubeClient.BatchV1().Jobs(clusterName).Get(context.Background(), "job1", metav1.GetOptions{})
if err != nil {
return err
}
@@ -163,13 +147,13 @@ var _ = ginkgo.Describe("ManifestWork Condition Rules", func() {
},
}
_, err = spokeKubeClient.BatchV1().Jobs(commOptions.SpokeClusterName).UpdateStatus(context.Background(), job, metav1.UpdateOptions{})
_, err = spokeKubeClient.BatchV1().Jobs(clusterName).UpdateStatus(context.Background(), job, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// Check if the condition is updated on work api
gomega.Eventually(func() error {
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
@@ -211,7 +195,7 @@ var _ = ginkgo.Describe("ManifestWork Condition Rules", func() {
},
}
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
@@ -220,7 +204,7 @@ var _ = ginkgo.Describe("ManifestWork Condition Rules", func() {
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
gomega.Eventually(func() error {
job, err := spokeKubeClient.BatchV1().Jobs(commOptions.SpokeClusterName).Get(context.Background(), "job1", metav1.GetOptions{})
job, err := spokeKubeClient.BatchV1().Jobs(clusterName).Get(context.Background(), "job1", metav1.GetOptions{})
if err != nil {
return err
}
@@ -228,13 +212,13 @@ var _ = ginkgo.Describe("ManifestWork Condition Rules", func() {
job.Status.Active = 3
job.Status.Conditions = []batchv1.JobCondition{}
_, err = spokeKubeClient.BatchV1().Jobs(commOptions.SpokeClusterName).UpdateStatus(context.Background(), job, metav1.UpdateOptions{})
_, err = spokeKubeClient.BatchV1().Jobs(clusterName).UpdateStatus(context.Background(), job, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// Check if we get condition on work api
gomega.Eventually(func() error {
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
@@ -253,20 +237,20 @@ var _ = ginkgo.Describe("ManifestWork Condition Rules", func() {
// Set active to 1
gomega.Eventually(func() error {
job, err := spokeKubeClient.BatchV1().Jobs(commOptions.SpokeClusterName).Get(context.Background(), "job1", metav1.GetOptions{})
job, err := spokeKubeClient.BatchV1().Jobs(clusterName).Get(context.Background(), "job1", metav1.GetOptions{})
if err != nil {
return err
}
job.Status.Active = 1
_, err = spokeKubeClient.BatchV1().Jobs(commOptions.SpokeClusterName).UpdateStatus(context.Background(), job, metav1.UpdateOptions{})
_, err = spokeKubeClient.BatchV1().Jobs(clusterName).UpdateStatus(context.Background(), job, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// Check if condition is updated on work api
gomega.Eventually(func() error {
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
@@ -290,7 +274,7 @@ var _ = ginkgo.Describe("ManifestWork Condition Rules", func() {
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "batch",
Resource: "jobs",
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: "job1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
@@ -305,7 +289,7 @@ var _ = ginkgo.Describe("ManifestWork Condition Rules", func() {
},
}
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
@@ -315,7 +299,7 @@ var _ = ginkgo.Describe("ManifestWork Condition Rules", func() {
// Update Job status on spoke
gomega.Eventually(func() error {
job, err := spokeKubeClient.BatchV1().Jobs(commOptions.SpokeClusterName).Get(context.Background(), "job1", metav1.GetOptions{})
job, err := spokeKubeClient.BatchV1().Jobs(clusterName).Get(context.Background(), "job1", metav1.GetOptions{})
if err != nil {
return err
}
@@ -324,13 +308,13 @@ var _ = ginkgo.Describe("ManifestWork Condition Rules", func() {
job.Status.Ready = ptr.To(int32(1))
job.Status.Conditions = []batchv1.JobCondition{}
_, err = spokeKubeClient.BatchV1().Jobs(commOptions.SpokeClusterName).UpdateStatus(context.Background(), job, metav1.UpdateOptions{})
_, err = spokeKubeClient.BatchV1().Jobs(clusterName).UpdateStatus(context.Background(), job, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// Check if we get condition on work api
gomega.Eventually(func() error {
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
@@ -349,16 +333,16 @@ var _ = ginkgo.Describe("ManifestWork Condition Rules", func() {
ginkgo.Context("Jobs condition rules with wildcard", func() {
ginkgo.BeforeEach(func() {
job1, _, err := util.NewJob(commOptions.SpokeClusterName, "job1", "sa")
job1, _, err := util.NewJob(clusterName, "job1", "sa")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
manifests = append(manifests, util.ToManifest(job1))
job2, _, err := util.NewJob(commOptions.SpokeClusterName, "job2", "sa")
job2, _, err := util.NewJob(clusterName, "job2", "sa")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
manifests = append(manifests, util.ToManifest(job2))
var ctx context.Context
ctx, cancel = context.WithCancel(context.Background())
go startWorkAgent(ctx, o, commOptions)
go startWorkAgent(ctx, clusterName)
})
ginkgo.AfterEach(func() {
@@ -388,7 +372,7 @@ var _ = ginkgo.Describe("ManifestWork Condition Rules", func() {
},
}
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
@@ -399,7 +383,7 @@ var _ = ginkgo.Describe("ManifestWork Condition Rules", func() {
// Update Job status on spoke
gomega.Eventually(func() error {
// Set first job as active
job, err := spokeKubeClient.BatchV1().Jobs(commOptions.SpokeClusterName).Get(context.Background(), "job1", metav1.GetOptions{})
job, err := spokeKubeClient.BatchV1().Jobs(clusterName).Get(context.Background(), "job1", metav1.GetOptions{})
if err != nil {
return err
}
@@ -407,13 +391,13 @@ var _ = ginkgo.Describe("ManifestWork Condition Rules", func() {
job.Status.Active = 1
job.Status.Ready = ptr.To(int32(1))
_, err = spokeKubeClient.BatchV1().Jobs(commOptions.SpokeClusterName).UpdateStatus(context.Background(), job, metav1.UpdateOptions{})
_, err = spokeKubeClient.BatchV1().Jobs(clusterName).UpdateStatus(context.Background(), job, metav1.UpdateOptions{})
if err != nil {
return err
}
// Set second job as complete
job, err = spokeKubeClient.BatchV1().Jobs(commOptions.SpokeClusterName).Get(context.Background(), "job2", metav1.GetOptions{})
job, err = spokeKubeClient.BatchV1().Jobs(clusterName).Get(context.Background(), "job2", metav1.GetOptions{})
if err != nil {
return err
}
@@ -430,13 +414,13 @@ var _ = ginkgo.Describe("ManifestWork Condition Rules", func() {
},
}
_, err = spokeKubeClient.BatchV1().Jobs(commOptions.SpokeClusterName).UpdateStatus(context.Background(), job, metav1.UpdateOptions{})
_, err = spokeKubeClient.BatchV1().Jobs(clusterName).UpdateStatus(context.Background(), job, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// Check if we get conditions of jobs on work api
gomega.Eventually(func() error {
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
@@ -504,7 +488,7 @@ var _ = ginkgo.Describe("ManifestWork Condition Rules", func() {
},
}
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(),
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(),
work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
@@ -516,7 +500,7 @@ var _ = ginkgo.Describe("ManifestWork Condition Rules", func() {
// Update Job status on spoke
gomega.Eventually(func() error {
// Set first job as active
job, err := spokeKubeClient.BatchV1().Jobs(commOptions.SpokeClusterName).Get(context.Background(), "job1", metav1.GetOptions{})
job, err := spokeKubeClient.BatchV1().Jobs(clusterName).Get(context.Background(), "job1", metav1.GetOptions{})
if err != nil {
return err
}
@@ -524,13 +508,13 @@ var _ = ginkgo.Describe("ManifestWork Condition Rules", func() {
job.Status.Active = 1
job.Status.Ready = ptr.To(int32(1))
_, err = spokeKubeClient.BatchV1().Jobs(commOptions.SpokeClusterName).UpdateStatus(context.Background(), job, metav1.UpdateOptions{})
_, err = spokeKubeClient.BatchV1().Jobs(clusterName).UpdateStatus(context.Background(), job, metav1.UpdateOptions{})
if err != nil {
return err
}
// Set second job as complete
job, err = spokeKubeClient.BatchV1().Jobs(commOptions.SpokeClusterName).Get(context.Background(), "job2", metav1.GetOptions{})
job, err = spokeKubeClient.BatchV1().Jobs(clusterName).Get(context.Background(), "job2", metav1.GetOptions{})
if err != nil {
return err
}
@@ -547,13 +531,13 @@ var _ = ginkgo.Describe("ManifestWork Condition Rules", func() {
},
}
_, err = spokeKubeClient.BatchV1().Jobs(commOptions.SpokeClusterName).UpdateStatus(context.Background(), job, metav1.UpdateOptions{})
_, err = spokeKubeClient.BatchV1().Jobs(clusterName).UpdateStatus(context.Background(), job, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// Check if we get conditions of jobs on work api
gomega.Eventually(func() error {
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
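Across these test files the per-spec agent wiring (WorkloadAgentOptions, AgentOptions, cloud-events client setup) disappears from each BeforeEach, and startWorkAgent is called with just the cluster name. Judging by the deleted lines, the shared helper now absorbs roughly the following; this is a hedged reconstruction, not the actual suite code (sourceDriver and sourceConfigFileName are suite-scoped values):

func startWorkAgent(ctx context.Context, clusterName string) {
	o := spoke.NewWorkloadAgentOptions()
	o.StatusSyncInterval = 3 * time.Second
	o.WorkloadSourceDriver = sourceDriver
	o.WorkloadSourceConfig = sourceConfigFileName
	if sourceDriver != util.KubeDriver {
		// cloud-events drivers need a per-cluster client identity and codec
		o.CloudEventsClientID = fmt.Sprintf("%s-work-agent", clusterName)
		o.CloudEventsClientCodecs = []string{"manifestbundle"}
	}
	commOptions := commonoptions.NewAgentOptions()
	commOptions.SpokeClusterName = clusterName
	// ... run the agent with (o, commOptions) until ctx is cancelled ...
}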


@@ -3,7 +3,6 @@ package work
import (
"context"
"fmt"
"time"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
@@ -15,17 +14,14 @@ import (
workapiv1 "open-cluster-management.io/api/work/v1"
commonoptions "open-cluster-management.io/ocm/pkg/common/options"
"open-cluster-management.io/ocm/pkg/work/spoke"
"open-cluster-management.io/ocm/test/integration/util"
)
var _ = ginkgo.Describe("ManifestWork Delete Option", func() {
var o *spoke.WorkloadAgentOptions
var commOptions *commonoptions.AgentOptions
var cancel context.CancelFunc
var workName string
var clusterName string
var work *workapiv1.ManifestWork
var appliedManifestWorkName string
var manifests []workapiv1.Manifest
@@ -33,44 +29,31 @@ var _ = ginkgo.Describe("ManifestWork Delete Option", func() {
var err error
ginkgo.BeforeEach(func() {
clusterName := rand.String(5)
clusterName = rand.String(5)
workName = fmt.Sprintf("work-delete-option-%s", rand.String(5))
o = spoke.NewWorkloadAgentOptions()
o.StatusSyncInterval = 3 * time.Second
o.WorkloadSourceDriver = sourceDriver
o.WorkloadSourceConfig = sourceConfigFileName
if sourceDriver != util.KubeDriver {
o.CloudEventsClientID = fmt.Sprintf("%s-work-agent", clusterName)
o.CloudEventsClientCodecs = []string{"manifestbundle"}
}
commOptions = commonoptions.NewAgentOptions()
commOptions.SpokeClusterName = clusterName
ns := &corev1.Namespace{}
ns.Name = commOptions.SpokeClusterName
ns.Name = clusterName
_, err := spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
var ctx context.Context
ctx, cancel = context.WithCancel(context.Background())
go startWorkAgent(ctx, o, commOptions)
go startWorkAgent(ctx, clusterName)
// reset manifests
manifests = nil
})
ginkgo.JustBeforeEach(func() {
work = util.NewManifestWork(commOptions.SpokeClusterName, workName, manifests)
gomega.Expect(err).ToNot(gomega.HaveOccurred())
work = util.NewManifestWork(clusterName, workName, manifests)
})
ginkgo.AfterEach(func() {
if cancel != nil {
cancel()
}
err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), commOptions.SpokeClusterName, metav1.DeleteOptions{})
err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), clusterName, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
})
@@ -79,40 +62,40 @@ var _ = ginkgo.Describe("ManifestWork Delete Option", func() {
var anotherAppliedManifestWorkName string
ginkgo.BeforeEach(func() {
manifests = []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm1, map[string]string{"a": "b"}, []string{})),
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm2, map[string]string{"c": "d"}, []string{})),
util.ToManifest(util.NewConfigmap(clusterName, cm1, map[string]string{"a": "b"}, []string{})),
util.ToManifest(util.NewConfigmap(clusterName, cm2, map[string]string{"c": "d"}, []string{})),
}
// Create another manifestwork with one shared resource.
anotherWork = util.NewManifestWork(commOptions.SpokeClusterName, "sharing-resource-work", []workapiv1.Manifest{manifests[0]})
anotherWork = util.NewManifestWork(clusterName, "sharing-resource-work", []workapiv1.Manifest{manifests[0]})
})
ginkgo.JustBeforeEach(func() {
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
appliedManifestWorkName = util.AppliedManifestWorkName(sourceDriver, hubHash, work)
appliedManifestWorkName = util.AppliedManifestWorkName(hubHash, work)
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
anotherWork, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), anotherWork, metav1.CreateOptions{})
anotherWork, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), anotherWork, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(anotherWork.Namespace, anotherWork.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
util.AssertWorkCondition(anotherWork.Namespace, anotherWork.Name, hubWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
anotherAppliedManifestWorkName = util.AppliedManifestWorkName(sourceDriver, hubHash, anotherWork)
anotherAppliedManifestWorkName = util.AppliedManifestWorkName(hubHash, anotherWork)
})
ginkgo.It("shared resource between the manifestwork should be kept when one manifestwork is deleted", func() {
// ensure configmap exists and get its uid
util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
curentConfigMap, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm1, metav1.GetOptions{})
currentConfigMap, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
currentUID := curentConfigMap.UID
currentUID := currentConfigMap.UID
// Ensure that uid recorded in the appliedmanifestwork and anotherappliedmanifestwork is correct.
gomega.Eventually(func() error {
@@ -147,7 +130,7 @@ var _ = ginkgo.Describe("ManifestWork Delete Option", func() {
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// Delete one manifestwork
err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
// Ensure the appliedmanifestwork of deleted manifestwork is removed so it won't try to delete shared resource
@@ -164,7 +147,7 @@ var _ = ginkgo.Describe("ManifestWork Delete Option", func() {
// Ensure the configmap is kept and tracked by anotherappliedmanifestwork.
gomega.Eventually(func() error {
configMap, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm1, metav1.GetOptions{})
configMap, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{})
if err != nil {
return err
}
@@ -173,13 +156,13 @@ var _ = ginkgo.Describe("ManifestWork Delete Option", func() {
return fmt.Errorf("UID should be equal")
}
anotherappliedmanifestwork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(
anotherAppliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(
context.Background(), anotherAppliedManifestWorkName, metav1.GetOptions{})
if err != nil {
return err
}
for _, appliedResource := range anotherappliedmanifestwork.Status.AppliedResources {
for _, appliedResource := range anotherAppliedManifestWork.Status.AppliedResources {
if appliedResource.Name != cm1 {
return fmt.Errorf("resource Name should be cm1")
}
@@ -196,7 +179,7 @@ var _ = ginkgo.Describe("ManifestWork Delete Option", func() {
ginkgo.It("shared resource between the manifestwork should be kept when the shared resource is removed from one manifestwork", func() {
// ensure configmap exists and get its uid
util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
curentConfigMap, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm1, metav1.GetOptions{})
curentConfigMap, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
currentUID := curentConfigMap.UID
@@ -233,7 +216,7 @@ var _ = ginkgo.Describe("ManifestWork Delete Option", func() {
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// Update one manifestwork to remove the shared resource
updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
newWork := updatedWork.DeepCopy()
@@ -242,7 +225,7 @@ var _ = ginkgo.Describe("ManifestWork Delete Option", func() {
pathBytes, err := util.NewWorkPatch(updatedWork, newWork)
gomega.Expect(err).ToNot(gomega.HaveOccurred())
_, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Patch(
_, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Patch(
context.Background(), updatedWork.Name, types.MergePatchType, pathBytes, metav1.PatchOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
@@ -264,7 +247,7 @@ var _ = ginkgo.Describe("ManifestWork Delete Option", func() {
// Ensure the configmap is kept and tracked by anotherappliedmanifestwork
gomega.Eventually(func() error {
configMap, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(
configMap, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(
context.Background(), cm1, metav1.GetOptions{})
if err != nil {
return err
@@ -299,8 +282,8 @@ var _ = ginkgo.Describe("ManifestWork Delete Option", func() {
ginkgo.Context("Delete options", func() {
ginkgo.BeforeEach(func() {
manifests = []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm1, map[string]string{"a": "b"}, []string{})),
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm2, map[string]string{"c": "d"}, []string{})),
util.ToManifest(util.NewConfigmap(clusterName, cm1, map[string]string{"a": "b"}, []string{})),
util.ToManifest(util.NewConfigmap(clusterName, cm2, map[string]string{"c": "d"}, []string{})),
}
})
@@ -309,7 +292,7 @@ var _ = ginkgo.Describe("ManifestWork Delete Option", func() {
PropagationPolicy: workapiv1.DeletePropagationPolicyTypeOrphan,
}
_, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
_, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
@@ -322,7 +305,7 @@ var _ = ginkgo.Describe("ManifestWork Delete Option", func() {
// Ensure ownership of configmap is updated
gomega.Eventually(func() error {
cm, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm1, metav1.GetOptions{})
cm, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{})
if err != nil {
return err
}
@@ -335,7 +318,7 @@ var _ = ginkgo.Describe("ManifestWork Delete Option", func() {
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
gomega.Eventually(func() error {
cm, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm2, metav1.GetOptions{})
cm, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm2, metav1.GetOptions{})
if err != nil {
return err
}
@@ -348,12 +331,12 @@ var _ = ginkgo.Describe("ManifestWork Delete Option", func() {
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// Delete the work
err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
// Wait for deletion of manifest work
gomega.Eventually(func() bool {
_, err := hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
_, err := hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
return errors.IsNotFound(err)
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
@@ -369,14 +352,14 @@ var _ = ginkgo.Describe("ManifestWork Delete Option", func() {
{
Group: "",
Resource: "configmaps",
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: cm1,
},
},
},
}
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
@@ -389,7 +372,7 @@ var _ = ginkgo.Describe("ManifestWork Delete Option", func() {
// Ensure ownership of configmap is updated
gomega.Eventually(func() error {
cm, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm1, metav1.GetOptions{})
cm, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{})
if err != nil {
return err
}
@@ -402,21 +385,21 @@ var _ = ginkgo.Describe("ManifestWork Delete Option", func() {
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// Delete the work
err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
// Wait for deletion of manifest work
gomega.Eventually(func() bool {
_, err := hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
_, err := hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
return errors.IsNotFound(err)
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
// One of the resources should be deleted.
_, err = spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm2, metav1.GetOptions{})
_, err = spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm2, metav1.GetOptions{})
gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue())
// One of the resources should be kept
_, err = spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm1, metav1.GetOptions{})
_, err = spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
})
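For reference, the delete option this spec exercises: a SelectivelyOrphan policy keeps exactly the resources matched by its OrphaningRules when the ManifestWork is deleted. Constructing it with the types from open-cluster-management.io/api/work/v1 (clusterName and cm1 are the spec's values):

work.Spec.DeleteOption = &workapiv1.DeleteOption{
	PropagationPolicy: workapiv1.DeletePropagationPolicyTypeSelectivelyOrphan,
	SelectivelyOrphan: &workapiv1.SelectivelyOrphan{
		OrphaningRules: []workapiv1.OrphaningRule{
			// cm1 survives the work's deletion; everything else is cleaned up
			{Group: "", Resource: "configmaps", Namespace: clusterName, Name: cm1},
		},
	},
}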
@@ -428,14 +411,14 @@ var _ = ginkgo.Describe("ManifestWork Delete Option", func() {
{
Group: "",
Resource: "configmaps",
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: cm1,
},
},
},
}
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
@@ -448,7 +431,7 @@ var _ = ginkgo.Describe("ManifestWork Delete Option", func() {
// Ensure ownership of configmap is updated
gomega.Eventually(func() error {
cm, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm1, metav1.GetOptions{})
cm, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{})
if err != nil {
return err
}
@@ -462,14 +445,14 @@ var _ = ginkgo.Describe("ManifestWork Delete Option", func() {
// Remove the resource from the manifests
gomega.Eventually(func() error {
updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
newWork := updatedWork.DeepCopy()
newWork.Spec.Workload.Manifests = []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm2, map[string]string{"c": "d"}, []string{})),
util.ToManifest(util.NewConfigmap(clusterName, cm2, map[string]string{"c": "d"}, []string{})),
}
pathBytes, err := util.NewWorkPatch(updatedWork, newWork)
@@ -477,7 +460,7 @@ var _ = ginkgo.Describe("ManifestWork Delete Option", func() {
return err
}
_, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Patch(
_, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Patch(
context.Background(), updatedWork.Name, types.MergePatchType, pathBytes, metav1.PatchOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
@@ -487,10 +470,11 @@ var _ = ginkgo.Describe("ManifestWork Delete Option", func() {
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
// Sleep 5 second and check the resource should be kept
time.Sleep(5 * time.Second)
_, err = spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm1, metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
// Wait for the resource to be observed
gomega.Eventually(func() error {
_, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
})
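The spec above used to sleep five seconds and assert once, which races with the agent's apply loop; the commit polls instead. The same idea without gomega, using apimachinery's wait helpers (a sketch; spokeKubeClient, clusterName and cm1 are the suite values used above):

err := wait.PollUntilContextTimeout(ctx, time.Second, 30*time.Second, true,
	func(ctx context.Context) (bool, error) {
		_, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(ctx, cm1, metav1.GetOptions{})
		return err == nil, nil // keep polling until the configmap is observed
	})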
ginkgo.It("Clean the resource when orphan deletion option is removed", func() {
@@ -501,14 +485,14 @@ var _ = ginkgo.Describe("ManifestWork Delete Option", func() {
{
Group: "",
Resource: "configmaps",
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: cm1,
},
},
},
}
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
@@ -521,7 +505,7 @@ var _ = ginkgo.Describe("ManifestWork Delete Option", func() {
// Ensure ownership of configmap is updated
gomega.Eventually(func() error {
cm, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm1, metav1.GetOptions{})
cm, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{})
if err != nil {
return err
}
@@ -535,7 +519,7 @@ var _ = ginkgo.Describe("ManifestWork Delete Option", func() {
// Remove the delete option
gomega.Eventually(func() error {
updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
@@ -548,14 +532,14 @@ var _ = ginkgo.Describe("ManifestWork Delete Option", func() {
return err
}
_, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Patch(
_, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Patch(
context.Background(), updatedWork.Name, types.MergePatchType, pathBytes, metav1.PatchOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// Ensure ownership of configmap is updated
gomega.Eventually(func() error {
cm, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm1, metav1.GetOptions{})
cm, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{})
if err != nil {
return err
}
@@ -568,19 +552,19 @@ var _ = ginkgo.Describe("ManifestWork Delete Option", func() {
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// Delete the work
err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
// Wait for deletion of manifest work
gomega.Eventually(func() bool {
_, err := hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
_, err := hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
return errors.IsNotFound(err)
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
// All of the resource should be deleted.
_, err = spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm2, metav1.GetOptions{})
// All the resources should be deleted.
_, err = spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm2, metav1.GetOptions{})
gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue())
_, err = spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm1, metav1.GetOptions{})
_, err = spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{})
gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue())
})
})


@@ -3,7 +3,6 @@ package work
import (
"context"
"encoding/json"
"time"
jsonpatch "github.com/evanphx/json-patch"
"github.com/onsi/ginkgo/v2"
@@ -17,18 +16,15 @@ import (
workclientset "open-cluster-management.io/api/client/work/clientset/versioned"
workapiv1 "open-cluster-management.io/api/work/v1"
commonoptions "open-cluster-management.io/ocm/pkg/common/options"
"open-cluster-management.io/ocm/pkg/features"
"open-cluster-management.io/ocm/pkg/work/spoke"
"open-cluster-management.io/ocm/test/integration/util"
)
var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
var o *spoke.WorkloadAgentOptions
var commOptions *commonoptions.AgentOptions
var cancel context.CancelFunc
var work *workapiv1.ManifestWork
var clusterName string
var manifests []workapiv1.Manifest
var executor *workapiv1.ManifestWorkExecutor
@@ -38,25 +34,23 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
executorName := "test-executor"
ginkgo.BeforeEach(func() {
o = spoke.NewWorkloadAgentOptions()
o.StatusSyncInterval = 3 * time.Second
o.WorkloadSourceDriver = sourceDriver
o.WorkloadSourceConfig = sourceConfigFileName
err := features.SpokeMutableFeatureGate.Set("ExecutorValidatingCaches=true")
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.DeferCleanup(func() {
_ = features.SpokeMutableFeatureGate.Set("ExecutorValidatingCaches=false")
})
commOptions = commonoptions.NewAgentOptions()
commOptions.SpokeClusterName = utilrand.String(5)
clusterName = utilrand.String(5)
ns := &corev1.Namespace{}
ns.Name = commOptions.SpokeClusterName
ns := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{Name: clusterName},
}
_, err = spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
var ctx context.Context
ctx, cancel = context.WithCancel(context.Background())
go startWorkAgent(ctx, o, commOptions)
go startWorkAgent(ctx, clusterName)
// reset manifests
manifests = nil
@@ -64,7 +58,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
})
ginkgo.JustBeforeEach(func() {
work = util.NewManifestWork(commOptions.SpokeClusterName, "", manifests)
work = util.NewManifestWork(clusterName, "", manifests)
gomega.Expect(err).ToNot(gomega.HaveOccurred())
work.Spec.Executor = executor
})
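This suite also starts toggling the ExecutorValidatingCaches feature gate per spec, restoring it with ginkgo.DeferCleanup so the mutable gate cannot leak into later specs; the pattern from the hunk above, isolated:

ginkgo.BeforeEach(func() {
	err := features.SpokeMutableFeatureGate.Set("ExecutorValidatingCaches=true")
	gomega.Expect(err).NotTo(gomega.HaveOccurred())
	// DeferCleanup runs after the spec completes, so later specs see the default
	ginkgo.DeferCleanup(func() {
		_ = features.SpokeMutableFeatureGate.Set("ExecutorValidatingCaches=false")
	})
})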
@@ -74,21 +68,21 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
cancel()
}
err := spokeKubeClient.CoreV1().Namespaces().Delete(
context.Background(), commOptions.SpokeClusterName, metav1.DeleteOptions{})
context.Background(), clusterName, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
})
ginkgo.Context("Apply the resource with executor", func() {
ginkgo.BeforeEach(func() {
manifests = []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm1, map[string]string{"a": "b"}, []string{})),
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm2, map[string]string{"c": "d"}, []string{})),
util.ToManifest(util.NewConfigmap(clusterName, cm1, map[string]string{"a": "b"}, []string{})),
util.ToManifest(util.NewConfigmap(clusterName, cm2, map[string]string{"c": "d"}, []string{})),
}
executor = &workapiv1.ManifestWorkExecutor{
Subject: workapiv1.ManifestWorkExecutorSubject{
Type: workapiv1.ExecutorSubjectTypeServiceAccount,
ServiceAccount: &workapiv1.ManifestWorkSubjectServiceAccount{
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: executorName,
},
},
@@ -96,7 +90,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
})
ginkgo.It("Executor does not have permission", func() {
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(
context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
@@ -112,10 +106,10 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
})
ginkgo.It("Executor does not have permission to partial resources", func() {
_, err = spokeKubeClient.RbacV1().Roles(commOptions.SpokeClusterName).Create(
_, err = spokeKubeClient.RbacV1().Roles(clusterName).Create(
context.TODO(), &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: roleName,
},
Rules: []rbacv1.PolicyRule{
@@ -128,16 +122,16 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
},
}, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
_, err = spokeKubeClient.RbacV1().RoleBindings(commOptions.SpokeClusterName).Create(
_, err = spokeKubeClient.RbacV1().RoleBindings(clusterName).Create(
context.TODO(), &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: roleName,
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: executorName,
},
},
@@ -149,7 +143,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
}, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(
context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
@@ -163,19 +157,19 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
// ensure configmap cm1 exist and cm2 not exist
util.AssertExistenceOfConfigMaps(
[]workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm1, map[string]string{"a": "b"}, []string{})),
util.ToManifest(util.NewConfigmap(clusterName, cm1, map[string]string{"a": "b"}, []string{})),
}, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
util.AssertNonexistenceOfConfigMaps(
[]workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm2, map[string]string{"a": "b"}, []string{})),
util.ToManifest(util.NewConfigmap(clusterName, cm2, map[string]string{"a": "b"}, []string{})),
}, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
})
ginkgo.It("Executor has permission for all resources", func() {
_, err = spokeKubeClient.RbacV1().Roles(commOptions.SpokeClusterName).Create(
_, err = spokeKubeClient.RbacV1().Roles(clusterName).Create(
context.TODO(), &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: roleName,
},
Rules: []rbacv1.PolicyRule{
@@ -188,16 +182,16 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
},
}, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
_, err = spokeKubeClient.RbacV1().RoleBindings(commOptions.SpokeClusterName).Create(
_, err = spokeKubeClient.RbacV1().RoleBindings(clusterName).Create(
context.TODO(), &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: roleName,
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: executorName,
},
},
@@ -209,7 +203,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
}, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(
context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
@@ -228,14 +222,14 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
ginkgo.Context("Apply the resource with executor deleting validating", func() {
ginkgo.BeforeEach(func() {
manifests = []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm1, map[string]string{"a": "b"}, []string{})),
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm2, map[string]string{"c": "d"}, []string{})),
util.ToManifest(util.NewConfigmap(clusterName, cm1, map[string]string{"a": "b"}, []string{})),
util.ToManifest(util.NewConfigmap(clusterName, cm2, map[string]string{"c": "d"}, []string{})),
}
executor = &workapiv1.ManifestWorkExecutor{
Subject: workapiv1.ManifestWorkExecutorSubject{
Type: workapiv1.ExecutorSubjectTypeServiceAccount,
ServiceAccount: &workapiv1.ManifestWorkSubjectServiceAccount{
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: executorName,
},
},
@@ -243,10 +237,10 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
})
ginkgo.It("Executor does not have delete permission and delete option is foreground", func() {
_, err = spokeKubeClient.RbacV1().Roles(commOptions.SpokeClusterName).Create(
_, err = spokeKubeClient.RbacV1().Roles(clusterName).Create(
context.TODO(), &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: roleName,
},
Rules: []rbacv1.PolicyRule{
@@ -259,16 +253,16 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
},
}, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
_, err = spokeKubeClient.RbacV1().RoleBindings(commOptions.SpokeClusterName).Create(
_, err = spokeKubeClient.RbacV1().RoleBindings(clusterName).Create(
context.TODO(), &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: roleName,
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: executorName,
},
},
@@ -280,7 +274,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
}, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(
context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
@@ -296,10 +290,10 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
})
ginkgo.It("Executor does not have delete permission and delete option is orphan", func() {
_, err = spokeKubeClient.RbacV1().Roles(commOptions.SpokeClusterName).Create(
_, err = spokeKubeClient.RbacV1().Roles(clusterName).Create(
context.TODO(), &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: roleName,
},
Rules: []rbacv1.PolicyRule{
@@ -312,16 +306,16 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
},
}, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
_, err = spokeKubeClient.RbacV1().RoleBindings(commOptions.SpokeClusterName).Create(
_, err = spokeKubeClient.RbacV1().RoleBindings(clusterName).Create(
context.TODO(), &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: roleName,
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: executorName,
},
},
@@ -336,7 +330,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
work.Spec.DeleteOption = &workapiv1.DeleteOption{
PropagationPolicy: workapiv1.DeletePropagationPolicyTypeOrphan,
}
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(
context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
@@ -352,10 +346,10 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
})
ginkgo.It("Executor does not have delete permission and delete option is selectively orphan", func() {
_, err = spokeKubeClient.RbacV1().Roles(commOptions.SpokeClusterName).Create(
_, err = spokeKubeClient.RbacV1().Roles(clusterName).Create(
context.TODO(), &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: roleName,
},
Rules: []rbacv1.PolicyRule{
@@ -368,16 +362,16 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
},
}, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
_, err = spokeKubeClient.RbacV1().RoleBindings(commOptions.SpokeClusterName).Create(
_, err = spokeKubeClient.RbacV1().RoleBindings(clusterName).Create(
context.TODO(), &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: roleName,
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: executorName,
},
},
@@ -395,13 +389,13 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
OrphaningRules: []workapiv1.OrphaningRule{
{
Resource: "configmaps",
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: cm1,
},
},
},
}
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(
context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
@@ -415,11 +409,11 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
// ensure configmap cm1 exists and cm2 does not exist
util.AssertExistenceOfConfigMaps(
[]workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm1, map[string]string{"a": "b"}, []string{})),
util.ToManifest(util.NewConfigmap(clusterName, cm1, map[string]string{"a": "b"}, []string{})),
}, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
util.AssertNonexistenceOfConfigMaps(
[]workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm2, map[string]string{"a": "b"}, []string{})),
util.ToManifest(util.NewConfigmap(clusterName, cm2, map[string]string{"a": "b"}, []string{})),
}, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
})
})
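The three delete-permission specs above exercise the same underlying check. As a minimal sketch (an assumption for illustration; the specs only pin the observable behaviour, not the agent's internal code), the executor check can be phrased as a SubjectAccessReview against the spoke kube-apiserver:
// assumes: authorizationv1 "k8s.io/api/authorization/v1"
sar := &authorizationv1.SubjectAccessReview{
	Spec: authorizationv1.SubjectAccessReviewSpec{
		User: fmt.Sprintf("system:serviceaccount:%s:%s", clusterName, executorName),
		ResourceAttributes: &authorizationv1.ResourceAttributes{
			Namespace: clusterName,
			Verb:      "delete",
			Resource:  "configmaps",
		},
	},
}
resp, err := spokeKubeClient.AuthorizationV1().SubjectAccessReviews().Create(
	context.TODO(), sar, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
// resp.Status.Allowed would be false here, since the roles above omit "delete";
// the foreground and orphan specs differ in whether that permission is needed.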
@@ -427,20 +421,20 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
ginkgo.Context("Apply the resource with executor escalation validating", func() {
ginkgo.BeforeEach(func() {
manifests = []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm1, map[string]string{"a": "b"}, []string{})),
util.ToManifest(util.NewRoleForManifest(commOptions.SpokeClusterName, "role-cm-creator", rbacv1.PolicyRule{
util.ToManifest(util.NewConfigmap(clusterName, cm1, map[string]string{"a": "b"}, []string{})),
util.ToManifest(util.NewRoleForManifest(clusterName, "role-cm-creator", rbacv1.PolicyRule{
Verbs: []string{"create", "update", "patch", "get", "list", "delete"},
APIGroups: []string{""},
Resources: []string{"configmaps"},
})),
util.ToManifest(util.NewRoleBindingForManifest(commOptions.SpokeClusterName, "role-cm-creator-binding",
util.ToManifest(util.NewRoleBindingForManifest(clusterName, "role-cm-creator-binding",
rbacv1.RoleRef{
Kind: "Role",
Name: "role-cm-creator",
},
rbacv1.Subject{
Kind: "ServiceAccount",
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: executorName,
})),
}
@@ -448,7 +442,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
Subject: workapiv1.ManifestWorkExecutorSubject{
Type: workapiv1.ExecutorSubjectTypeServiceAccount,
ServiceAccount: &workapiv1.ManifestWorkSubjectServiceAccount{
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: executorName,
},
},
@@ -456,11 +450,11 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
})
ginkgo.It("no permission", func() {
_, err = spokeKubeClient.RbacV1().Roles(commOptions.SpokeClusterName).Create(
_, err = spokeKubeClient.RbacV1().Roles(clusterName).Create(
context.TODO(), &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
Name: roleName,
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
},
Rules: []rbacv1.PolicyRule{
{
@@ -472,16 +466,16 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
},
}, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
_, err = spokeKubeClient.RbacV1().RoleBindings(commOptions.SpokeClusterName).Create(
_, err = spokeKubeClient.RbacV1().RoleBindings(clusterName).Create(
context.TODO(), &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: roleName,
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: executorName,
},
},
@@ -493,7 +487,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
}, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(
context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
@@ -509,16 +503,16 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
// ensure the configmap does not exist
util.AssertNonexistenceOfConfigMaps(
[]workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm1, map[string]string{"a": "b"}, []string{})),
util.ToManifest(util.NewConfigmap(clusterName, cm1, map[string]string{"a": "b"}, []string{})),
}, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
})
ginkgo.It("no permission for already existing resource", func() {
_, err = spokeKubeClient.RbacV1().Roles(commOptions.SpokeClusterName).Create(
_, err = spokeKubeClient.RbacV1().Roles(clusterName).Create(
context.TODO(), &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
Name: roleName,
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
},
Rules: []rbacv1.PolicyRule{
{
@@ -530,16 +524,16 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
},
}, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
_, err = spokeKubeClient.RbacV1().RoleBindings(commOptions.SpokeClusterName).Create(
_, err = spokeKubeClient.RbacV1().RoleBindings(clusterName).Create(
context.TODO(), &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: roleName,
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: executorName,
},
},
@@ -552,11 +546,11 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
gomega.Expect(err).ToNot(gomega.HaveOccurred())
// make the role exist with lower permission
_, err = spokeKubeClient.RbacV1().Roles(commOptions.SpokeClusterName).Create(
_, err = spokeKubeClient.RbacV1().Roles(clusterName).Create(
context.TODO(), &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
Name: "role-cm-creator",
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
},
Rules: []rbacv1.PolicyRule{
{
@@ -568,7 +562,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
}, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(
context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
@@ -585,16 +579,16 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
// ensure the configmap does not exist
util.AssertNonexistenceOfConfigMaps(
[]workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm1, map[string]string{"a": "b"}, []string{})),
util.ToManifest(util.NewConfigmap(clusterName, cm1, map[string]string{"a": "b"}, []string{})),
}, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
})
ginkgo.It("with permission", func() {
_, err = spokeKubeClient.RbacV1().Roles(commOptions.SpokeClusterName).Create(
_, err = spokeKubeClient.RbacV1().Roles(clusterName).Create(
context.TODO(), &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
Name: roleName,
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
},
Rules: []rbacv1.PolicyRule{
{
@@ -611,16 +605,16 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
},
}, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
_, err = spokeKubeClient.RbacV1().RoleBindings(commOptions.SpokeClusterName).Create(
_, err = spokeKubeClient.RbacV1().RoleBindings(clusterName).Create(
context.TODO(), &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: roleName,
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: executorName,
},
},
@@ -632,7 +626,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
}, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(
context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
@@ -648,16 +642,16 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
// ensure configmaps exist
util.AssertExistenceOfConfigMaps(
[]workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm1, map[string]string{"a": "b"}, []string{})),
util.ToManifest(util.NewConfigmap(clusterName, cm1, map[string]string{"a": "b"}, []string{})),
}, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
})
ginkgo.It("with permission for already existing resource", func() {
_, err = spokeKubeClient.RbacV1().Roles(commOptions.SpokeClusterName).Create(
_, err = spokeKubeClient.RbacV1().Roles(clusterName).Create(
context.TODO(), &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
Name: roleName,
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
},
Rules: []rbacv1.PolicyRule{
{
@@ -674,16 +668,16 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
},
}, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
_, err = spokeKubeClient.RbacV1().RoleBindings(commOptions.SpokeClusterName).Create(
_, err = spokeKubeClient.RbacV1().RoleBindings(clusterName).Create(
context.TODO(), &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: roleName,
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: executorName,
},
},
@@ -696,11 +690,11 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
gomega.Expect(err).ToNot(gomega.HaveOccurred())
// make the role exist with lower permission
_, err = spokeKubeClient.RbacV1().Roles(commOptions.SpokeClusterName).Create(
_, err = spokeKubeClient.RbacV1().Roles(clusterName).Create(
context.TODO(), &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
Name: "role-cm-creator",
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
},
Rules: []rbacv1.PolicyRule{
{
@@ -712,7 +706,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
}, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(
context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
@@ -728,7 +722,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
// ensure configmaps exist
util.AssertExistenceOfConfigMaps(
[]workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm1, map[string]string{"a": "b"}, []string{})),
util.ToManifest(util.NewConfigmap(clusterName, cm1, map[string]string{"a": "b"}, []string{})),
}, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
})
})
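These escalation specs mirror Kubernetes RBAC escalation prevention: a subject may only create or update a Role whose rules are a subset of the permissions it already holds, unless it carries the escalate verb. A hypothetical rule granting that bypass (not used by these specs) would be:
// assumes: rbacv1 "k8s.io/api/rbac/v1"
escalateRule := rbacv1.PolicyRule{
	Verbs:     []string{"escalate"},
	APIGroups: []string{"rbac.authorization.k8s.io"},
	Resources: []string{"roles"},
}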
@@ -782,13 +776,13 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
}
ginkgo.BeforeEach(func() {
manifests = []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm1, map[string]string{"a": "b"}, []string{})),
util.ToManifest(util.NewConfigmap(clusterName, cm1, map[string]string{"a": "b"}, []string{})),
}
executor = &workapiv1.ManifestWorkExecutor{
Subject: workapiv1.ManifestWorkExecutorSubject{
Type: workapiv1.ExecutorSubjectTypeServiceAccount,
ServiceAccount: &workapiv1.ManifestWorkSubjectServiceAccount{
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: executorName,
},
},
@@ -796,7 +790,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
})
ginkgo.It("Permission change", func() {
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(
context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
@@ -810,8 +804,8 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
ginkgo.By("ensure configmaps do not exist")
util.AssertNonexistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
createRBAC(commOptions.SpokeClusterName, executorName)
addConfigMapToManifestWork(hubWorkClient, work.Name, commOptions.SpokeClusterName, cm2)
createRBAC(clusterName, executorName)
addConfigMapToManifestWork(hubWorkClient, work.Name, clusterName, cm2)
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied,
metav1.ConditionTrue, []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue},
@@ -823,8 +817,8 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
ginkgo.By("ensure configmaps cm1 and cm2 exist")
util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
deleteRBAC(commOptions.SpokeClusterName)
addConfigMapToManifestWork(hubWorkClient, work.Name, commOptions.SpokeClusterName, "cm3")
deleteRBAC(clusterName)
addConfigMapToManifestWork(hubWorkClient, work.Name, clusterName, "cm3")
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied,
metav1.ConditionFalse, []metav1.ConditionStatus{metav1.ConditionFalse, metav1.ConditionFalse,
@@ -836,15 +830,15 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
ginkgo.By("ensure configmaps cm1 and cm2 exist (applied resources are not deleted even though the permission is revoked) but cm3 does not exist")
util.AssertExistenceOfConfigMaps(
[]workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm1, map[string]string{"a": "b"}, nil)),
util.ToManifest(util.NewConfigmap(clusterName, cm1, map[string]string{"a": "b"}, nil)),
}, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
util.AssertExistenceOfConfigMaps(
[]workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm2, map[string]string{"a": "b"}, nil)),
util.ToManifest(util.NewConfigmap(clusterName, cm2, map[string]string{"a": "b"}, nil)),
}, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
util.AssertNonexistenceOfConfigMaps(
[]workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm3", map[string]string{"a": "b"}, nil)),
util.ToManifest(util.NewConfigmap(clusterName, "cm3", map[string]string{"a": "b"}, nil)),
}, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
})
})


@@ -3,7 +3,6 @@ package work
import (
"context"
"fmt"
"time"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
@@ -17,18 +16,15 @@ import (
ocmfeature "open-cluster-management.io/api/feature"
workapiv1 "open-cluster-management.io/api/work/v1"
commonoptions "open-cluster-management.io/ocm/pkg/common/options"
"open-cluster-management.io/ocm/pkg/features"
"open-cluster-management.io/ocm/pkg/work/spoke"
"open-cluster-management.io/ocm/test/integration/util"
)
var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
var o *spoke.WorkloadAgentOptions
var commOptions *commonoptions.AgentOptions
var cancel context.CancelFunc
var workName string
var clusterName string
var work *workapiv1.ManifestWork
var manifests []workapiv1.Manifest
@@ -36,22 +32,11 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
ginkgo.BeforeEach(func() {
workName = fmt.Sprintf("status-feedback-work-%s", rand.String(5))
clusterName := rand.String(5)
clusterName = rand.String(5)
o = spoke.NewWorkloadAgentOptions()
o.StatusSyncInterval = 3 * time.Second
o.WorkloadSourceDriver = sourceDriver
o.WorkloadSourceConfig = sourceConfigFileName
if sourceDriver != util.KubeDriver {
o.CloudEventsClientID = fmt.Sprintf("%s-work-agent", clusterName)
o.CloudEventsClientCodecs = []string{"manifestbundle"}
ns := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{Name: clusterName},
}
commOptions = commonoptions.NewAgentOptions()
commOptions.SpokeClusterName = clusterName
ns := &corev1.Namespace{}
ns.Name = commOptions.SpokeClusterName
_, err = spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
@@ -60,24 +45,24 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
})
ginkgo.JustBeforeEach(func() {
work = util.NewManifestWork(commOptions.SpokeClusterName, workName, manifests)
work = util.NewManifestWork(clusterName, workName, manifests)
gomega.Expect(err).ToNot(gomega.HaveOccurred())
})
ginkgo.AfterEach(func() {
err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), commOptions.SpokeClusterName, metav1.DeleteOptions{})
err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), clusterName, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
})
ginkgo.Context("Deployment Status feedback", func() {
ginkgo.BeforeEach(func() {
u, _, err := util.NewDeployment(commOptions.SpokeClusterName, "deploy1", "sa")
u, _, err := util.NewDeployment(clusterName, "deploy1", "sa")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
manifests = append(manifests, util.ToManifest(u))
var ctx context.Context
ctx, cancel = context.WithCancel(context.Background())
go startWorkAgent(ctx, o, commOptions)
go startWorkAgent(ctx, clusterName)
})
ginkgo.AfterEach(func() {
@@ -92,7 +77,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: "deploy1",
},
FeedbackRules: []workapiv1.FeedbackRule{
@@ -103,7 +88,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
},
}
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
@@ -113,7 +98,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
// Update Deployment status on spoke
gomega.Eventually(func() error {
deploy, err := spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
deploy, err := spokeKubeClient.AppsV1().Deployments(clusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
if err != nil {
return err
}
@@ -122,13 +107,13 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
deploy.Status.Replicas = 3
deploy.Status.ReadyReplicas = 2
_, err = spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{})
_, err = spokeKubeClient.AppsV1().Deployments(clusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// Check if we get status of deployment on work api
gomega.Eventually(func() error {
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
@@ -175,7 +160,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
// Update replica of deployment
gomega.Eventually(func() error {
deploy, err := spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
deploy, err := spokeKubeClient.AppsV1().Deployments(clusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
if err != nil {
return err
}
@@ -184,13 +169,13 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
deploy.Status.Replicas = 3
deploy.Status.ReadyReplicas = 3
_, err = spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{})
_, err = spokeKubeClient.AppsV1().Deployments(clusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// Check if the status of deployment is synced on work api
gomega.Eventually(func() error {
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
@@ -263,7 +248,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
},
}
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
@@ -272,7 +257,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
gomega.Eventually(func() error {
deploy, err := spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
deploy, err := spokeKubeClient.AppsV1().Deployments(clusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
if err != nil {
return err
}
@@ -284,13 +269,13 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
},
}
_, err = spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{})
_, err = spokeKubeClient.AppsV1().Deployments(clusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// Check if we get status of deployment on work api
gomega.Eventually(func() error {
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
@@ -323,7 +308,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
})
ginkgo.It("should return none for resources with no wellknown status", func() {
sa, _ := util.NewServiceAccount(commOptions.SpokeClusterName, "sa")
sa, _ := util.NewServiceAccount(clusterName, "sa")
work.Spec.Workload.Manifests = append(work.Spec.Workload.Manifests, util.ToManifest(sa))
work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{
@@ -331,7 +316,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: "deploy1",
},
FeedbackRules: []workapiv1.FeedbackRule{
@@ -344,7 +329,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "",
Resource: "serviceaccounts",
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: "sa",
},
FeedbackRules: []workapiv1.FeedbackRule{
@@ -355,7 +340,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
},
}
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
@@ -365,7 +350,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
// Update Deployment status on spoke
gomega.Eventually(func() error {
deploy, err := spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
deploy, err := spokeKubeClient.AppsV1().Deployments(clusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
if err != nil {
return err
}
@@ -374,13 +359,13 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
deploy.Status.Replicas = 3
deploy.Status.ReadyReplicas = 2
_, err = spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{})
_, err = spokeKubeClient.AppsV1().Deployments(clusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// Check if we get status of deployment on work api
gomega.Eventually(func() error {
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
@@ -438,7 +423,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: "deploy1",
},
FeedbackRules: []workapiv1.FeedbackRule{
@@ -455,7 +440,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
},
}
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
@@ -467,15 +452,18 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
ginkgo.Context("Deployment Status feedback with RawJsonString enabled", func() {
ginkgo.BeforeEach(func() {
u, _, err := util.NewDeployment(commOptions.SpokeClusterName, "deploy1", "sa")
u, _, err := util.NewDeployment(clusterName, "deploy1", "sa")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
manifests = append(manifests, util.ToManifest(u))
err = features.SpokeMutableFeatureGate.Set(fmt.Sprintf("%s=true", ocmfeature.RawFeedbackJsonString))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.DeferCleanup(func() {
_ = features.SpokeMutableFeatureGate.Set(fmt.Sprintf("%s=false", ocmfeature.RawFeedbackJsonString))
})
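// SpokeMutableFeatureGate is process-global state; the DeferCleanup above
// restores the default so RawFeedbackJsonString does not leak into later specs.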
var ctx context.Context
ctx, cancel = context.WithCancel(context.Background())
go startWorkAgent(ctx, o, commOptions)
go startWorkAgent(ctx, clusterName)
})
ginkgo.AfterEach(func() {
@@ -490,7 +478,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: "deploy1",
},
FeedbackRules: []workapiv1.FeedbackRule{
@@ -507,7 +495,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
},
}
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
@@ -516,7 +504,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
gomega.Eventually(func() error {
deploy, err := spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
deploy, err := spokeKubeClient.AppsV1().Deployments(clusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
if err != nil {
return err
}
@@ -528,13 +516,13 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
},
}
_, err = spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{})
_, err = spokeKubeClient.AppsV1().Deployments(clusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// Check if we get status of deployment on work api
gomega.Eventually(func() error {
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
@@ -572,13 +560,13 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
ginkgo.Context("DaemonSet Status feedback", func() {
ginkgo.BeforeEach(func() {
u, _, err := util.NewDaesonSet(commOptions.SpokeClusterName, "ds1")
u, _, err := util.NewDaemonSet(clusterName, "ds1")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
manifests = append(manifests, util.ToManifest(u))
var ctx context.Context
ctx, cancel = context.WithCancel(context.Background())
go startWorkAgent(ctx, o, commOptions)
go startWorkAgent(ctx, clusterName)
})
ginkgo.AfterEach(func() {
@@ -593,7 +581,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "daemonsets",
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: "ds1",
},
FeedbackRules: []workapiv1.FeedbackRule{
@@ -604,7 +592,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
},
}
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).
Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
@@ -617,7 +605,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
// Update DaemonSet status on spoke
gomega.Eventually(func() error {
ds, err := spokeKubeClient.AppsV1().DaemonSets(commOptions.SpokeClusterName).
ds, err := spokeKubeClient.AppsV1().DaemonSets(clusterName).
Get(context.Background(), "ds1", metav1.GetOptions{})
if err != nil {
return err
@@ -627,14 +615,14 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
ds.Status.DesiredNumberScheduled = 3
ds.Status.NumberReady = 2
_, err = spokeKubeClient.AppsV1().DaemonSets(commOptions.SpokeClusterName).
_, err = spokeKubeClient.AppsV1().DaemonSets(clusterName).
UpdateStatus(context.Background(), ds, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// Check if we get status of daemonset on work api
gomega.Eventually(func() error {
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).
Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
@@ -684,7 +672,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
// Update replica of deployment
gomega.Eventually(func() error {
ds, err := spokeKubeClient.AppsV1().DaemonSets(commOptions.SpokeClusterName).
ds, err := spokeKubeClient.AppsV1().DaemonSets(clusterName).
Get(context.Background(), "ds1", metav1.GetOptions{})
if err != nil {
return err
@@ -694,14 +682,14 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
ds.Status.DesiredNumberScheduled = 3
ds.Status.NumberReady = 3
_, err = spokeKubeClient.AppsV1().DaemonSets(commOptions.SpokeClusterName).
_, err = spokeKubeClient.AppsV1().DaemonSets(clusterName).
UpdateStatus(context.Background(), ds, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// Check if the status of the daemonset is synced on work api
gomega.Eventually(func() error {
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).
Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
@@ -753,10 +741,10 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
ginkgo.Context("Deployments Status feedback with wildcard", func() {
ginkgo.BeforeEach(func() {
deployment1, _, err := util.NewDeployment(commOptions.SpokeClusterName, "deploy1", "sa")
deployment1, _, err := util.NewDeployment(clusterName, "deploy1", "sa")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
manifests = append(manifests, util.ToManifest(deployment1))
deployment2, _, err := util.NewDeployment(commOptions.SpokeClusterName, "deploy2", "sa")
deployment2, _, err := util.NewDeployment(clusterName, "deploy2", "sa")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
manifests = append(manifests, util.ToManifest(deployment2))
@@ -764,7 +752,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
gomega.Expect(err).NotTo(gomega.HaveOccurred())
var ctx context.Context
ctx, cancel = context.WithCancel(context.Background())
go startWorkAgent(ctx, o, commOptions)
go startWorkAgent(ctx, clusterName)
})
ginkgo.AfterEach(func() {
@@ -796,7 +784,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
},
}
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
@@ -806,7 +794,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
// Update Deployment status on spoke
gomega.Eventually(func() error {
deploy, err := spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
deploy, err := spokeKubeClient.AppsV1().Deployments(clusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
if err != nil {
return err
}
@@ -815,12 +803,12 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
deploy.Status.Replicas = 3
deploy.Status.ReadyReplicas = 3
_, err = spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{})
_, err = spokeKubeClient.AppsV1().Deployments(clusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{})
if err != nil {
return err
}
deploy, err = spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Get(context.Background(), "deploy2", metav1.GetOptions{})
deploy, err = spokeKubeClient.AppsV1().Deployments(clusterName).Get(context.Background(), "deploy2", metav1.GetOptions{})
if err != nil {
return err
}
@@ -829,13 +817,13 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
deploy.Status.Replicas = 4
deploy.Status.ReadyReplicas = 4
_, err = spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{})
_, err = spokeKubeClient.AppsV1().Deployments(clusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// Check if we get status of deployment on work api
gomega.Eventually(func() error {
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
@@ -929,7 +917,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
},
}
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(),
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(),
work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
@@ -940,7 +928,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
// Update Deployment status on spoke
gomega.Eventually(func() error {
deploy, err := spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).
deploy, err := spokeKubeClient.AppsV1().Deployments(clusterName).
Get(context.Background(), "deploy1", metav1.GetOptions{})
if err != nil {
return err
@@ -953,13 +941,13 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
},
}
_, err = spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).
_, err = spokeKubeClient.AppsV1().Deployments(clusterName).
UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{})
if err != nil {
return err
}
deploy, err = spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).
deploy, err = spokeKubeClient.AppsV1().Deployments(clusterName).
Get(context.Background(), "deploy2", metav1.GetOptions{})
if err != nil {
return err
@@ -969,14 +957,14 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
deploy.Status.Replicas = 4
deploy.Status.ReadyReplicas = 4
_, err = spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).
_, err = spokeKubeClient.AppsV1().Deployments(clusterName).
UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// Check if we get status of deployment on work api
gomega.Eventually(func() error {
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).
Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err

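The FeedbackRules in the hunks above are largely truncated. For reference, a hypothetical JSONPaths rule (the FeedbackRule type complementing WellKnownStatus, field names per the work v1 API) that surfaces a deployment's ready replicas would be:
workapiv1.FeedbackRule{
	Type: workapiv1.JSONPathsType,
	JsonPaths: []workapiv1.JsonPath{
		{Name: "readyReplicas", Path: ".status.readyReplicas"},
	},
}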

@@ -4,9 +4,11 @@ import (
"context"
"flag"
"fmt"
"net"
"os"
"path"
"testing"
"time"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
@@ -35,10 +37,12 @@ import (
cemetrics "open-cluster-management.io/sdk-go/pkg/cloudevents/server/grpc/metrics"
sdkgrpc "open-cluster-management.io/sdk-go/pkg/server/grpc"
commonoptions "open-cluster-management.io/ocm/pkg/common/options"
"open-cluster-management.io/ocm/pkg/features"
serviceswork "open-cluster-management.io/ocm/pkg/server/services/work"
"open-cluster-management.io/ocm/pkg/work/helper"
"open-cluster-management.io/ocm/pkg/work/hub"
"open-cluster-management.io/ocm/pkg/work/spoke"
"open-cluster-management.io/ocm/test/integration/util"
)
@@ -164,31 +168,23 @@ var _ = ginkgo.BeforeSuite(func() {
hubWorkClient, err = workclientset.NewForConfig(cfg)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
sourceConfigFileName = path.Join(tempDir, "grpcconfig")
gRPCURL, gRPCServerOptions, _, err := util.CreateGRPCConfigs(sourceConfigFileName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
hubHash = helper.HubHash(gRPCURL)
hook, err := util.NewGRPCServerWorkHook(cfg)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
go hook.Run(envCtx)
grpcEventServer := cloudeventsgrpc.NewGRPCBroker()
grpcEventServer.RegisterService(payload.ManifestBundleEventDataType,
serviceswork.NewWorkService(hook.WorkClient, hook.WorkInformers.Work().V1().ManifestWorks()))
authorizer := util.NewMockAuthorizer()
server := sdkgrpc.NewGRPCServer(gRPCServerOptions).
WithUnaryAuthorizer(authorizer).
WithStreamAuthorizer(authorizer).
WithRegisterFunc(func(s *grpc.Server) {
pbv1.RegisterCloudEventServiceServer(s, grpcEventServer)
}).
WithExtraMetrics(cemetrics.CloudEventsGRPCMetrics()...)
sourceConfigFileName, hubHash = startGRPCServer(envCtx, tempDir, cfg)
// start hub controller
go func() {
err := server.Run(envCtx)
// in the grpc driver case, the hub controller still calls the hub kube-apiserver.
sourceKubeConfigFileName := path.Join(tempDir, "kubeconfig")
err = util.CreateKubeconfigFile(cfg, sourceKubeConfigFileName)
gomega.Expect(err).ToNot(gomega.HaveOccurred())
opts := hub.NewWorkHubManagerOptions()
opts.WorkDriver = "kube"
opts.WorkDriverConfig = sourceKubeConfigFileName
hubConfig := hub.NewWorkHubManagerConfig(opts)
err := hubConfig.RunWorkHubManager(envCtx, &controllercmd.ControllerContext{
KubeConfig: cfg,
EventRecorder: util.NewIntegrationTestEventRecorder("hub"),
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}()
default:
@@ -222,3 +218,68 @@ var _ = ginkgo.AfterSuite(func() {
os.RemoveAll(tempDir)
}
})
type agentOptionsDecorator func(opt *spoke.WorkloadAgentOptions, commonOpt *commonoptions.AgentOptions) (
*spoke.WorkloadAgentOptions, *commonoptions.AgentOptions)
func startWorkAgent(ctx context.Context, clusterName string, decorators ...agentOptionsDecorator) {
o := spoke.NewWorkloadAgentOptions()
o.StatusSyncInterval = 3 * time.Second
o.WorkloadSourceDriver = sourceDriver
o.WorkloadSourceConfig = sourceConfigFileName
if sourceDriver != util.KubeDriver {
o.CloudEventsClientID = fmt.Sprintf("%s-work-agent", clusterName)
o.CloudEventsClientCodecs = []string{"manifestbundle"}
}
commOptions := commonoptions.NewAgentOptions()
commOptions.SpokeClusterName = clusterName
for _, decorator := range decorators {
o, commOptions = decorator(o, commOptions)
}
agentConfig := spoke.NewWorkAgentConfig(commOptions, o)
err := agentConfig.RunWorkloadAgent(ctx, &controllercmd.ControllerContext{
KubeConfig: spokeRestConfig,
EventRecorder: util.NewIntegrationTestEventRecorder("integration"),
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
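The variadic decorator hook keeps per-spec option tweaks local to the spec. A hypothetical usage (names invented for illustration):
withGracePeriod := func(period time.Duration) agentOptionsDecorator {
	return func(opt *spoke.WorkloadAgentOptions, commonOpt *commonoptions.AgentOptions) (
		*spoke.WorkloadAgentOptions, *commonoptions.AgentOptions) {
		opt.AppliedManifestWorkEvictionGracePeriod = period
		return opt, commonOpt
	}
}
go startWorkAgent(ctx, clusterName, withGracePeriod(30*time.Second))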
func startGRPCServer(ctx context.Context, temp string, cfg *rest.Config) (string, string) {
ln, err := net.Listen("tcp", "127.0.0.1:0")
gomega.Expect(err).NotTo(gomega.HaveOccurred())
bindPort := fmt.Sprintf("%d", ln.Addr().(*net.TCPAddr).Port)
_ = ln.Close()
configFileName := path.Join(temp, "grpcconfig")
gRPCURL, gRPCServerOptions, _, err := util.CreateGRPCConfigs(configFileName, bindPort)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
hubHash := helper.HubHash(gRPCURL)
hook, err := util.NewGRPCServerWorkHook(cfg)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
go hook.Run(ctx)
grpcEventServer := cloudeventsgrpc.NewGRPCBroker()
grpcEventServer.RegisterService(payload.ManifestBundleEventDataType,
serviceswork.NewWorkService(hook.WorkClient, hook.WorkInformers.Work().V1().ManifestWorks()))
authorizer := util.NewMockAuthorizer()
server := sdkgrpc.NewGRPCServer(gRPCServerOptions).
WithUnaryAuthorizer(authorizer).
WithStreamAuthorizer(authorizer).
WithRegisterFunc(func(s *grpc.Server) {
pbv1.RegisterCloudEventServiceServer(s, grpcEventServer)
}).
WithExtraMetrics(cemetrics.CloudEventsGRPCMetrics()...)
go func() {
err := server.Run(ctx)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}()
return configFileName, hubHash
}
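// Note: reserving a port by binding 127.0.0.1:0 and then closing the listener
// leaves a small window in which another process could claim the port before
// the gRPC server re-binds it. That race is tolerable in an integration suite,
// though it should not be copied into production code.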


@@ -25,41 +25,45 @@ import (
)
var _ = ginkgo.Describe("Unmanaged AppliedManifestWork", func() {
var o *spoke.WorkloadAgentOptions
var commOptions *commonoptions.AgentOptions
var cancel context.CancelFunc
var work *workapiv1.ManifestWork
var clusterName string
var agentID string
var manifests []workapiv1.Manifest
var appliedManifestWorkName string
var err error
var ns *corev1.Namespace
optionDecorator := func(agentID, sourceConfigFile string, gracePeriod time.Duration) agentOptionsDecorator {
return func(opt *spoke.WorkloadAgentOptions, commonOpt *commonoptions.AgentOptions) (
*spoke.WorkloadAgentOptions, *commonoptions.AgentOptions) {
opt.AppliedManifestWorkEvictionGracePeriod = gracePeriod
opt.WorkloadSourceConfig = sourceConfigFile
commonOpt.AgentID = agentID
return opt, commonOpt
}
}
ginkgo.BeforeEach(func() {
o = spoke.NewWorkloadAgentOptions()
o.StatusSyncInterval = 3 * time.Second
o.AppliedManifestWorkEvictionGracePeriod = 10 * time.Second
o.WorkloadSourceDriver = sourceDriver
o.WorkloadSourceConfig = sourceConfigFileName
clusterName = utilrand.String(5)
agentID = utilrand.String(5)
commOptions = commonoptions.NewAgentOptions()
commOptions.SpokeClusterName = utilrand.String(5)
commOptions.AgentID = utilrand.String(5)
ns = &corev1.Namespace{}
ns.Name = commOptions.SpokeClusterName
ns = &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{Name: clusterName},
}
_, err := spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
var ctx context.Context
ctx, cancel = context.WithCancel(context.Background())
go startWorkAgent(ctx, o, commOptions)
go startWorkAgent(ctx, clusterName, optionDecorator(agentID, sourceConfigFileName, 10*time.Second))
manifests = []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm1, map[string]string{"a": "b"}, nil)),
util.ToManifest(util.NewConfigmap(clusterName, cm1, map[string]string{"a": "b"}, nil)),
}
work = util.NewManifestWork(commOptions.SpokeClusterName, "unmanaged-appliedwork", manifests)
_, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work = util.NewManifestWork(clusterName, "unmanaged-appliedwork", manifests)
_, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
appliedManifestWorkName = fmt.Sprintf("%s-%s", hubHash, work.Name)
@@ -69,16 +73,19 @@ var _ = ginkgo.Describe("Unmanaged AppliedManifestWork", func() {
if cancel != nil {
cancel()
}
err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), commOptions.SpokeClusterName, metav1.DeleteOptions{})
err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), clusterName, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
})
ginkgo.Context("Should delete unmanaged applied work when hub changed", func() {
var newHubKubeConfigFile string
var newSourceConfigFile string
var newKubeClient kubernetes.Interface
var newWorkClient workclientset.Interface
var newAgentID string
var newHub *envtest.Environment
var newHubTempDir string
var grpcCtx context.Context
var grpcCancel context.CancelFunc
ginkgo.BeforeEach(func() {
// start another hub
@@ -87,15 +94,24 @@ var _ = ginkgo.Describe("Unmanaged AppliedManifestWork", func() {
CRDDirectoryPaths: CRDPaths,
}
newAgentID = utilrand.String(5)
newCfg, err := newHub.Start()
gomega.Expect(err).ToNot(gomega.HaveOccurred())
newHubTempDir, err = os.MkdirTemp("", "unmanaged_work_test")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
newHubKubeConfigFile = path.Join(newHubTempDir, "kubeconfig")
err = util.CreateKubeconfigFile(newCfg, newHubKubeConfigFile)
gomega.Expect(err).ToNot(gomega.HaveOccurred())
gomega.Expect(sourceDriver).Should(gomega.Or(gomega.Equal("grpc"), gomega.Equal("kube")))
grpcCtx, grpcCancel = context.WithCancel(context.Background())
if sourceDriver == "grpc" {
newSourceConfigFile, _ = startGRPCServer(grpcCtx, newHubTempDir, newCfg)
} else if sourceDriver == "kube" {
newSourceConfigFile = path.Join(newHubTempDir, "kubeconfig")
err = util.CreateKubeconfigFile(newCfg, newSourceConfigFile)
gomega.Expect(err).ToNot(gomega.HaveOccurred())
}
newKubeClient, err = kubernetes.NewForConfig(newCfg)
gomega.Expect(err).ToNot(gomega.HaveOccurred())
@@ -108,6 +124,7 @@ var _ = ginkgo.Describe("Unmanaged AppliedManifestWork", func() {
})
ginkgo.AfterEach(func() {
grpcCancel()
if cancel != nil {
cancel()
}
@@ -132,21 +149,12 @@ var _ = ginkgo.Describe("Unmanaged AppliedManifestWork", func() {
cancel()
}
newOption := spoke.NewWorkloadAgentOptions()
newOption.AppliedManifestWorkEvictionGracePeriod = 5 * time.Second
newOption.WorkloadSourceDriver = sourceDriver
newOption.WorkloadSourceConfig = newHubKubeConfigFile
newCommonOptions := commonoptions.NewAgentOptions()
newCommonOptions.SpokeClusterName = commOptions.SpokeClusterName
newCommonOptions.AgentID = utilrand.String(5)
var ctx context.Context
ctx, cancel = context.WithCancel(context.Background())
go startWorkAgent(ctx, newOption, newCommonOptions)
go startWorkAgent(ctx, clusterName, optionDecorator(newAgentID, newSourceConfigFile, 5*time.Second))
// Create the same manifestwork with the same name on new hub.
work, err = newWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work, err = newWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, newWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
@@ -156,7 +164,7 @@ var _ = ginkgo.Describe("Unmanaged AppliedManifestWork", func() {
// ensure the resource has two ownerrefs
gomega.Eventually(func() error {
cm, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.TODO(), cm1, metav1.GetOptions{})
cm, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.TODO(), cm1, metav1.GetOptions{})
if err != nil {
return err
}
@@ -180,21 +188,21 @@ var _ = ginkgo.Describe("Unmanaged AppliedManifestWork", func() {
cancel()
}
newOption := spoke.NewWorkloadAgentOptions()
newOption.AppliedManifestWorkEvictionGracePeriod = 5 * time.Second
newOption.WorkloadSourceDriver = sourceDriver
newOption.WorkloadSourceConfig = newHubKubeConfigFile
newCommonOptions := commonoptions.NewAgentOptions()
newCommonOptions.SpokeClusterName = commOptions.SpokeClusterName
newCommonOptions.AgentID = commOptions.AgentID
newOptDecorator := func(opt *spoke.WorkloadAgentOptions, commonOpt *commonoptions.AgentOptions) (
*spoke.WorkloadAgentOptions, *commonoptions.AgentOptions) {
opt.WorkloadSourceConfig = newSourceConfigFile
opt.AppliedManifestWorkEvictionGracePeriod = 5 * time.Second
commonOpt.SpokeClusterName = clusterName
commonOpt.AgentID = agentID
return opt, commonOpt
}
var ctx context.Context
ctx, cancel = context.WithCancel(context.Background())
go startWorkAgent(ctx, newOption, newCommonOptions)
go startWorkAgent(ctx, clusterName, newOptDecorator)
// Create the same manifestwork with the same name.
work, err = newWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work, err = newWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, newWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
@@ -216,7 +224,7 @@ var _ = ginkgo.Describe("Unmanaged AppliedManifestWork", func() {
// ensure the resource has only one ownerref
gomega.Eventually(func() error {
cm, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.TODO(), cm1, metav1.GetOptions{})
cm, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.TODO(), cm1, metav1.GetOptions{})
if err != nil {
return err
}
@@ -244,7 +252,7 @@ var _ = ginkgo.Describe("Unmanaged AppliedManifestWork", func() {
cancel()
}
// fore delete the work from hub
// force delete the work from hub
err := forceDeleteManifestWork(context.TODO(), work)
gomega.Expect(err).ToNot(gomega.HaveOccurred())
})
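forceDeleteManifestWork is defined elsewhere in the suite; a plausible sketch of such a helper (an assumption, not the actual implementation) deletes the work and then strips its finalizers so it disappears without waiting for the agent:
// assumes: apierrors "k8s.io/apimachinery/pkg/api/errors"
func forceDelete(ctx context.Context, c workclientset.Interface, ns, name string) error {
	if err := c.WorkV1().ManifestWorks(ns).Delete(ctx, name, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) {
		return err
	}
	w, err := c.WorkV1().ManifestWorks(ns).Get(ctx, name, metav1.GetOptions{})
	if apierrors.IsNotFound(err) {
		return nil
	}
	if err != nil {
		return err
	}
	w.Finalizers = nil
	_, err = c.WorkV1().ManifestWorks(ns).Update(ctx, w, metav1.UpdateOptions{})
	return err
}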
@@ -253,7 +261,7 @@ var _ = ginkgo.Describe("Unmanaged AppliedManifestWork", func() {
// restart the work agent
var ctx context.Context
ctx, cancel = context.WithCancel(context.Background())
go startWorkAgent(ctx, o, commOptions)
go startWorkAgent(ctx, clusterName, optionDecorator(agentID, sourceConfigFileName, 10*time.Second))
// ensure the manifestwork is removed.
gomega.Eventually(func() error {
@@ -272,10 +280,10 @@ var _ = ginkgo.Describe("Unmanaged AppliedManifestWork", func() {
// restart the work agent
var ctx context.Context
ctx, cancel = context.WithCancel(context.Background())
go startWorkAgent(ctx, o, commOptions)
go startWorkAgent(ctx, clusterName, optionDecorator(agentID, sourceConfigFileName, 10*time.Second))
// recreate the work on the hub
_, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
_, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
// ensure the appliedmanifestwork eviction is stopped
@@ -316,11 +324,7 @@ var _ = ginkgo.Describe("Unmanaged AppliedManifestWork", func() {
// restart the work agent
var ctx context.Context
ctx, cancel = context.WithCancel(context.Background())
newOption := spoke.NewWorkloadAgentOptions()
newOption.AppliedManifestWorkEvictionGracePeriod = 5 * time.Minute
newOption.WorkloadSourceDriver = sourceDriver
newOption.WorkloadSourceConfig = sourceConfigFileName
go startWorkAgent(ctx, newOption, commOptions)
go startWorkAgent(ctx, clusterName, optionDecorator(agentID, sourceConfigFileName, 5*time.Minute))
// ensure the eviction of the applied manifestwork starts
gomega.Eventually(func() error {
@@ -343,8 +347,7 @@ var _ = ginkgo.Describe("Unmanaged AppliedManifestWork", func() {
// restart the work agent with a really large eviction grace period
ctx, cancel = context.WithCancel(context.Background())
newOption.AppliedManifestWorkEvictionGracePeriod = 100 * 365 * 24 * time.Hour
go startWorkAgent(ctx, newOption, commOptions)
go startWorkAgent(ctx, clusterName, optionDecorator(agentID, sourceConfigFileName, 100*365*24*time.Hour))
// ensure the eviction of the applied manifestwork stops
gomega.Eventually(func() error {

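The grace-period assertions in this file reduce to a simple predicate; a condensed sketch of the assumed shape (not the controller's exact code):
// Eviction starts when the owning ManifestWork disappears and is aborted if it
// comes back; deletion fires only once the full grace period has elapsed.
func evictionDue(evictionStart *metav1.Time, gracePeriod time.Duration) bool {
	if evictionStart == nil {
		return false // eviction not started, or stopped because the work reappeared
	}
	return time.Now().After(evictionStart.Add(gracePeriod))
}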

@@ -3,7 +3,6 @@ package work
import (
"context"
"fmt"
"time"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
@@ -18,60 +17,46 @@ import (
workapiv1 "open-cluster-management.io/api/work/v1"
commonoptions "open-cluster-management.io/ocm/pkg/common/options"
"open-cluster-management.io/ocm/pkg/work/spoke"
"open-cluster-management.io/ocm/test/integration/util"
)
var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
var o *spoke.WorkloadAgentOptions
var commOptions *commonoptions.AgentOptions
var cancel context.CancelFunc
var workName string
var clusterName string
var work *workapiv1.ManifestWork
var manifests []workapiv1.Manifest
var err error
ginkgo.BeforeEach(func() {
clusterName := rand.String(5)
clusterName = rand.String(5)
workName = fmt.Sprintf("update-strategy-work-%s", rand.String(5))
o = spoke.NewWorkloadAgentOptions()
o.StatusSyncInterval = 3 * time.Second
o.WorkloadSourceDriver = sourceDriver
o.WorkloadSourceConfig = sourceConfigFileName
if sourceDriver != util.KubeDriver {
o.CloudEventsClientID = fmt.Sprintf("%s-work-agent", clusterName)
o.CloudEventsClientCodecs = []string{"manifestbundle"}
ns := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{Name: clusterName},
}
commOptions = commonoptions.NewAgentOptions()
commOptions.SpokeClusterName = clusterName
ns := &corev1.Namespace{}
ns.Name = commOptions.SpokeClusterName
_, err := spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
var ctx context.Context
ctx, cancel = context.WithCancel(context.Background())
go startWorkAgent(ctx, o, commOptions)
go startWorkAgent(ctx, clusterName)
// reset manifests
manifests = nil
})
ginkgo.JustBeforeEach(func() {
work = util.NewManifestWork(commOptions.SpokeClusterName, workName, manifests)
work = util.NewManifestWork(clusterName, workName, manifests)
})
ginkgo.AfterEach(func() {
if cancel != nil {
cancel()
}
err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), commOptions.SpokeClusterName, metav1.DeleteOptions{})
err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), clusterName, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
})
@@ -79,7 +64,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
var object *unstructured.Unstructured
ginkgo.BeforeEach(func() {
object, _, err = util.NewDeployment(commOptions.SpokeClusterName, "deploy1", "sa")
object, _, err = util.NewDeployment(clusterName, "deploy1", "sa")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
manifests = append(manifests, util.ToManifest(object))
})
@@ -90,7 +75,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: "deploy1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
@@ -99,7 +84,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
},
}
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
@@ -109,7 +94,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
err = unstructured.SetNestedField(object.Object, int64(3), "spec", "replicas")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
gomega.Eventually(func() error {
updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
@@ -122,7 +107,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
return err
}
_, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Patch(
_, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Patch(
context.Background(), updatedWork.Name, types.MergePatchType, pathBytes, metav1.PatchOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
@@ -131,7 +116,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
gomega.Eventually(func() error {
deploy, err := spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
deploy, err := spokeKubeClient.AppsV1().Deployments(clusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
if err != nil {
return err
}
@@ -148,14 +133,14 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
ginkgo.Context("Read only strategy", func() {
var cm *corev1.ConfigMap
ginkgo.BeforeEach(func() {
cm = util.NewConfigmap(commOptions.SpokeClusterName, "cm1", map[string]string{"test": "testdata"}, []string{})
_, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Create(context.TODO(), cm, metav1.CreateOptions{})
cm = util.NewConfigmap(clusterName, "cm1", map[string]string{"test": "testdata"}, []string{})
_, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Create(context.TODO(), cm, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
manifests = append(manifests, util.ToManifest(cm))
})
ginkgo.AfterEach(func() {
err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Delete(context.TODO(), "cm1", metav1.DeleteOptions{})
err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Delete(context.TODO(), "cm1", metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
})
@@ -164,7 +149,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
{
ResourceIdentifier: workapiv1.ResourceIdentifier{
Resource: "configmaps",
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: "cm1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
@@ -184,7 +169,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
},
}
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
@@ -192,7 +177,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
ginkgo.By("get configmap values from the work")
gomega.Eventually(func() error {
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
@@ -221,20 +206,20 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
ginkgo.By("update configmap")
gomega.Eventually(func() error {
cm, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
cm, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
if err != nil {
return err
}
cm.Data["test"] = "testdata-updated"
_, err = spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Update(context.Background(), cm, metav1.UpdateOptions{})
_, err = spokeKubeClient.CoreV1().ConfigMaps(clusterName).Update(context.Background(), cm, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
ginkgo.By("get updated configmap values from the work")
gomega.Eventually(func() error {
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
@@ -267,7 +252,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
var object *unstructured.Unstructured
ginkgo.BeforeEach(func() {
object, _, err = util.NewDeployment(commOptions.SpokeClusterName, "deploy1", "sa")
object, _, err = util.NewDeployment(clusterName, "deploy1", "sa")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
manifests = append(manifests, util.ToManifest(object))
})
@@ -278,7 +263,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: "deploy1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
@@ -287,7 +272,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
},
}
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
@@ -296,7 +281,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
// update work
err = unstructured.SetNestedField(object.Object, int64(3), "spec", "replicas")
gomega.Eventually(func() error {
updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
@@ -309,13 +294,13 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
return err
}
_, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Patch(
_, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Patch(
context.Background(), updatedWork.Name, types.MergePatchType, pathBytes, metav1.PatchOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
gomega.Eventually(func() error {
deploy, err := spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
deploy, err := spokeKubeClient.AppsV1().Deployments(clusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
if err != nil {
return err
}
@@ -334,7 +319,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: "deploy1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
@@ -343,7 +328,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
},
}
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
@@ -354,7 +339,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
gomega.Expect(err).ToNot(gomega.HaveOccurred())
patch, err := object.MarshalJSON()
gomega.Expect(err).ToNot(gomega.HaveOccurred())
_, err = spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Patch(
_, err = spokeKubeClient.AppsV1().Deployments(clusterName).Patch(
context.Background(), "deploy1", types.ApplyPatchType, patch, metav1.PatchOptions{Force: ptr.To[bool](true), FieldManager: "test-integration"})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
@@ -362,7 +347,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
err = unstructured.SetNestedField(object.Object, int64(3), "spec", "replicas")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
gomega.Eventually(func() error {
updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
@@ -375,7 +360,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
return err
}
_, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Patch(
_, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Patch(
context.Background(), updatedWork.Name, types.MergePatchType, pathBytes, metav1.PatchOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
@@ -387,7 +372,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
// remove the replica field and the apply should work
unstructured.RemoveNestedField(object.Object, "spec", "replicas")
gomega.Eventually(func() error {
updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
@@ -400,7 +385,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
return err
}
_, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Patch(
_, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Patch(
context.Background(), updatedWork.Name, types.MergePatchType, pathBytes, metav1.PatchOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
@@ -415,7 +400,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: "deploy1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
@@ -424,7 +409,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
},
}
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
@@ -434,13 +419,13 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
objCopy := object.DeepCopy()
// work1 does not want to own the replica field
unstructured.RemoveNestedField(objCopy.Object, "spec", "replicas")
work1 := util.NewManifestWork(commOptions.SpokeClusterName, "another", []workapiv1.Manifest{util.ToManifest(objCopy)})
work1 := util.NewManifestWork(clusterName, "another", []workapiv1.Manifest{util.ToManifest(objCopy)})
work1.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{
{
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: "deploy1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
@@ -453,7 +438,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
},
}
_, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work1, metav1.CreateOptions{})
_, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work1, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work1.Namespace, work1.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
@@ -463,7 +448,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
err = unstructured.SetNestedField(object.Object, int64(3), "spec", "replicas")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
gomega.Eventually(func() error {
updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
@@ -476,7 +461,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
return err
}
_, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Patch(
_, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Patch(
context.Background(), updatedWork.Name, types.MergePatchType, pathBytes, metav1.PatchOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
@@ -486,7 +471,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
gomega.Eventually(func() error {
deploy, err := spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
deploy, err := spokeKubeClient.AppsV1().Deployments(clusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
if err != nil {
return err
}
@@ -502,7 +487,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
err = unstructured.SetNestedField(object.Object, "another-sa", "spec", "template", "spec", "serviceAccountName")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
gomega.Eventually(func() error {
updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
@@ -515,7 +500,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
return err
}
_, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Patch(
_, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Patch(
context.Background(), updatedWork.Name, types.MergePatchType, pathBytes, metav1.PatchOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
@@ -531,7 +516,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: "deploy1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
@@ -540,7 +525,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
},
}
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
@@ -550,13 +535,13 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
objCopy := object.DeepCopy()
// work1 does not want to own the replica field
unstructured.RemoveNestedField(objCopy.Object, "spec", "replicas")
work1 := util.NewManifestWork(commOptions.SpokeClusterName, "another", []workapiv1.Manifest{util.ToManifest(objCopy)})
work1 := util.NewManifestWork(clusterName, "another", []workapiv1.Manifest{util.ToManifest(objCopy)})
work1.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{
{
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: "deploy1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
@@ -569,14 +554,14 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
},
}
_, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work1, metav1.CreateOptions{})
_, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work1, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work1.Namespace, work1.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
gomega.Eventually(func() error {
deploy, err := spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
deploy, err := spokeKubeClient.AppsV1().Deployments(clusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
if err != nil {
return err
}
@@ -590,7 +575,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
// update deleteOption of the first work
gomega.Eventually(func() error {
updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
@@ -603,13 +588,13 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
return err
}
_, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Patch(
_, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Patch(
context.Background(), updatedWork.Name, types.MergePatchType, pathBytes, metav1.PatchOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
gomega.Eventually(func() error {
deploy, err := spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
deploy, err := spokeKubeClient.AppsV1().Deployments(clusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
if err != nil {
return err
}
@@ -628,7 +613,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: "deploy1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
@@ -645,7 +630,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
},
}
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
@@ -653,7 +638,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
ginkgo.By("Update deployment replica to 2")
gomega.Eventually(func() error {
deploy, err := spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
deploy, err := spokeKubeClient.AppsV1().Deployments(clusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
if err != nil {
return err
}
@@ -663,14 +648,14 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
}
deploy.Spec.Replicas = pointer.Int32(2)
_, err = spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Update(context.Background(), deploy, metav1.UpdateOptions{})
_, err = spokeKubeClient.AppsV1().Deployments(clusterName).Update(context.Background(), deploy, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
ginkgo.By("Update manifestwork with force apply to trigger a reconcile")
gomega.Eventually(func() error {
updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
@@ -682,7 +667,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
return err
}
_, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Patch(
_, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Patch(
context.Background(), updatedWork.Name, types.MergePatchType, pathBytes, metav1.PatchOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
@@ -692,7 +677,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
ginkgo.By("Deployment replicas should not be updated")
gomega.Eventually(func() error {
deploy, err := spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
deploy, err := spokeKubeClient.AppsV1().Deployments(clusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
if err != nil {
return err
}
@@ -710,7 +695,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
ginkgo.By("update manifestwork's deployment replica to 3")
err = unstructured.SetNestedField(object.Object, int64(3), "spec", "replicas")
gomega.Eventually(func() error {
updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
@@ -723,13 +708,13 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
return err
}
_, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Patch(
_, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Patch(
context.Background(), updatedWork.Name, types.MergePatchType, pathBytes, metav1.PatchOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
gomega.Eventually(func() error {
deploy, err := spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
deploy, err := spokeKubeClient.AppsV1().Deployments(clusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
if err != nil {
return err
}
@@ -748,7 +733,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
Name: "deploy1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
@@ -765,7 +750,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
},
}
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
@@ -773,7 +758,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
ginkgo.By("Update deployment replica to 2")
gomega.Eventually(func() error {
deploy, err := spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
deploy, err := spokeKubeClient.AppsV1().Deployments(clusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
if err != nil {
return err
}
@@ -783,7 +768,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
}
deploy.Spec.Replicas = pointer.Int32(2)
_, err = spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Update(context.Background(), deploy, metav1.UpdateOptions{})
_, err = spokeKubeClient.AppsV1().Deployments(clusterName).Update(context.Background(), deploy, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
@@ -794,7 +779,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
ginkgo.By("update manifestwork's deployment replica to 3")
err = unstructured.SetNestedField(object.Object, int64(3), "spec", "replicas")
gomega.Eventually(func() error {
updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
@@ -807,7 +792,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
return err
}
_, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Patch(
_, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Patch(
context.Background(), updatedWork.Name, types.MergePatchType, pathBytes, metav1.PatchOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
@@ -817,7 +802,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
ginkgo.By("Deployment replica should not be changed")
gomega.Eventually(func() error {
deploy, err := spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
deploy, err := spokeKubeClient.AppsV1().Deployments(clusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
if err != nil {
return err
}
@@ -842,7 +827,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
Kind: "ManifestWork",
}
work := util.NewManifestWork(commOptions.SpokeClusterName, workName, []workapiv1.Manifest{util.ToManifest(nestedWork)})
work := util.NewManifestWork(clusterName, workName, []workapiv1.Manifest{util.ToManifest(nestedWork)})
work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{
{
ResourceIdentifier: workapiv1.ResourceIdentifier{
@@ -859,7 +844,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
},
},
}
_, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
_, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
// make sure the nested work is created
@@ -889,23 +874,23 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
ginkgo.Context("wildcard to filter all resources", func() {
ginkgo.BeforeEach(func() {
cm1 := util.NewConfigmap(commOptions.SpokeClusterName, "cm1",
cm1 := util.NewConfigmap(clusterName, "cm1",
map[string]string{"test1": "testdata", "test2": "test2"}, []string{})
_, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Create(context.TODO(), cm1, metav1.CreateOptions{})
_, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Create(context.TODO(), cm1, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
manifests = append(manifests, util.ToManifest(cm1))
cm2 := util.NewConfigmap(commOptions.SpokeClusterName, "cm2",
cm2 := util.NewConfigmap(clusterName, "cm2",
map[string]string{"test1": "testdata", "test2": "test2"}, []string{})
_, err = spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Create(context.TODO(), cm2, metav1.CreateOptions{})
_, err = spokeKubeClient.CoreV1().ConfigMaps(clusterName).Create(context.TODO(), cm2, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
manifests = append(manifests, util.ToManifest(cm2))
})
ginkgo.AfterEach(func() {
err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Delete(context.TODO(), "cm1", metav1.DeleteOptions{})
err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Delete(context.TODO(), "cm1", metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
err = spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Delete(context.TODO(), "cm2", metav1.DeleteOptions{})
err = spokeKubeClient.CoreV1().ConfigMaps(clusterName).Delete(context.TODO(), "cm2", metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
})
@@ -955,7 +940,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
},
}
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
@@ -963,7 +948,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
ginkgo.By("get configmap values from the work")
gomega.Eventually(func() error {
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
@@ -994,14 +979,14 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
gomega.Eventually(func() error {
cmName := []string{"cm1", "cm2"}
for _, name := range cmName {
cm, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), name, metav1.GetOptions{})
cm, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), name, metav1.GetOptions{})
if err != nil {
return err
}
cm.Data["test1"] = "testdata-updated"
_, err = spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Update(context.Background(), cm, metav1.UpdateOptions{})
_, err = spokeKubeClient.CoreV1().ConfigMaps(clusterName).Update(context.Background(), cm, metav1.UpdateOptions{})
if err != nil {
return err
}
@@ -1011,7 +996,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
ginkgo.By("get updated configmap values from the work")
gomega.Eventually(func() error {
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
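(For reference, the ManifestConfigOption shape these update-strategy tests build over and over -- a minimal sketch; the ServerSideApplyConfig fields Force and FieldManager are taken from the API exercised above, and the field-manager name here is only an example:)
cfg := workapiv1.ManifestConfigOption{
	ResourceIdentifier: workapiv1.ResourceIdentifier{
		Group:     "apps",
		Resource:  "deployments",
		Namespace: clusterName,
		Name:      "deploy1",
	},
	UpdateStrategy: &workapiv1.UpdateStrategy{
		Type: workapiv1.UpdateStrategyTypeServerSideApply,
		ServerSideApply: &workapiv1.ServerSideApplyConfig{
			// Force takes ownership of fields another manager already owns.
			Force: true,
			// Assumption: any manager name distinct from the default work-agent.
			FieldManager: "example-manager",
		},
	},
}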

View File

@@ -7,7 +7,6 @@ import (
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
"github.com/openshift/library-go/pkg/controller/controllercmd"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -26,21 +25,18 @@ import (
"open-cluster-management.io/ocm/test/integration/util"
)
func startWorkAgent(ctx context.Context, o *spoke.WorkloadAgentOptions, commOption *commonoptions.AgentOptions) {
agentConfig := spoke.NewWorkAgentConfig(commOption, o)
err := agentConfig.RunWorkloadAgent(ctx, &controllercmd.ControllerContext{
KubeConfig: spokeRestConfig,
EventRecorder: util.NewIntegrationTestEventRecorder("integration"),
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
func appliedManifestWorkGracePeriodDecorator(duration time.Duration) agentOptionsDecorator {
return func(opt *spoke.WorkloadAgentOptions, commonOpt *commonoptions.AgentOptions) (*spoke.WorkloadAgentOptions, *commonoptions.AgentOptions) {
opt.AppliedManifestWorkEvictionGracePeriod = duration
return opt, commonOpt
}
}
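(The replacement startWorkAgent is outside this hunk; a minimal reconstruction from its call sites and the removed version above. Driver-specific defaults, e.g. the cloud events client ID and codecs, are elided and assumed to be handled inside:)
func startWorkAgent(ctx context.Context, clusterName string, decorators ...agentOptionsDecorator) {
	// Build default options, then let each decorator adjust them in order.
	o := spoke.NewWorkloadAgentOptions()
	o.WorkloadSourceDriver = sourceDriver
	o.WorkloadSourceConfig = sourceConfigFileName
	commOptions := commonoptions.NewAgentOptions()
	commOptions.SpokeClusterName = clusterName
	for _, decorate := range decorators {
		o, commOptions = decorate(o, commOptions)
	}
	agentConfig := spoke.NewWorkAgentConfig(commOptions, o)
	err := agentConfig.RunWorkloadAgent(ctx, &controllercmd.ControllerContext{
		KubeConfig:    spokeRestConfig,
		EventRecorder: util.NewIntegrationTestEventRecorder("integration"),
	})
	gomega.Expect(err).NotTo(gomega.HaveOccurred())
}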
var _ = ginkgo.Describe("ManifestWork", func() {
var o *spoke.WorkloadAgentOptions
var commOptions *commonoptions.AgentOptions
var cancel context.CancelFunc
var workName string
var clusterName string
var work *workapiv1.ManifestWork
var expectedFinalizer string
var manifests []workapiv1.Manifest
@@ -51,31 +47,21 @@ var _ = ginkgo.Describe("ManifestWork", func() {
ginkgo.BeforeEach(func() {
expectedFinalizer = workapiv1.ManifestWorkFinalizer
workName = fmt.Sprintf("work-%s", rand.String(5))
clusterName := rand.String(5)
o = spoke.NewWorkloadAgentOptions()
o.StatusSyncInterval = 3 * time.Second
o.AppliedManifestWorkEvictionGracePeriod = 5 * time.Second
o.WorkloadSourceDriver = sourceDriver
o.WorkloadSourceConfig = sourceConfigFileName
if sourceDriver != util.KubeDriver {
expectedFinalizer = common.ResourceFinalizer
o.CloudEventsClientID = fmt.Sprintf("%s-work-agent", clusterName)
o.CloudEventsClientCodecs = []string{"manifestbundle"}
}
workName = fmt.Sprintf("work-%s", rand.String(5))
clusterName = rand.String(5)
commOptions = commonoptions.NewAgentOptions()
commOptions.SpokeClusterName = clusterName
ns := &corev1.Namespace{}
ns.Name = commOptions.SpokeClusterName
ns := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{Name: clusterName},
}
_, err := spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
var ctx context.Context
ctx, cancel = context.WithCancel(context.Background())
go startWorkAgent(ctx, o, commOptions)
go startWorkAgent(ctx, clusterName, appliedManifestWorkGracePeriodDecorator(5*time.Second))
// reset manifests and workOpts
manifests = nil
@@ -83,33 +69,33 @@ var _ = ginkgo.Describe("ManifestWork", func() {
})
ginkgo.JustBeforeEach(func() {
work = util.NewManifestWork(commOptions.SpokeClusterName, workName, manifests)
work = util.NewManifestWork(clusterName, workName, manifests)
for _, opt := range workOpts {
opt(work)
}
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
appliedManifestWorkName = util.AppliedManifestWorkName(sourceDriver, hubHash, work)
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
appliedManifestWorkName = util.AppliedManifestWorkName(hubHash, work)
})
ginkgo.AfterEach(func() {
err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
if !errors.IsNotFound(err) {
gomega.Expect(err).ToNot(gomega.HaveOccurred())
}
gomega.Eventually(func() error {
_, err := hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
_, err := hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if errors.IsNotFound(err) {
return nil
}
if err != nil {
return err
}
return fmt.Errorf("work %s in namespace %s still exists", work.Name, commOptions.SpokeClusterName)
return fmt.Errorf("work %s in namespace %s still exists", work.Name, clusterName)
}, eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed())
err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), commOptions.SpokeClusterName, metav1.DeleteOptions{})
err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), clusterName, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
if cancel != nil {
@@ -120,7 +106,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
ginkgo.Context("With a single manifest", func() {
ginkgo.BeforeEach(func() {
manifests = []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm1, map[string]string{"a": "b"}, nil)),
util.ToManifest(util.NewConfigmap(clusterName, cm1, map[string]string{"a": "b"}, nil)),
}
})
@@ -128,7 +114,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
ginkgo.By("field manager should be work-agent")
cm, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm1, metav1.GetOptions{})
cm, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
for _, entry := range cm.ManagedFields {
gomega.Expect(entry.Manager).To(gomega.Equal("work-agent"))
@@ -147,9 +133,9 @@ var _ = ginkgo.Describe("ManifestWork", func() {
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
newManifests := []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm2, map[string]string{"x": "y"}, nil)),
util.ToManifest(util.NewConfigmap(clusterName, cm2, map[string]string{"x": "y"}, nil)),
}
updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
newWork := updatedWork.DeepCopy()
@@ -158,7 +144,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
pathBytes, err := util.NewWorkPatch(updatedWork, newWork)
gomega.Expect(err).ToNot(gomega.HaveOccurred())
_, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Patch(
_, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Patch(
context.Background(), updatedWork.Name, types.MergePatchType, pathBytes, metav1.PatchOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
@@ -181,14 +167,14 @@ var _ = ginkgo.Describe("ManifestWork", func() {
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
_, err = spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm1, metav1.GetOptions{})
_, err = spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{})
gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue())
})
ginkgo.It("should delete work successfully", func() {
util.AssertFinalizerAdded(work.Namespace, work.Name, expectedFinalizer, hubWorkClient, eventuallyTimeout, eventuallyInterval)
err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkDeleted(work.Namespace, work.Name, fmt.Sprintf("%s-%s", hubHash, work.Name),
@@ -200,8 +186,8 @@ var _ = ginkgo.Describe("ManifestWork", func() {
ginkgo.BeforeEach(func() {
manifests = []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap("non-existent-namespace", cm1, map[string]string{"a": "b"}, nil)),
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm2, map[string]string{"c": "d"}, nil)),
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm3", map[string]string{"e": "f"}, nil)),
util.ToManifest(util.NewConfigmap(clusterName, cm2, map[string]string{"c": "d"}, nil)),
util.ToManifest(util.NewConfigmap(clusterName, "cm3", map[string]string{"e": "f"}, nil)),
}
})
@@ -223,12 +209,12 @@ var _ = ginkgo.Describe("ManifestWork", func() {
[]metav1.ConditionStatus{metav1.ConditionFalse, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
newManifests := []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm1, map[string]string{"a": "b"}, nil)),
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm2, map[string]string{"x": "y"}, nil)),
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm4", map[string]string{"e": "f"}, nil)),
util.ToManifest(util.NewConfigmap(clusterName, cm1, map[string]string{"a": "b"}, nil)),
util.ToManifest(util.NewConfigmap(clusterName, cm2, map[string]string{"x": "y"}, nil)),
util.ToManifest(util.NewConfigmap(clusterName, "cm4", map[string]string{"e": "f"}, nil)),
}
updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
newWork := updatedWork.DeepCopy()
@@ -237,7 +223,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
pathBytes, err := util.NewWorkPatch(updatedWork, newWork)
gomega.Expect(err).ToNot(gomega.HaveOccurred())
_, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Patch(
_, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Patch(
context.Background(), updatedWork.Name, types.MergePatchType, pathBytes, metav1.PatchOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
@@ -263,14 +249,14 @@ var _ = ginkgo.Describe("ManifestWork", func() {
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
_, err = spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), "cm3", metav1.GetOptions{})
_, err = spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), "cm3", metav1.GetOptions{})
gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue())
})
ginkgo.It("should delete work successfully", func() {
util.AssertFinalizerAdded(work.Namespace, work.Name, expectedFinalizer, hubWorkClient, eventuallyTimeout, eventuallyInterval)
err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkDeleted(work.Namespace, work.Name, fmt.Sprintf("%s-%s", hubHash, work.Name),
@@ -295,7 +281,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
objects = append(objects, obj)
// cr
obj, gvr, err = util.GuestbookCr(commOptions.SpokeClusterName, "guestbook1")
obj, gvr, err = util.GuestbookCr(clusterName, "guestbook1")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
gvrs = append(gvrs, gvr)
objects = append(objects, obj)
@@ -337,7 +323,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
util.AssertAppliedResources(appliedManifestWorkName, gvrs, namespaces, names, spokeWorkClient, eventuallyTimeout, eventuallyInterval)
// update object label
obj, gvr, err := util.GuestbookCr(commOptions.SpokeClusterName, "guestbook1")
obj, gvr, err := util.GuestbookCr(clusterName, "guestbook1")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
cr, err := util.GetResource(obj.GetNamespace(), obj.GetName(), gvr, spokeDynamicClient)
@@ -358,7 +344,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
pathBytes, err := util.NewWorkPatch(work, newWork)
gomega.Expect(err).ToNot(gomega.HaveOccurred())
_, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Patch(
_, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Patch(
context.Background(), work.Name, types.MergePatchType, pathBytes, metav1.PatchOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
@@ -394,7 +380,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
util.AssertAppliedResources(appliedManifestWorkName, gvrs, namespaces, names, spokeWorkClient, eventuallyTimeout, eventuallyInterval)
// update object finalizer
obj, gvr, err := util.GuestbookCr(commOptions.SpokeClusterName, "guestbook1")
obj, gvr, err := util.GuestbookCr(clusterName, "guestbook1")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
cr, err := util.GetResource(obj.GetNamespace(), obj.GetName(), gvr, spokeDynamicClient)
@@ -419,7 +405,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
pathBytes, err := util.NewWorkPatch(updatedWork, newWork)
gomega.Expect(err).ToNot(gomega.HaveOccurred())
_, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Patch(
_, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Patch(
context.Background(), updatedWork.Name, types.MergePatchType, pathBytes, metav1.PatchOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
@@ -466,12 +452,12 @@ var _ = ginkgo.Describe("ManifestWork", func() {
util.AssertAppliedResources(appliedManifestWorkName, gvrs, namespaces, names, spokeWorkClient, eventuallyTimeout, eventuallyInterval)
// delete manifest work
err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
// wait for deletion of manifest work
gomega.Eventually(func() bool {
_, err := hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
_, err := hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
return errors.IsNotFound(err)
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
@@ -506,7 +492,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
objects = nil
// Create a clusterrole with namespace in metadata
u, gvr := util.NewClusterRole(commOptions.SpokeClusterName, "work-clusterrole")
u, gvr := util.NewClusterRole(clusterName, "work-clusterrole")
gvrs = append(gvrs, gvr)
objects = append(objects, u)
@@ -546,19 +532,19 @@ var _ = ginkgo.Describe("ManifestWork", func() {
gvrs = nil
objects = nil
u, gvr := util.NewServiceAccount(commOptions.SpokeClusterName, "sa")
u, gvr := util.NewServiceAccount(clusterName, "sa")
gvrs = append(gvrs, gvr)
objects = append(objects, u)
u, gvr = util.NewRole(commOptions.SpokeClusterName, "role1")
u, gvr = util.NewRole(clusterName, "role1")
gvrs = append(gvrs, gvr)
objects = append(objects, u)
u, gvr = util.NewRoleBinding(commOptions.SpokeClusterName, "rolebinding1", "sa", "role1")
u, gvr = util.NewRoleBinding(clusterName, "rolebinding1", "sa", "role1")
gvrs = append(gvrs, gvr)
objects = append(objects, u)
u, gvr, err = util.NewDeployment(commOptions.SpokeClusterName, "deploy1", "sa")
u, gvr, err = util.NewDeployment(clusterName, "deploy1", "sa")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
gvrs = append(gvrs, gvr)
objects = append(objects, u)
@@ -613,9 +599,9 @@ var _ = ginkgo.Describe("ManifestWork", func() {
ginkgo.By("update manifests in work")
oldServiceAccount := objects[0]
gvrs[0], gvrs[3] = gvrs[3], gvrs[0]
u, _ := util.NewServiceAccount(commOptions.SpokeClusterName, "admin")
u, _ := util.NewServiceAccount(clusterName, "admin")
objects[3] = u
u, _, err = util.NewDeployment(commOptions.SpokeClusterName, "deploy1", "admin")
u, _, err = util.NewDeployment(clusterName, "deploy1", "admin")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
objects[0] = u
@@ -629,7 +615,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
updateTime := metav1.Now()
time.Sleep(1 * time.Second)
updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
newWork := updatedWork.DeepCopy()
@@ -638,7 +624,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
pathBytes, err := util.NewWorkPatch(updatedWork, newWork)
gomega.Expect(err).ToNot(gomega.HaveOccurred())
_, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Patch(
_, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Patch(
context.Background(), updatedWork.Name, types.MergePatchType, pathBytes, metav1.PatchOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
@@ -653,7 +639,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
ginkgo.By("check if deployment is updated")
gomega.Eventually(func() error {
u, err := util.GetResource(commOptions.SpokeClusterName, objects[0].GetName(), gvrs[0], spokeDynamicClient)
u, err := util.GetResource(clusterName, objects[0].GetName(), gvrs[0], spokeDynamicClient)
if err != nil {
return err
}
@@ -667,7 +653,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
ginkgo.By("check if LastTransitionTime is updated")
gomega.Eventually(func() error {
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
@@ -713,19 +699,19 @@ var _ = ginkgo.Describe("ManifestWork", func() {
var finalizer = "cluster.open-cluster-management.io/testing"
ginkgo.BeforeEach(func() {
manifests = []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm1, map[string]string{"a": "b"}, []string{finalizer})),
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm2, map[string]string{"c": "d"}, []string{finalizer})),
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm3", map[string]string{"e": "f"}, []string{finalizer})),
util.ToManifest(util.NewConfigmap(clusterName, cm1, map[string]string{"a": "b"}, []string{finalizer})),
util.ToManifest(util.NewConfigmap(clusterName, cm2, map[string]string{"c": "d"}, []string{finalizer})),
util.ToManifest(util.NewConfigmap(clusterName, "cm3", map[string]string{"e": "f"}, []string{finalizer})),
}
})
ginkgo.AfterEach(func() {
err = util.RemoveConfigmapFinalizers(spokeKubeClient, commOptions.SpokeClusterName, cm1, cm2, "cm3")
err = util.RemoveConfigmapFinalizers(spokeKubeClient, clusterName, cm1, cm2, "cm3")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
})
ginkgo.It("should remove applied resource immediately when work is updated", func() {
updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
newWork := updatedWork.DeepCopy()
@@ -734,7 +720,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
pathBytes, err := util.NewWorkPatch(updatedWork, newWork)
gomega.Expect(err).ToNot(gomega.HaveOccurred())
_, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Patch(
_, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Patch(
context.Background(), updatedWork.Name, types.MergePatchType, pathBytes, metav1.PatchOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
@@ -742,7 +728,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
go func() {
time.Sleep(2 * time.Second)
// remove finalizers of cm1
_ = util.RemoveConfigmapFinalizers(spokeKubeClient, commOptions.SpokeClusterName, cm1)
_ = util.RemoveConfigmapFinalizers(spokeKubeClient, clusterName, cm1)
}()
// check if resource created by stale manifest is deleted once it is removed from applied resource list
@@ -761,7 +747,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
_, err = spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm1, metav1.GetOptions{})
_, err = spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{})
gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue())
err = hubWorkClient.WorkV1().ManifestWorks(work.Namespace).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
@@ -776,7 +762,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
newWork := updatedWork.DeepCopy()
@@ -785,7 +771,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
pathBytes, err := util.NewWorkPatch(updatedWork, newWork)
gomega.Expect(err).ToNot(gomega.HaveOccurred())
_, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Patch(
_, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Patch(
context.Background(), updatedWork.Name, types.MergePatchType, pathBytes, metav1.PatchOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
@@ -798,7 +784,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
go func() {
time.Sleep(2 * time.Second)
// remove finalizers of cm1
_ = util.RemoveConfigmapFinalizers(spokeKubeClient, commOptions.SpokeClusterName, cm1)
_ = util.RemoveConfigmapFinalizers(spokeKubeClient, clusterName, cm1)
}()
// check if resource created by stale manifest is deleted once it is removed from applied resource list
@@ -817,7 +803,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
_, err = spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm1, metav1.GetOptions{})
_, err = spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{})
gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue())
})
@@ -884,8 +870,8 @@ var _ = ginkgo.Describe("ManifestWork", func() {
ginkgo.Context("Work completion", func() {
ginkgo.BeforeEach(func() {
manifests = []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm1, map[string]string{"a": "b"}, nil)),
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm2, map[string]string{"c": "d"}, nil)),
util.ToManifest(util.NewConfigmap(clusterName, cm1, map[string]string{"a": "b"}, nil)),
util.ToManifest(util.NewConfigmap(clusterName, cm2, map[string]string{"c": "d"}, nil)),
}
workOpts = append(workOpts, func(work *workapiv1.ManifestWork) {
work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{
@@ -893,7 +879,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
ResourceIdentifier: workapiv1.ResourceIdentifier{
Resource: "configmaps",
Name: cm1,
Namespace: commOptions.SpokeClusterName,
Namespace: clusterName,
},
ConditionRules: []workapiv1.ConditionRule{
{
@@ -918,11 +904,11 @@ var _ = ginkgo.Describe("ManifestWork", func() {
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
newManifests := []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm1, map[string]string{"a": "b", "complete": "true"}, nil)),
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm2, map[string]string{"c": "d"}, nil)),
util.ToManifest(util.NewConfigmap(clusterName, cm1, map[string]string{"a": "b", "complete": "true"}, nil)),
util.ToManifest(util.NewConfigmap(clusterName, cm2, map[string]string{"c": "d"}, nil)),
}
updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
newWork := updatedWork.DeepCopy()
@@ -931,13 +917,13 @@ var _ = ginkgo.Describe("ManifestWork", func() {
pathBytes, err := util.NewWorkPatch(updatedWork, newWork)
gomega.Expect(err).ToNot(gomega.HaveOccurred())
_, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Patch(
_, err = hubWorkClient.WorkV1().ManifestWorks(clusterName).Patch(
context.Background(), updatedWork.Name, types.MergePatchType, pathBytes, metav1.PatchOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
// ManifestWork should be marked completed
gomega.Eventually(func() error {
work, err := hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
work, err := hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}

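The Eventually poll above is truncated mid-body. A hedged sketch of how such a completion check can be finished, reusing the test's in-scope variables (hubWorkClient, clusterName, work, eventuallyTimeout, eventuallyInterval) and assuming the completion condition type is the literal string "Complete" (the API may expose a constant for it); meta here is k8s.io/apimachinery/pkg/api/meta, and fmt would also need to be imported:

gomega.Eventually(func() error {
	w, err := hubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	// Sketch only: the condition type name is assumed, not taken from the commit.
	if !meta.IsStatusConditionTrue(w.Status.Conditions, "Complete") {
		return fmt.Errorf("work %s is not completed yet", w.Name)
	}
	return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
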
2
vendor/modules.txt vendored

@@ -1813,7 +1813,7 @@ open-cluster-management.io/api/operator/v1
open-cluster-management.io/api/utils/work/v1/workapplier
open-cluster-management.io/api/work/v1
open-cluster-management.io/api/work/v1alpha1
# open-cluster-management.io/sdk-go v1.0.1-0.20250901084824-d4c9f78c2e6a
# open-cluster-management.io/sdk-go v1.0.1-0.20250905083121-3fc951c340cc
## explicit; go 1.24.0
open-cluster-management.io/sdk-go/pkg/apis/cluster/v1alpha1
open-cluster-management.io/sdk-go/pkg/apis/cluster/v1beta1

View File

@@ -62,6 +62,7 @@ func (c *ManifestBundleCodec) Encode(source string, eventType types.CloudEventsT
evt := types.NewEventBuilder(source, eventType).
WithResourceID(string(work.UID)).
WithStatusUpdateSequenceID(sequenceGenerator.Generate().String()).
WithResourceName(work.Name).
WithResourceVersion(resourceVersion).
WithClusterName(work.Namespace).
WithOriginalSource(originalSource).
@@ -104,6 +105,17 @@ func (c *ManifestBundleCodec) Decode(evt *cloudevents.Event) (*workv1.ManifestWo
return nil, fmt.Errorf("failed to get resourceid extension: %v", err)
}
var resourceName string
if v, ok := evtExtensions[types.ExtensionResourceName]; ok {
resourceName, err = cloudeventstypes.ToString(v)
if err != nil {
return nil, fmt.Errorf("failed to get resourcename extension: %v", err)
}
} else {
// fall back to using resourceID as the resourceName
resourceName = resourceID
}
resourceVersion, err := cloudeventstypes.ToInteger(evtExtensions[types.ExtensionResourceVersion])
if err != nil {
return nil, fmt.Errorf("failed to get resourceversion extension: %v", err)
@@ -123,7 +135,7 @@ func (c *ManifestBundleCodec) Decode(evt *cloudevents.Event) (*workv1.ManifestWo
UID: kubetypes.UID(resourceID),
Generation: int64(resourceVersion),
ResourceVersion: fmt.Sprintf("%d", resourceVersion),
Name: resourceID,
Name: resourceName,
Namespace: clusterName,
Labels: map[string]string{
common.CloudEventsOriginalSourceLabelKey: evt.Source(),

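The hunks above make the codec round-trip the ManifestWork name through a dedicated "resourcename" CloudEvents extension, falling back to the resource ID so events from older producers decode exactly as before (Name == resourceID). A minimal, self-contained sketch of that consumer-side fallback, using only the CloudEvents Go SDK; the event and the "uid-1234" / "example-work" values are illustrative, not taken from the commit:

package main

import (
	"fmt"

	cloudevents "github.com/cloudevents/sdk-go/v2"
	cloudeventstypes "github.com/cloudevents/sdk-go/v2/types"
)

// resourceNameFromEvent mirrors the Decode fallback above: prefer the
// "resourcename" extension, otherwise reuse the resource ID so events from
// older producers keep the previous behavior.
func resourceNameFromEvent(evt cloudevents.Event, resourceID string) (string, error) {
	v, ok := evt.Extensions()["resourcename"]
	if !ok {
		return resourceID, nil
	}
	name, err := cloudeventstypes.ToString(v)
	if err != nil {
		return "", fmt.Errorf("failed to get resourcename extension: %v", err)
	}
	return name, nil
}

func main() {
	evt := cloudevents.NewEvent()
	evt.SetExtension("resourceid", "uid-1234") // illustrative values
	evt.SetExtension("resourcename", "example-work")
	name, err := resourceNameFromEvent(evt, "uid-1234")
	if err != nil {
		panic(err)
	}
	fmt.Println(name) // prints "example-work"
}
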
View File

@@ -47,6 +47,7 @@ func (c *ManifestBundleCodec) Encode(source string, eventType types.CloudEventsT
evt := types.NewEventBuilder(source, eventType).
WithClusterName(work.Namespace).
WithResourceID(string(work.UID)).
WithResourceName(work.Name).
WithResourceVersion(int64(resourceVersion)).
NewEvent()
@@ -92,6 +93,17 @@ func (c *ManifestBundleCodec) Decode(evt *cloudevents.Event) (*workv1.ManifestWo
return nil, fmt.Errorf("failed to get resourceid extension: %v", err)
}
var resourceName string
if v, ok := evtExtensions[types.ExtensionResourceName]; ok {
resourceName, err = cloudeventstypes.ToString(v)
if err != nil {
return nil, fmt.Errorf("failed to get resourcename extension: %v", err)
}
} else {
// fall back to using resourceID as the resourceName
resourceName = resourceID
}
resourceVersion, err := cloudeventstypes.ToInteger(evtExtensions[types.ExtensionResourceVersion])
if err != nil {
return nil, fmt.Errorf("failed to get resourceversion extension: %v", err)
@@ -119,6 +131,7 @@ func (c *ManifestBundleCodec) Decode(evt *cloudevents.Event) (*workv1.ManifestWo
}
metaObj.UID = kubetypes.UID(resourceID)
metaObj.Name = resourceName
metaObj.ResourceVersion = fmt.Sprintf("%d", resourceVersion)
if metaObj.Annotations == nil {
metaObj.Annotations = map[string]string{}

View File

@@ -57,6 +57,9 @@ const (
// ExtensionResourceID is the cloud event extension key of the resource ID.
ExtensionResourceID = "resourceid"
// ExtensionResourceName is the cloud event extension key of the resource name.
ExtensionResourceName = "resourcename"
// ExtensionResourceVersion is the cloud event extension key of the resource version.
ExtensionResourceVersion = "resourceversion"
@@ -228,6 +231,7 @@ type EventBuilder struct {
clusterName string
originalSource string
resourceID string
resourceName string
sequenceID string
resourceVersion *int64
eventType CloudEventsType
@@ -246,6 +250,11 @@ func (b *EventBuilder) WithResourceID(resourceID string) *EventBuilder {
return b
}
func (b *EventBuilder) WithResourceName(resourceName string) *EventBuilder {
b.resourceName = resourceName
return b
}
func (b *EventBuilder) WithResourceVersion(resourceVersion int64) *EventBuilder {
b.resourceVersion = &resourceVersion
return b
@@ -285,6 +294,13 @@ func (b *EventBuilder) NewEvent() cloudevents.Event {
evt.SetExtension(ExtensionResourceID, b.resourceID)
}
if len(b.resourceName) != 0 {
evt.SetExtension(ExtensionResourceName, b.resourceName)
} else {
// if resourceName is not set, use resourceID as the resourceName
evt.SetExtension(ExtensionResourceName, b.resourceID)
}
if b.resourceVersion != nil {
evt.SetExtension(ExtensionResourceVersion, *b.resourceVersion)
}
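
On the producer side, NewEvent (above) guarantees the "resourcename" extension is always populated: if WithResourceName was never called, the resource ID doubles as the name. A standalone sketch of that fallback, with hypothetical values; the helper is illustrative, not the sdk-go API:

package main

import (
	"fmt"

	cloudevents "github.com/cloudevents/sdk-go/v2"
)

// setNameExtensions mimics the fallback in EventBuilder.NewEvent: without an
// explicit resource name, the resource ID is reused, so consumers never see
// an empty "resourcename" extension.
func setNameExtensions(evt *cloudevents.Event, resourceID, resourceName string) {
	evt.SetExtension("resourceid", resourceID)
	if len(resourceName) != 0 {
		evt.SetExtension("resourcename", resourceName)
	} else {
		evt.SetExtension("resourcename", resourceID)
	}
}

func main() {
	evt := cloudevents.NewEvent()
	setNameExtensions(&evt, "uid-5678", "") // hypothetical ID; name left unset
	fmt.Println(evt.Extensions()["resourcename"]) // prints "uid-5678"
}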