make work status sync interval configurable (#1009)

* update api

Signed-off-by: zhujian <jiazhu@redhat.com>

* make work status sync interval configurable

Signed-off-by: zhujian <jiazhu@redhat.com>

* add unit tests

Signed-off-by: zhujian <jiazhu@redhat.com>

* fix flaky e2e tests

Signed-off-by: zhujian <jiazhu@redhat.com>

* drop go mod replace

Signed-off-by: zhujian <jiazhu@redhat.com>

---------

Signed-off-by: zhujian <jiazhu@redhat.com>
Author: Jian Zhu
Date: 2025-05-27 15:47:58 +08:00
Committed by: GitHub
Parent: f13599ffdb
Commit: 4d1b4ee8d5
20 changed files with 198 additions and 105 deletions


@@ -264,7 +264,10 @@ spec:
reservedClusterClaimSuffixes:
description: Custom suffixes for reserved ClusterClaims.
items:
maxLength: 64
minLength: 1
type: string
maxItems: 10
type: array
required:
- maxCustomClusterClaims
@@ -486,6 +489,13 @@ spec:
If it is set empty, use the default value: 50
format: int32
type: integer
statusSyncInterval:
description: |-
StatusSyncInterval is the interval for the work agent to check the status of ManifestWorks.
A larger value means less frequent status syncs and fewer API calls to the managed cluster, and vice versa.
The value (x) must satisfy: 5s <= x <= 1h.
pattern: ^([0-9]+(s|m|h))+$
type: string
type: object
workImagePullSpec:
description: |-

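The pattern above only constrains the format, not the documented range; a quick standalone Go check (sample values are illustrative, not from this commit) shows what it accepts:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// The CRD pattern for statusSyncInterval. Note that it also accepts
	// compound values such as "1h30m", and that the documented
	// 5s <= x <= 1h range is not enforced by the pattern itself.
	pat := regexp.MustCompile(`^([0-9]+(s|m|h))+$`)
	for _, v := range []string{"5s", "90s", "1h", "1h30m", "500ms", "2d"} {
		fmt.Printf("%-6s matches=%v\n", v, pat.MatchString(v))
	}
}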

@@ -264,7 +264,10 @@ spec:
reservedClusterClaimSuffixes:
description: Custom suffixes for reserved ClusterClaims.
items:
maxLength: 64
minLength: 1
type: string
maxItems: 10
type: array
required:
- maxCustomClusterClaims
@@ -486,6 +489,13 @@ spec:
If it is set empty, use the default value: 50
format: int32
type: integer
statusSyncInterval:
description: |-
StatusSyncInterval is the interval for the work agent to check the status of ManifestWorks.
A larger value means less frequent status syncs and fewer API calls to the managed cluster, and vice versa.
The value (x) must satisfy: 5s <= x <= 1h.
pattern: ^([0-9]+(s|m|h))+$
type: string
type: object
workImagePullSpec:
description: |-

go.mod

@@ -37,7 +37,7 @@ require (
k8s.io/kube-aggregator v0.32.1
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
open-cluster-management.io/addon-framework v0.12.1-0.20250407131028-9d436ffc2da7
open-cluster-management.io/api v0.16.2-0.20250506092504-9143e192a0a7
open-cluster-management.io/api v0.16.2-0.20250527062515-98a1d87193c1
open-cluster-management.io/sdk-go v0.16.1-0.20250428032116-875454003818
sigs.k8s.io/cluster-inventory-api v0.0.0-20240730014211-ef0154379848
sigs.k8s.io/controller-runtime v0.20.2

go.sum

@@ -487,8 +487,8 @@ k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6J
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
open-cluster-management.io/addon-framework v0.12.1-0.20250407131028-9d436ffc2da7 h1:oLu9ag44Msf56Hb19b/b+UTNq0J2l9rhlBQ1gh7izCU=
open-cluster-management.io/addon-framework v0.12.1-0.20250407131028-9d436ffc2da7/go.mod h1:7AEw1Sq9UEWpQGTU8zV1XPNkFRBYPbyBh8tfhISV++s=
open-cluster-management.io/api v0.16.2-0.20250506092504-9143e192a0a7 h1:UupwgKlXEy/uyIIy8L6AEIOQUsZkz259C7mr44pJKC0=
open-cluster-management.io/api v0.16.2-0.20250506092504-9143e192a0a7/go.mod h1:/OeqXycNBZQoe3WG6ghuWsMgsKGuMZrK8ZpsU6gWL0Y=
open-cluster-management.io/api v0.16.2-0.20250527062515-98a1d87193c1 h1:OAFgR9hrr3dpiwb+Pgz5gSKKdtnHwD8L+PkBd4HjHXc=
open-cluster-management.io/api v0.16.2-0.20250527062515-98a1d87193c1/go.mod h1:/OeqXycNBZQoe3WG6ghuWsMgsKGuMZrK8ZpsU6gWL0Y=
open-cluster-management.io/sdk-go v0.16.1-0.20250428032116-875454003818 h1:b7HpdTpKPzLEoJ5UtrXCed1PjxaKOxEboJ+kG6FZudI=
open-cluster-management.io/sdk-go v0.16.1-0.20250428032116-875454003818/go.mod h1:n89YVVoi5zm3KVpOyVMmTdD4rGOVSsykUtu7Ol3do3M=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcpeN4baWEV2ko2Z/AsiZgEdwgcfwLgMo=


@@ -61,6 +61,17 @@ spec:
description: Spec represents the desired ManifestWork payload and Placement
reference to be reconciled
properties:
cascadeDeletionPolicy:
default: Background
description: |-
CascadeDeletionPolicy decides whether the manifestWorkReplicaSet is deleted before or after the related manifestWorks are gone.
Acceptable values are:
'Background' - the manifestWorkReplicaSet is deleted without waiting for the related manifestWorks to be gone.
'Foreground' - the manifestWorkReplicaSet is not deleted until the related manifestWorks are gone.
enum:
- Background
- Foreground
type: string
manifestWorkTemplate:
description: ManifestWorkTemplate is the ManifestWorkSpec that will
be used to generate a per-cluster ManifestWork
@@ -581,6 +592,7 @@ spec:
minItems: 1
type: array
required:
- cascadeDeletionPolicy
- placementRefs
type: object
status:


@@ -86,7 +86,9 @@ spec:
{{if .ExternalServerURL}}
- "--spoke-external-server-urls={{ .ExternalServerURL }}"
{{end}}
{{if eq .Replica 1}}
{{if .WorkStatusSyncInterval}}
- "--status-sync-interval={{ .WorkStatusSyncInterval }}"
{{else if eq .Replica 1}}
- "--status-sync-interval=60s"
{{end}}
{{if gt .ClientCertExpirationSeconds 0}}


@@ -75,7 +75,9 @@ spec:
- "--terminate-on-files=/spoke/config/kubeconfig"
{{end}}
- "--terminate-on-files=/spoke/hub-kubeconfig/kubeconfig"
{{if eq .Replica 1}}
{{if .WorkStatusSyncInterval}}
- "--status-sync-interval={{ .WorkStatusSyncInterval }}"
{{else if eq .Replica 1}}
- "--status-sync-interval=60s"
{{end}}
{{if gt .WorkKubeAPIQPS 0.0}}

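Both templates now apply the same precedence: an explicitly configured interval always wins, and the hardcoded 60s default remains only for single-replica agents. A minimal text/template sketch (struct and values are illustrative) renders the three cases:

package main

import (
	"os"
	"text/template"
)

// config mirrors the two fields the deployment templates above consult.
type config struct {
	WorkStatusSyncInterval string
	Replica                int
}

const args = `{{if .WorkStatusSyncInterval}}--status-sync-interval={{ .WorkStatusSyncInterval }}{{else if eq .Replica 1}}--status-sync-interval=60s{{end}}
`

func main() {
	t := template.Must(template.New("args").Parse(args))
	for _, c := range []config{
		{"20s", 3}, // explicit interval wins regardless of replica count
		{"", 1},    // no interval, single replica: falls back to 60s
		{"", 3},    // no interval, multiple replicas: flag omitted
	} {
		_ = t.Execute(os.Stdout, c)
	}
}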

@@ -171,6 +171,8 @@ type klusterletConfig struct {
RegistrationKubeAPIBurst int32
WorkKubeAPIQPS float32
WorkKubeAPIBurst int32
AppliedManifestWorkEvictionGracePeriod string
WorkStatusSyncInterval string
AgentKubeAPIQPS float32
AgentKubeAPIBurst int32
ExternalManagedKubeConfigSecret string
@@ -178,7 +180,6 @@ type klusterletConfig struct {
ExternalManagedKubeConfigWorkSecret string
ExternalManagedKubeConfigAgentSecret string
InstallMode operatorapiv1.InstallMode
AppliedManifestWorkEvictionGracePeriod string
// PriorityClassName is the name of the PriorityClass used by the deployed agents
PriorityClassName string
@@ -252,20 +253,19 @@ func (n *klusterletController) sync(ctx context.Context, controllerContext facto
}
config := klusterletConfig{
KlusterletName: klusterlet.Name,
KlusterletNamespace: helpers.KlusterletNamespace(klusterlet),
AgentNamespace: helpers.AgentNamespace(klusterlet),
AgentID: string(klusterlet.UID),
RegistrationImage: klusterlet.Spec.RegistrationImagePullSpec,
WorkImage: klusterlet.Spec.WorkImagePullSpec,
ClusterName: klusterlet.Spec.ClusterName,
SingletonImage: klusterlet.Spec.ImagePullSpec,
HubKubeConfigSecret: helpers.HubKubeConfig,
ExternalServerURL: getServersFromKlusterlet(klusterlet),
OperatorNamespace: n.operatorNamespace,
Replica: replica,
PriorityClassName: helpers.AgentPriorityClassName(klusterlet, n.kubeVersion),
AppliedManifestWorkEvictionGracePeriod: getAppliedManifestWorkEvictionGracePeriod(klusterlet),
KlusterletName: klusterlet.Name,
KlusterletNamespace: helpers.KlusterletNamespace(klusterlet),
AgentNamespace: helpers.AgentNamespace(klusterlet),
AgentID: string(klusterlet.UID),
RegistrationImage: klusterlet.Spec.RegistrationImagePullSpec,
WorkImage: klusterlet.Spec.WorkImagePullSpec,
ClusterName: klusterlet.Spec.ClusterName,
SingletonImage: klusterlet.Spec.ImagePullSpec,
HubKubeConfigSecret: helpers.HubKubeConfig,
ExternalServerURL: getServersFromKlusterlet(klusterlet),
OperatorNamespace: n.operatorNamespace,
Replica: replica,
PriorityClassName: helpers.AgentPriorityClassName(klusterlet, n.kubeVersion),
ExternalManagedKubeConfigSecret: helpers.ExternalManagedKubeConfig,
ExternalManagedKubeConfigRegistrationSecret: helpers.ExternalManagedKubeConfigRegistration,
@@ -385,6 +385,12 @@ func (n *klusterletController) sync(ctx context.Context, controllerContext facto
workFeatureGates = klusterlet.Spec.WorkConfiguration.FeatureGates
config.WorkKubeAPIQPS = float32(klusterlet.Spec.WorkConfiguration.KubeAPIQPS)
config.WorkKubeAPIBurst = klusterlet.Spec.WorkConfiguration.KubeAPIBurst
if klusterlet.Spec.WorkConfiguration.AppliedManifestWorkEvictionGracePeriod != nil {
config.AppliedManifestWorkEvictionGracePeriod = klusterlet.Spec.WorkConfiguration.AppliedManifestWorkEvictionGracePeriod.Duration.String()
}
if klusterlet.Spec.WorkConfiguration.StatusSyncInterval != nil {
config.WorkStatusSyncInterval = klusterlet.Spec.WorkConfiguration.StatusSyncInterval.Duration.String()
}
}
config.WorkFeatureGates, workFeatureMsgs = helpers.ConvertToFeatureGateFlags("Work", workFeatureGates, ocmfeature.DefaultSpokeWorkFeatureGates)
@@ -474,22 +480,6 @@ func getServersFromKlusterlet(klusterlet *operatorapiv1.Klusterlet) string {
return strings.Join(serverString, ",")
}
func getAppliedManifestWorkEvictionGracePeriod(klusterlet *operatorapiv1.Klusterlet) string {
if klusterlet == nil {
return ""
}
if klusterlet.Spec.WorkConfiguration == nil {
return ""
}
if klusterlet.Spec.WorkConfiguration.AppliedManifestWorkEvictionGracePeriod == nil {
return ""
}
return klusterlet.Spec.WorkConfiguration.AppliedManifestWorkEvictionGracePeriod.Duration.String()
}
// getManagedKubeConfig is a helper func for Hosted mode, it will retrieve managed cluster
// kubeconfig from "external-managed-kubeconfig" secret.
func getManagedKubeConfig(ctx context.Context, kubeClient kubernetes.Interface,

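The controller converts the configured interval with metav1.Duration's Duration.String(), which normalizes the value; that is why the tests below expect "20s" and "30m0s" rather than a literal "30m". A small sketch of that behavior:

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// time.Duration.String() pads minutes with a seconds component, so a
	// 30-minute interval is rendered as "30m0s" on the generated flag.
	fmt.Println(metav1.Duration{Duration: 20 * time.Second}.Duration.String()) // 20s
	fmt.Println(metav1.Duration{Duration: 30 * time.Minute}.Duration.String()) // 30m0s
}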

@@ -479,7 +479,9 @@ func assertRegistrationDeployment(t *testing.T, actions []clienttesting.Action,
}
}
func assertWorkDeployment(t *testing.T, actions []clienttesting.Action, verb, clusterName string, mode operatorapiv1.InstallMode, replica int32) {
func assertWorkDeployment(t *testing.T, actions []clienttesting.Action, verb, clusterName string,
mode operatorapiv1.InstallMode, replica int32,
workStatusSyncInterval, appliedManifestWorkEvictionGracePeriod string) {
deployment := getDeployments(actions, verb, "work-agent")
if deployment == nil {
t.Errorf("work deployment not found")
@@ -506,11 +508,14 @@ func assertWorkDeployment(t *testing.T, actions []clienttesting.Action, verb, cl
}
expectArgs = append(expectArgs, "--terminate-on-files=/spoke/hub-kubeconfig/kubeconfig")
if *deployment.Spec.Replicas == 1 {
expectArgs = append(expectArgs, "--status-sync-interval=60s")
if workStatusSyncInterval != "" {
expectArgs = append(expectArgs, fmt.Sprintf("--status-sync-interval=%s", workStatusSyncInterval))
}
expectArgs = append(expectArgs, "--kube-api-qps=20", "--kube-api-burst=50")
if appliedManifestWorkEvictionGracePeriod != "" {
expectArgs = append(expectArgs, fmt.Sprintf("--appliedmanifestwork-eviction-grace-period=%s", appliedManifestWorkEvictionGracePeriod))
}
if !equality.Semantic.DeepEqual(args, expectArgs) {
t.Errorf("Expect args %v, but got %v", expectArgs, args)
@@ -1126,7 +1131,8 @@ func TestReplica(t *testing.T) {
// should have 1 replica for registration deployment and 0 for work
assertRegistrationDeployment(t, controller.kubeClient.Actions(), createVerb, "", "cluster1", 1, false)
assertWorkDeployment(t, controller.kubeClient.Actions(), createVerb, "cluster1", operatorapiv1.InstallModeDefault, 0)
assertWorkDeployment(t, controller.kubeClient.Actions(), createVerb, "cluster1", operatorapiv1.InstallModeDefault,
0, "", "")
klusterlet = newKlusterlet("klusterlet", "testns", "cluster1")
klusterlet.Status.Conditions = []metav1.Condition{
@@ -1149,7 +1155,8 @@ func TestReplica(t *testing.T) {
}
// should have 1 replica for work
assertWorkDeployment(t, controller.kubeClient.Actions(), "update", "cluster1", operatorapiv1.InstallModeDefault, 1)
assertWorkDeployment(t, controller.kubeClient.Actions(), "update", "cluster1", operatorapiv1.InstallModeDefault,
1, "60s", "")
controller.kubeClient.PrependReactor("list", "nodes", func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) {
if action.GetVerb() != "list" {
@@ -1171,7 +1178,51 @@ func TestReplica(t *testing.T) {
// should have 3 replicas for clusters with multiple nodes
assertRegistrationDeployment(t, controller.kubeClient.Actions(), "update", "", "cluster1", 3, false)
assertWorkDeployment(t, controller.kubeClient.Actions(), "update", "cluster1", operatorapiv1.InstallModeDefault, 3)
assertWorkDeployment(t, controller.kubeClient.Actions(), "update", "cluster1", operatorapiv1.InstallModeDefault,
3, "", "")
}
func TestWorkConfig(t *testing.T) {
klusterlet := newKlusterlet("klusterlet", "testns", "cluster1")
workSyncInterval := metav1.Duration{Duration: 20 * time.Second}
appliedManifestWorkEvictionGracePeriod := metav1.Duration{Duration: 30 * time.Minute}
if klusterlet.Spec.WorkConfiguration == nil {
klusterlet.Spec.WorkConfiguration = &operatorapiv1.WorkAgentConfiguration{
StatusSyncInterval: &workSyncInterval,
AppliedManifestWorkEvictionGracePeriod: &appliedManifestWorkEvictionGracePeriod,
}
} else {
klusterlet.Spec.WorkConfiguration.StatusSyncInterval = &workSyncInterval
klusterlet.Spec.WorkConfiguration.AppliedManifestWorkEvictionGracePeriod = &appliedManifestWorkEvictionGracePeriod
}
klusterlet.Status.Conditions = []metav1.Condition{
{
Type: operatorapiv1.ConditionHubConnectionDegraded,
Status: metav1.ConditionFalse,
},
}
hubSecret := newSecret(helpers.HubKubeConfig, "testns")
hubSecret.Data["kubeconfig"] = []byte("dummykubeconfig")
hubSecret.Data["cluster-name"] = []byte("cluster1")
objects := []runtime.Object{
newNamespace("testns"),
newSecret(helpers.BootstrapHubKubeConfig, "testns"),
hubSecret,
}
syncContext := testingcommon.NewFakeSyncContext(t, "klusterlet")
controller := newTestController(t, klusterlet, syncContext.Recorder(), nil, false,
objects...)
err := controller.controller.sync(context.TODO(), syncContext)
if err != nil {
t.Errorf("Expected non error when sync, %v", err)
}
assertRegistrationDeployment(t, controller.kubeClient.Actions(), createVerb, "", "cluster1", 1, false)
assertWorkDeployment(t, controller.kubeClient.Actions(), createVerb, "cluster1", operatorapiv1.InstallModeDefault,
1, "20s", "30m0s")
}
func TestClusterNameChange(t *testing.T) {
@@ -1243,7 +1294,7 @@ func TestClusterNameChange(t *testing.T) {
if err != nil {
t.Errorf("Expected non error when sync, %v", err)
}
assertWorkDeployment(t, controller.kubeClient.Actions(), "update", "cluster2", "", 0)
assertWorkDeployment(t, controller.kubeClient.Actions(), "update", "cluster2", "", 0, "", "")
// Update klusterlet with different cluster name and rerun sync
klusterlet = newKlusterlet("klusterlet", "testns", "cluster3")
@@ -1260,7 +1311,7 @@ func TestClusterNameChange(t *testing.T) {
t.Errorf("Expected non error when sync, %v", err)
}
assertRegistrationDeployment(t, controller.kubeClient.Actions(), "update", "https://localhost", "cluster3", 1, false)
assertWorkDeployment(t, controller.kubeClient.Actions(), "update", "cluster3", "", 0)
assertWorkDeployment(t, controller.kubeClient.Actions(), "update", "cluster3", "", 0, "", "")
}
func TestSyncWithPullSecret(t *testing.T) {
@@ -1484,58 +1535,3 @@ func (f *fakeManagedClusterBuilder) build(_ context.Context) (*managedClusterCli
kubeconfigSecretCreationTime: creationTime,
}, nil
}
func TestGetAppliedManifestWorkEvictionGracePeriod(t *testing.T) {
cases := []struct {
name string
klusterlet *operatorapiv1.Klusterlet
workConfiguration *operatorapiv1.WorkAgentConfiguration
expectedEvictionGracePeriod string
}{
{
name: "klusterlet is nil",
},
{
name: "without workConfiguration",
klusterlet: newKlusterlet("test", "test-ns", "test"),
},
{
name: "without appliedManifestWorkEvictionGracePeriod",
klusterlet: newKlusterlet("test", "test-ns", "test"),
workConfiguration: &operatorapiv1.WorkAgentConfiguration{},
},
{
name: "with appliedManifestWorkEvictionGracePeriod",
klusterlet: newKlusterlet("test", "test-ns", "test"),
workConfiguration: &operatorapiv1.WorkAgentConfiguration{
AppliedManifestWorkEvictionGracePeriod: &metav1.Duration{
Duration: 10 * time.Minute,
},
},
expectedEvictionGracePeriod: "10m",
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
if c.klusterlet != nil {
c.klusterlet.Spec.WorkConfiguration = c.workConfiguration
}
actualString := getAppliedManifestWorkEvictionGracePeriod(c.klusterlet)
if len(actualString) == 0 || len(c.expectedEvictionGracePeriod) == 0 {
assert.Equal(t, c.expectedEvictionGracePeriod, actualString)
} else {
expected, err := time.ParseDuration(c.expectedEvictionGracePeriod)
if err != nil {
t.Errorf("Failed to parse duration: %s", c.expectedEvictionGracePeriod)
}
actual, err := time.ParseDuration(actualString)
if err != nil {
t.Errorf("Failed to parse duration: %s", actualString)
}
assert.Equal(t, expected, actual)
}
})
}
}


@@ -160,7 +160,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered,
return err
}
return fmt.Errorf("the managedClusterAddon should be deleted")
return fmt.Errorf("the managedClusterAddon %s should be deleted", addOnName)
}).ShouldNot(gomega.HaveOccurred())
ginkgo.By(fmt.Sprintf("delete addon template resources for cluster %v", universalClusterName))


@@ -11,6 +11,7 @@ import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clusterv1beta2 "open-cluster-management.io/api/cluster/v1beta2"
@@ -172,7 +173,12 @@ var _ = BeforeSuite(func() {
},
},
}, metav1.CreateOptions{})
Expect(err).ToNot(HaveOccurred())
if err != nil {
// ignore the already exists error so we can run the e2e test multiple times locally
if !errors.IsAlreadyExists(err) {
Expect(err).ToNot(HaveOccurred())
}
}
Eventually(func() error {
umc, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), universalClusterName, metav1.GetOptions{})


@@ -3,6 +3,7 @@ package framework
import (
"context"
"fmt"
"time"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
@@ -84,6 +85,9 @@ func (spoke *Spoke) CreateKlusterlet(
DeployOption: operatorapiv1.KlusterletDeployOption{
Mode: mode,
},
WorkConfiguration: &operatorapiv1.WorkAgentConfiguration{
StatusSyncInterval: &metav1.Duration{Duration: 5 * time.Second},
},
},
}

vendor/modules.txt

@@ -1714,7 +1714,7 @@ open-cluster-management.io/addon-framework/pkg/agent
open-cluster-management.io/addon-framework/pkg/assets
open-cluster-management.io/addon-framework/pkg/index
open-cluster-management.io/addon-framework/pkg/utils
# open-cluster-management.io/api v0.16.2-0.20250506092504-9143e192a0a7
# open-cluster-management.io/api v0.16.2-0.20250527062515-98a1d87193c1
## explicit; go 1.23.6
open-cluster-management.io/api/addon/v1alpha1
open-cluster-management.io/api/client/addon/clientset/versioned


@@ -264,7 +264,10 @@ spec:
reservedClusterClaimSuffixes:
description: Custom suffixes for reserved ClusterClaims.
items:
maxLength: 64
minLength: 1
type: string
maxItems: 10
type: array
required:
- maxCustomClusterClaims
@@ -486,6 +489,13 @@ spec:
If it is set empty, use the default value: 50
format: int32
type: integer
statusSyncInterval:
description: |-
StatusSyncInterval is the interval for the work agent to check the status of ManifestWorks.
A larger value means less frequent status syncs and fewer API calls to the managed cluster, and vice versa.
The value (x) must satisfy: 5s <= x <= 1h.
pattern: ^([0-9]+(s|m|h))+$
type: string
type: object
workImagePullSpec:
description: |-


@@ -193,6 +193,9 @@ type ClusterClaimConfiguration struct {
// Custom suffixes for reserved ClusterClaims.
// +optional
// +kubebuilder:validation:MaxItems=10
// +kubebuilder:validation:items:MinLength=1
// +kubebuilder:validation:items:MaxLength=64
ReservedClusterClaimSuffixes []string `json:"reservedClusterClaimSuffixes,omitempty"`
}
@@ -296,6 +299,14 @@ type WorkAgentConfiguration struct {
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Pattern="^([0-9]+(s|m|h))+$"
AppliedManifestWorkEvictionGracePeriod *metav1.Duration `json:"appliedManifestWorkEvictionGracePeriod,omitempty"`
// StatusSyncInterval is the interval for the work agent to check the status of ManifestWorks.
// A larger value means less frequent status syncs and fewer API calls to the managed cluster, and vice versa.
// The value (x) must satisfy: 5s <= x <= 1h.
// +optional
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Pattern="^([0-9]+(s|m|h))+$"
StatusSyncInterval *metav1.Duration `json:"statusSyncInterval,omitempty"`
}
const (

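For consumers of the API, a minimal sketch of opting a Klusterlet into the new field (object name and interval are illustrative):

package main

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	operatorapiv1 "open-cluster-management.io/api/operator/v1"
)

func main() {
	// A 30s resync satisfies both the CRD pattern and the documented
	// 5s <= x <= 1h range.
	_ = &operatorapiv1.Klusterlet{
		ObjectMeta: metav1.ObjectMeta{Name: "klusterlet"},
		Spec: operatorapiv1.KlusterletSpec{
			WorkConfiguration: &operatorapiv1.WorkAgentConfiguration{
				StatusSyncInterval: &metav1.Duration{Duration: 30 * time.Second},
			},
		},
	}
}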

@@ -780,6 +780,11 @@ func (in *WorkAgentConfiguration) DeepCopyInto(out *WorkAgentConfiguration) {
*out = new(metav1.Duration)
**out = **in
}
if in.StatusSyncInterval != nil {
in, out := &in.StatusSyncInterval, &out.StatusSyncInterval
*out = new(metav1.Duration)
**out = **in
}
return
}


@@ -348,6 +348,7 @@ var map_WorkAgentConfiguration = map[string]string{
"kubeAPIQPS": "KubeAPIQPS indicates the maximum QPS while talking with apiserver of hub cluster from the spoke cluster. If it is set empty, use the default value: 50",
"kubeAPIBurst": "KubeAPIBurst indicates the maximum burst of the throttle while talking with apiserver of hub cluster from the spoke cluster. If it is set empty, use the default value: 100",
"appliedManifestWorkEvictionGracePeriod": "AppliedManifestWorkEvictionGracePeriod is the eviction grace period the work agent will wait before evicting the AppliedManifestWorks, whose corresponding ManifestWorks are missing on the hub cluster, from the managed cluster. If not present, the default value of the work agent will be used.",
"statusSyncInterval": "StatusSyncInterval is the interval for the work agent to check the status of ManifestWorks. Larger value means less frequent status sync and less api calls to the managed cluster, vice versa. The value(x) should be: 5s <= x <= 1h.",
}
func (WorkAgentConfiguration) SwaggerDoc() map[string]string {


@@ -61,6 +61,17 @@ spec:
description: Spec represents the desired ManifestWork payload and Placement
reference to be reconciled
properties:
cascadeDeletionPolicy:
default: Background
description: |-
CascadeDeletionPolicy decides whether the manifestWorkReplicaSet is deleted before or after the related manifestWorks are gone.
Acceptable values are:
'Background' - the manifestWorkReplicaSet is deleted without waiting for the related manifestWorks to be gone.
'Foreground' - the manifestWorkReplicaSet is not deleted until the related manifestWorks are gone.
enum:
- Background
- Foreground
type: string
manifestWorkTemplate:
description: ManifestWorkTemplate is the ManifestWorkSpec that will
be used to generate a per-cluster ManifestWork
@@ -581,6 +592,7 @@ spec:
minItems: 1
type: array
required:
- cascadeDeletionPolicy
- placementRefs
type: object
status:


@@ -63,6 +63,16 @@ type ManifestWorkReplicaSetSpec struct {
// +kubebuilder:validation:MinItems=1
// +required
PlacementRefs []LocalPlacementReference `json:"placementRefs"`
// CascadeDeletionPolicy decides whether the manifestWorkReplicaSet is deleted before or after the related manifestWorks are gone.
// Acceptable values are:
// 'Background' - the manifestWorkReplicaSet is deleted without waiting for the related manifestWorks to be gone.
// 'Foreground' - the manifestWorkReplicaSet is not deleted until the related manifestWorks are gone.
// +kubebuilder:default=Background
// +kubebuilder:validation:Enum=Background;Foreground
// +kubebuilder:validation:Required
// +optional
CascadeDeletionPolicy CascadeDeletionPolicy `json:"cascadeDeletionPolicy,omitempty"`
}
// ManifestWorkReplicaSetStatus defines the observed state of ManifestWorkReplicaSet
@@ -172,3 +182,14 @@ const (
// Reason: AsExpected, NotAsExpected or Processing
ManifestWorkReplicaSetConditionManifestworkApplied string = "ManifestworkApplied"
)
// CascadeDeletionPolicy decides whether the manifestWorkReplicaSet is deleted before or after the related manifestWorks are gone.
type CascadeDeletionPolicy string
const (
// Foreground means the manifestWorkReplicaSet is not deleted until the related manifestWorks are gone.
Foreground CascadeDeletionPolicy = "Foreground"
// Background means the manifestWorkReplicaSet is deleted without waiting for the related manifestWorks to be gone.
Background CascadeDeletionPolicy = "Background"
)

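Likewise for the new cascade field, a minimal sketch of a ManifestWorkReplicaSet that opts into foreground deletion (names are illustrative):

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	workapiv1alpha1 "open-cluster-management.io/api/work/v1alpha1"
)

func main() {
	// Foreground keeps the ManifestWorkReplicaSet until its per-cluster
	// ManifestWorks are gone; Background (the default) does not wait.
	_ = &workapiv1alpha1.ManifestWorkReplicaSet{
		ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "default"},
		Spec: workapiv1alpha1.ManifestWorkReplicaSetSpec{
			PlacementRefs: []workapiv1alpha1.LocalPlacementReference{
				{Name: "placement-example"},
			},
			CascadeDeletionPolicy: workapiv1alpha1.Foreground,
		},
	}
}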

@@ -39,9 +39,10 @@ func (ManifestWorkReplicaSetList) SwaggerDoc() map[string]string {
}
var map_ManifestWorkReplicaSetSpec = map[string]string{
"": "ManifestWorkReplicaSetSpec defines the desired state of ManifestWorkReplicaSet",
"manifestWorkTemplate": "ManifestWorkTemplate is the ManifestWorkSpec that will be used to generate a per-cluster ManifestWork",
"placementRefs": "PacementRefs is a list of the names of the Placement resource, from which a PlacementDecision will be found and used to distribute the ManifestWork.",
"": "ManifestWorkReplicaSetSpec defines the desired state of ManifestWorkReplicaSet",
"manifestWorkTemplate": "ManifestWorkTemplate is the ManifestWorkSpec that will be used to generate a per-cluster ManifestWork",
"placementRefs": "PacementRefs is a list of the names of the Placement resource, from which a PlacementDecision will be found and used to distribute the ManifestWork.",
"cascadeDeletionPolicy": "CascadeDeletionPolicy decides the manifestWorkReplicaSet is deleted before/after the related manifestWorks are gone. Acceptable values are: 'Background'- the manifestWorkReplicaSet is deleted without waiting for the related manifestWorks to be gone. 'Foreground'- the manifestWorkReplicaSet is deleted until the related manifestWorks are gone.",
}
func (ManifestWorkReplicaSetSpec) SwaggerDoc() map[string]string {