support update strategy in manifestwork (#139)

* Add ssa strategy

Signed-off-by: Jian Qiu <jqiu@redhat.com>

* Add test cases

Signed-off-by: Jian Qiu <jqiu@redhat.com>

* Resolve comments

Signed-off-by: Jian Qiu <jqiu@redhat.com>

* Use default manager in api repo

Signed-off-by: Jian Qiu <jqiu@redhat.com>

* Refactor deleteOption

patch ownerref separately for deleteOption

Signed-off-by: Jian Qiu <jqiu@redhat.com>

* Do not reconcile when ssa conflict

Signed-off-by: Jian Qiu <jqiu@redhat.com>
This commit is contained in:
Jian Qiu
2022-07-07 10:09:42 +08:00
committed by GitHub
parent 718605172f
commit 1a7686ee47
14 changed files with 1363 additions and 187 deletions

View File

@@ -68,6 +68,40 @@ spec:
resource:
description: Resource is the resource name of the Kubernetes resource.
type: string
executor:
description: Executor is the configuration that makes the work agent to perform some pre-request processing/checking. e.g. the executor identity tells the work agent to check the executor has sufficient permission to write the workloads to the local managed cluster. Note that nil executor is still supported for backward-compatibility which indicates that the work agent will not perform any additional actions before applying resources.
type: object
properties:
subject:
description: Subject is the subject identity which the work agent uses to talk to the local cluster when applying the resources.
type: object
required:
- type
properties:
serviceAccount:
description: ServiceAccount is for identifying which service account to use by the work agent. Only required if the type is "ServiceAccount".
type: object
required:
- name
- namespace
properties:
name:
description: Name is the name of the service account.
type: string
maxLength: 253
minLength: 1
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)$
namespace:
description: Namespace is the namespace of the service account.
type: string
maxLength: 253
minLength: 1
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)$
type:
description: 'Type is the type of the subject identity. Supported types are: "ServiceAccount".'
type: string
enum:
- ServiceAccount
manifestConfigs:
description: ManifestConfigs represents the configurations of manifests defined in workload field.
type: array
@@ -75,11 +109,10 @@ spec:
description: ManifestConfigOption represents the configurations of a manifest defined in workload field.
type: object
required:
- feedbackRules
- resourceIdentifier
properties:
feedbackRules:
description: FeedbackRules defines what resource status field should be returned.
description: FeedbackRules defines what resource status field should be returned. If it is not set or empty, no feedback rules will be honored.
type: array
items:
type: object
@@ -129,6 +162,32 @@ spec:
resource:
description: Resource is the resource name of the Kubernetes resource.
type: string
updateStrategy:
description: UpdateStrategy defines the strategy to update this manifest. It is optional and defaults to the Update strategy if not set.
type: object
required:
- type
properties:
serverSideApply:
description: serverSideApply defines the configuration for server side apply. It is honored only when type of updateStrategy is ServerSideApply
type: object
properties:
fieldManager:
description: FieldManager is the manager to apply the resource. It is work-agent by default, but can be any other name that has work-agent as its prefix.
type: string
default: work-agent
pattern: ^work-agent
force:
description: Force represents to force apply the manifest.
type: boolean
type:
description: type defines the strategy to update this manifest, default value is Update. Update type means to update resource by an update call. CreateOnly type means do not update resource based on current manifest. ServerSideApply type means to update resource using server side apply with work-controller as the field manager. If there is conflict, the related Applied condition of manifest will be in the status of False with the reason of ApplyConflict.
type: string
default: Update
enum:
- Update
- CreateOnly
- ServerSideApply
workload:
description: Workload represents the manifest workload to be deployed on a managed cluster.
type: object

4
go.mod
View File

@@ -9,6 +9,7 @@ require (
github.com/openshift/build-machinery-go v0.0.0-20220121085309-f94edc2d6874
github.com/openshift/generic-admission-server v1.14.1-0.20220220163846-6395b86cc87e
github.com/openshift/library-go v0.0.0-20220329193146-715792ed530d
github.com/pkg/errors v0.9.1
github.com/spf13/cobra v1.4.0
github.com/spf13/pflag v1.0.5
k8s.io/api v0.23.5
@@ -20,7 +21,7 @@ require (
k8s.io/klog/v2 v2.60.1
k8s.io/kube-aggregator v0.23.5
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9
open-cluster-management.io/api v0.7.0
open-cluster-management.io/api v0.7.1-0.20220629035306-4907911fd551
sigs.k8s.io/controller-runtime v0.11.1
)
@@ -64,7 +65,6 @@ require (
github.com/nxadm/tail v1.4.8 // indirect
github.com/openshift/api v0.0.0-20220315184754-d7c10d0b647e // indirect
github.com/openshift/client-go v0.0.0-20211209144617-7385dd6338e3 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pkg/profile v1.3.0 // indirect
github.com/prometheus/client_golang v1.11.0 // indirect
github.com/prometheus/client_model v0.2.0 // indirect

4
go.sum
View File

@@ -1229,8 +1229,8 @@ modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I=
open-cluster-management.io/api v0.7.0 h1:Xt1tRCwt+wrhtCOEQ6g+7sFvIkMjffWnn5PSUSoKJcc=
open-cluster-management.io/api v0.7.0/go.mod h1:Wg7YOcVNxsNDj2G8ViWTD/utCfb9cZc9MpNb4fKlXSs=
open-cluster-management.io/api v0.7.1-0.20220629035306-4907911fd551 h1:FOzEuNJ+G5QGcUODSAJou5dWP6wphnETQY1dVWJLoX4=
open-cluster-management.io/api v0.7.1-0.20220629035306-4907911fd551/go.mod h1:+OEARSAl2jIhuLItUcS30UgLA3khmA9ihygLVxzEn+U=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=

View File

@@ -2,6 +2,7 @@ package helper
import (
"context"
"encoding/json"
"testing"
"time"
@@ -11,9 +12,11 @@ import (
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/diff"
fakedynamic "k8s.io/client-go/dynamic/fake"
clienttesting "k8s.io/client-go/testing"
fakeworkclient "open-cluster-management.io/api/client/work/clientset/versioned/fake"
workapiv1 "open-cluster-management.io/api/work/v1"
)
@@ -504,3 +507,132 @@ func TestHubHash(t *testing.T) {
})
}
}
// TestFindManifestConiguration verifies that FindManifestConiguration matches
// a manifest's resource meta against the configured resource identifiers and
// returns the matching option, or nil when nothing matches.
func TestFindManifestConiguration(t *testing.T) {
	testCases := []struct {
		name     string
		options  []workapiv1.ManifestConfigOption
		meta     workapiv1.ManifestResourceMeta
		expected *workapiv1.ManifestConfigOption
	}{
		{
			name:     "nil options",
			options:  nil,
			meta:     workapiv1.ManifestResourceMeta{Group: "", Resource: "configmaps", Name: "test", Namespace: "testns"},
			expected: nil,
		},
		{
			name: "options not found",
			options: []workapiv1.ManifestConfigOption{
				{ResourceIdentifier: workapiv1.ResourceIdentifier{Group: "", Resource: "nodes", Name: "node1"}},
				{ResourceIdentifier: workapiv1.ResourceIdentifier{Group: "", Resource: "configmaps", Name: "test1", Namespace: "testns"}},
			},
			meta:     workapiv1.ManifestResourceMeta{Group: "", Resource: "configmaps", Name: "test", Namespace: "testns"},
			expected: nil,
		},
		{
			name: "options found",
			options: []workapiv1.ManifestConfigOption{
				{ResourceIdentifier: workapiv1.ResourceIdentifier{Group: "", Resource: "nodes", Name: "node1"}},
				{ResourceIdentifier: workapiv1.ResourceIdentifier{Group: "", Resource: "configmaps", Name: "test", Namespace: "testns"}},
			},
			meta: workapiv1.ManifestResourceMeta{Group: "", Resource: "configmaps", Name: "test", Namespace: "testns"},
			expected: &workapiv1.ManifestConfigOption{
				ResourceIdentifier: workapiv1.ResourceIdentifier{Group: "", Resource: "configmaps", Name: "test", Namespace: "testns"},
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			got := FindManifestConiguration(tc.meta, tc.options)
			if !equality.Semantic.DeepEqual(got, tc.expected) {
				t.Errorf("expect option to be %v, but got %v", tc.expected, got)
			}
		})
	}
}
// TestApplyOwnerReferences verifies that ApplyOwnerReferences issues a merge
// patch only when the merged owner references differ from the existing ones,
// and that the emitted patch carries the expected owner list.
func TestApplyOwnerReferences(t *testing.T) {
	testCases := []struct {
		name       string
		existing   []metav1.OwnerReference
		required   metav1.OwnerReference
		wantPatch  bool
		wantOwners []metav1.OwnerReference
	}{
		{
			name:       "add a owner",
			required:   metav1.OwnerReference{Name: "n1", UID: "a"},
			wantPatch:  true,
			wantOwners: []metav1.OwnerReference{{Name: "n1", UID: "a"}},
		},
		{
			name:       "append a owner",
			existing:   []metav1.OwnerReference{{Name: "n2", UID: "b"}},
			required:   metav1.OwnerReference{Name: "n1", UID: "a"},
			wantPatch:  true,
			wantOwners: []metav1.OwnerReference{{Name: "n2", UID: "b"}, {Name: "n1", UID: "a"}},
		},
		{
			// A UID suffixed with "-" marks the owner for removal
			// (resourcemerge.MergeOwnerRefs convention).
			name:       "remove a owner",
			existing:   []metav1.OwnerReference{{Name: "n2", UID: "b"}, {Name: "n1", UID: "a"}},
			required:   metav1.OwnerReference{Name: "n1", UID: "a-"},
			wantPatch:  true,
			wantOwners: []metav1.OwnerReference{{Name: "n2", UID: "b"}},
		},
		{
			name:      "remove a non existing owner",
			existing:  []metav1.OwnerReference{{Name: "n2", UID: "b"}, {Name: "n1", UID: "a"}},
			required:  metav1.OwnerReference{Name: "n3", UID: "c-"},
			wantPatch: false,
		},
		{
			name:      "append an existing owner",
			existing:  []metav1.OwnerReference{{Name: "n2", UID: "b"}, {Name: "n1", UID: "a"}},
			required:  metav1.OwnerReference{Name: "n1", UID: "a"},
			wantPatch: false,
		},
	}

	scheme := runtime.NewScheme()
	if err := corev1.AddToScheme(scheme); err != nil {
		t.Fatal(err)
	}
	for _, c := range testCases {
		t.Run(c.name, func(t *testing.T) {
			object := newSecret("ns1", "n1", false, "ns1-n1", c.existing...)
			fakeClient := fakedynamic.NewSimpleDynamicClient(scheme, object)
			gvr := schema.GroupVersionResource{Version: "v1", Resource: "secrets"}
			err := ApplyOwnerReferences(context.TODO(), fakeClient, gvr, object, c.required)
			if err != nil {
				t.Errorf("apply err: %v", err)
			}
			actions := fakeClient.Actions()
			if !c.wantPatch {
				// No-op cases must not reach the API server at all.
				if len(actions) > 0 {
					t.Fatalf("expect not patch but got %v", actions)
				}
				return
			}
			if len(actions) != 1 {
				t.Fatalf("expect patch action but got %v", actions)
			}
			patch := actions[0].(clienttesting.PatchAction).GetPatch()
			patchedObject := &metav1.PartialObjectMetadata{}
			err = json.Unmarshal(patch, patchedObject)
			if err != nil {
				// Fixed message: this is a decode (unmarshal) failure.
				t.Fatalf("failed to unmarshal patch: %v", err)
			}
			if !equality.Semantic.DeepEqual(c.wantOwners, patchedObject.GetOwnerReferences()) {
				t.Errorf("want ownerrefs %v, but got %v", c.wantOwners, patchedObject.GetOwnerReferences())
			}
		})
	}
}

View File

@@ -3,6 +3,7 @@ package helper
import (
"context"
"crypto/sha256"
"encoding/json"
"fmt"
"strings"
"time"
@@ -17,6 +18,7 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
@@ -232,18 +234,9 @@ func DeleteAppliedResources(
continue
}
// Merge with the existing owners to move the owner.
modified := resourcemerge.BoolPtr(false)
resourcemerge.MergeOwnerRefs(modified, &existingOwner, []metav1.OwnerReference{*ownerCopy})
// If there are still any other existing owners (not only ManifestWorks), update ownerrefs only.
if len(existingOwner) > 0 {
if !*modified {
continue
}
u.SetOwnerReferences(existingOwner)
_, err = dynamicClient.Resource(gvr).Namespace(resource.Namespace).Update(ctx, u, metav1.UpdateOptions{})
if len(existingOwner) > 1 {
err := ApplyOwnerReferences(ctx, dynamicClient, gvr, u, *ownerCopy)
if err != nil {
errs = append(errs, fmt.Errorf(
"failed to remove owner from resource %v with key %s/%s: %w",
@@ -361,3 +354,49 @@ func NewAppliedManifestWorkOwner(appliedWork *workapiv1.AppliedManifestWork) *me
UID: appliedWork.UID,
}
}
// FindManifestConiguration returns the manifest config option whose resource
// identifier exactly matches the given resource meta, or nil when no option
// in manifestOptions matches.
func FindManifestConiguration(resourceMeta workapiv1.ManifestResourceMeta, manifestOptions []workapiv1.ManifestConfigOption) *workapiv1.ManifestConfigOption {
	target := workapiv1.ResourceIdentifier{
		Group:     resourceMeta.Group,
		Resource:  resourceMeta.Resource,
		Namespace: resourceMeta.Namespace,
		Name:      resourceMeta.Name,
	}
	for i := range manifestOptions {
		if manifestOptions[i].ResourceIdentifier != target {
			continue
		}
		// Return a pointer to a copy so callers never alias the input slice.
		found := manifestOptions[i]
		return &found
	}
	return nil
}
// ApplyOwnerReferences merges requiredOwner into the existing object's owner
// references and issues a merge patch when the merged set differs from what
// is currently on the object. Following resourcemerge.MergeOwnerRefs
// semantics, a required owner whose UID ends with "-" requests removal of
// that owner. It is a no-op when the merge produces no change.
func ApplyOwnerReferences(ctx context.Context, dynamicClient dynamic.Interface, gvr schema.GroupVersionResource, existing runtime.Object, requiredOwner metav1.OwnerReference) error {
	accessor, err := meta.Accessor(existing)
	if err != nil {
		// %T (not %t, the boolean verb) so the message shows the Go type.
		return fmt.Errorf("type %T cannot be accessed: %v", existing, err)
	}
	patch := &unstructured.Unstructured{}
	// Carry UID and resourceVersion in the patch so the server rejects it
	// with a conflict if the object was recreated or modified meanwhile.
	patch.SetUID(accessor.GetUID())
	patch.SetResourceVersion(accessor.GetResourceVersion())

	// Merge the required owner into the current set; modified reports
	// whether the merge changed anything.
	modified := false
	patchedOwner := accessor.GetOwnerReferences()
	resourcemerge.MergeOwnerRefs(&modified, &patchedOwner, []metav1.OwnerReference{requiredOwner})
	patch.SetOwnerReferences(patchedOwner)

	if !modified {
		// Nothing to change; skip the API call entirely.
		return nil
	}

	patchData, err := json.Marshal(patch)
	if err != nil {
		return err
	}

	klog.V(2).Infof("Patching resource %v %s/%s with patch %s", gvr, accessor.GetNamespace(), accessor.GetName(), string(patchData))
	_, err = dynamicClient.Resource(gvr).Namespace(accessor.GetNamespace()).Patch(ctx, accessor.GetName(), types.MergePatchType, patchData, metav1.PatchOptions{})
	return err
}

View File

@@ -2,6 +2,7 @@ package manifestcontroller
import (
"context"
"encoding/json"
"fmt"
"reflect"
"strings"
@@ -9,7 +10,7 @@ import (
apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -19,13 +20,16 @@ import (
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/retry"
"k8s.io/klog/v2"
"k8s.io/utils/pointer"
"github.com/openshift/library-go/pkg/controller/factory"
"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
"github.com/openshift/library-go/pkg/operator/resource/resourcemerge"
"github.com/pkg/errors"
workv1client "open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1"
workinformer "open-cluster-management.io/api/client/work/informers/externalversions/work/v1"
worklister "open-cluster-management.io/api/client/work/listers/work/v1"
@@ -58,6 +62,14 @@ type applyResult struct {
resourceMeta workapiv1.ManifestResourceMeta
}
// serverSideApplyConflictError wraps a conflict error returned by a
// server-side apply request. It marks conflicts that cannot be resolved by
// the controller's error fallback, so they are reported via the Applied
// condition instead of being retried (see the errors.As check in sync).
type serverSideApplyConflictError struct {
	ssaErr error
}

// Error returns the message of the wrapped apply error.
func (e *serverSideApplyConflictError) Error() string {
	return e.ssaErr.Error()
}

// Unwrap exposes the wrapped error so errors.Is/errors.As (and
// apierrors.IsConflict via errors.As) can inspect the underlying API error.
func (e *serverSideApplyConflictError) Unwrap() error {
	return e.ssaErr
}
// NewManifestWorkController returns a ManifestWorkController
func NewManifestWorkController(
ctx context.Context,
@@ -105,7 +117,7 @@ func (m *ManifestWorkController) sync(ctx context.Context, controllerContext fac
klog.V(4).Infof("Reconciling ManifestWork %q", manifestWorkName)
manifestWork, err := m.manifestWorkLister.Get(manifestWorkName)
if errors.IsNotFound(err) {
if apierrors.IsNotFound(err) {
// work not found, could have been deleted, do nothing.
return nil
}
@@ -135,7 +147,7 @@ func (m *ManifestWorkController) sync(ctx context.Context, controllerContext fac
appliedManifestWorkName := fmt.Sprintf("%s-%s", m.hubHash, manifestWork.Name)
appliedManifestWork, err := m.appliedManifestWorkLister.Get(appliedManifestWorkName)
switch {
case errors.IsNotFound(err):
case apierrors.IsNotFound(err):
appliedManifestWork = &workapiv1.AppliedManifestWork{
ObjectMeta: metav1.ObjectMeta{
Name: appliedManifestWorkName,
@@ -162,10 +174,10 @@ func (m *ManifestWorkController) sync(ctx context.Context, controllerContext fac
resourceResults := make([]applyResult, len(manifestWork.Spec.Workload.Manifests))
err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
resourceResults = m.applyManifests(
ctx, manifestWork.Spec.Workload.Manifests, manifestWork.Spec.DeleteOption, controllerContext.Recorder(), *owner, resourceResults)
ctx, manifestWork.Spec.Workload.Manifests, manifestWork.Spec, controllerContext.Recorder(), *owner, resourceResults)
for _, result := range resourceResults {
if errors.IsConflict(result.Error) {
if apierrors.IsConflict(result.Error) {
return result.Error
}
}
@@ -178,7 +190,9 @@ func (m *ManifestWorkController) sync(ctx context.Context, controllerContext fac
newManifestConditions := []workapiv1.ManifestCondition{}
for _, result := range resourceResults {
if result.Error != nil {
// ignore server side apply conflict error since it cannot be resolved by error fallback.
var ssaConflict *serverSideApplyConflictError
if result.Error != nil && !errors.As(result.Error, &ssaConflict) {
errs = append(errs, result.Error)
}
@@ -209,7 +223,7 @@ func (m *ManifestWorkController) sync(ctx context.Context, controllerContext fac
func (m *ManifestWorkController) applyManifests(
ctx context.Context,
manifests []workapiv1.Manifest,
deleteOption *workapiv1.DeleteOption,
workSpec workapiv1.ManifestWorkSpec,
recorder events.Recorder,
owner metav1.OwnerReference,
existingResults []applyResult) []applyResult {
@@ -218,10 +232,10 @@ func (m *ManifestWorkController) applyManifests(
switch {
case existingResults[index].Result == nil:
// Apply if there is no result.
existingResults[index] = m.applyOneManifest(ctx, index, manifest, deleteOption, recorder, owner)
case errors.IsConflict(existingResults[index].Error):
existingResults[index] = m.applyOneManifest(ctx, index, manifest, workSpec, recorder, owner)
case apierrors.IsConflict(existingResults[index].Error):
// Apply if there is a resource conflict error.
existingResults[index] = m.applyOneManifest(ctx, index, manifest, deleteOption, recorder, owner)
existingResults[index] = m.applyOneManifest(ctx, index, manifest, workSpec, recorder, owner)
}
}
@@ -232,7 +246,7 @@ func (m *ManifestWorkController) applyOneManifest(
ctx context.Context,
index int,
manifest workapiv1.Manifest,
deleteOption *workapiv1.DeleteOption,
workSpec workapiv1.ManifestWorkSpec,
recorder events.Recorder,
owner metav1.OwnerReference) applyResult {
@@ -243,59 +257,123 @@ func (m *ManifestWorkController) applyOneManifest(
result := applyResult{}
unstructuredObj := &unstructured.Unstructured{}
if err := unstructuredObj.UnmarshalJSON(manifest.Raw); err != nil {
// parse the required and set resource meta
required := &unstructured.Unstructured{}
if err := required.UnmarshalJSON(manifest.Raw); err != nil {
result.Error = err
return result
}
resMeta, gvr, err := buildResourceMeta(index, unstructuredObj, m.restMapper)
resMeta, gvr, err := buildResourceMeta(index, required, m.restMapper)
result.resourceMeta = resMeta
if err != nil {
result.Error = err
return result
}
owner = manageOwnerRef(gvr, resMeta.Namespace, resMeta.Name, deleteOption, owner)
unstructuredObj.SetOwnerReferences([]metav1.OwnerReference{owner})
// try to get the existing at first.
existing, existingErr := m.spokeDynamicClient.
Resource(gvr).
Namespace(required.GetNamespace()).
Get(ctx, required.GetName(), metav1.GetOptions{})
if existingErr != nil && !apierrors.IsNotFound(existingErr) {
result.Error = existingErr
return result
}
results := resourceapply.ApplyDirectly(ctx, clientHolder, recorder, m.staticResourceCache, func(name string) ([]byte, error) {
return unstructuredObj.MarshalJSON()
}, "manifest")
// compute required ownerrefs based on delete option
requiredOwner := manageOwnerRef(gvr, resMeta.Namespace, resMeta.Name, workSpec.DeleteOption, owner)
result.Result = results[0].Result
result.Changed = results[0].Changed
result.Error = results[0].Error
// find update strategy option.
option := helper.FindManifestConiguration(resMeta, workSpec.ManifestConfigs)
// strategy is update by default
strategy := workapiv1.UpdateStrategy{Type: workapiv1.UpdateStrategyTypeUpdate}
if option != nil && option.UpdateStrategy != nil {
strategy = *option.UpdateStrategy
}
// Try apply with dynamic client if the manifest cannot be decoded by scheme or typed client is not found
// TODO we should check the certain error.
// Use dynamic client when scheme cannot decode manifest or typed client cannot handle the object
if isDecodeError(result.Error) || isUnhandledError(result.Error) || isUnsupportedError(result.Error) {
result.Result, result.Changed, result.Error = m.applyUnstructured(ctx, unstructuredObj, gvr, recorder)
// apply resource based on update strategy
switch strategy.Type {
case workapiv1.UpdateStrategyTypeServerSideApply:
result.Result, result.Changed, result.Error = m.serverSideApply(ctx, gvr, required, option.UpdateStrategy.ServerSideApply, recorder)
case workapiv1.UpdateStrategyTypeCreateOnly, workapiv1.UpdateStrategyTypeUpdate:
if strategy.Type == workapiv1.UpdateStrategyTypeCreateOnly && existingErr == nil {
result.Result = existing
break
}
required.SetOwnerReferences([]metav1.OwnerReference{requiredOwner})
results := resourceapply.ApplyDirectly(ctx, clientHolder, recorder, m.staticResourceCache, func(name string) ([]byte, error) {
return required.MarshalJSON()
}, "manifest")
result.Result = results[0].Result
result.Changed = results[0].Changed
result.Error = results[0].Error
// Try apply with dynamic client if the manifest cannot be decoded by scheme or typed client is not found
// TODO we should check the certain error.
// Use dynamic client when scheme cannot decode manifest or typed client cannot handle the object
if isDecodeError(result.Error) || isUnhandledError(result.Error) || isUnsupportedError(result.Error) {
result.Result, result.Changed, result.Error = m.applyUnstructured(ctx, existing, required, gvr, recorder)
}
}
// patch the ownerref no matter apply fails or not.
if result.Error == nil {
result.Error = helper.ApplyOwnerReferences(ctx, m.spokeDynamicClient, gvr, result.Result, requiredOwner)
}
return result
}
func (m *ManifestWorkController) applyUnstructured(
func (m *ManifestWorkController) serverSideApply(
ctx context.Context,
required *unstructured.Unstructured,
gvr schema.GroupVersionResource,
required *unstructured.Unstructured,
config *workapiv1.ServerSideApplyConfig,
recorder events.Recorder) (*unstructured.Unstructured, bool, error) {
existing, err := m.spokeDynamicClient.
force := false
fieldManager := workapiv1.DefaultFieldManager
if config != nil {
force = config.Force
if len(config.FieldManager) > 0 {
fieldManager = config.FieldManager
}
}
patch, err := json.Marshal(resourcemerge.WithCleanLabelsAndAnnotations(required))
if err != nil {
return nil, false, err
}
// TODO use Apply method instead when upgrading the client-go to 0.25.x
actual, err := m.spokeDynamicClient.
Resource(gvr).
Namespace(required.GetNamespace()).
Get(ctx, required.GetName(), metav1.GetOptions{})
Patch(ctx, required.GetName(), types.ApplyPatchType, patch, metav1.PatchOptions{FieldManager: fieldManager, Force: pointer.Bool(force)})
resourceKey, _ := cache.MetaNamespaceKeyFunc(required)
recorder.Eventf(fmt.Sprintf(
"Server Side Applied %s %s", required.GetKind(), resourceKey), "Patched with field manager %s", fieldManager)
switch {
case errors.IsNotFound(err):
if apierrors.IsConflict(err) {
return actual, true, &serverSideApplyConflictError{ssaErr: err}
}
return actual, true, err
}
func (m *ManifestWorkController) applyUnstructured(
ctx context.Context,
existing, required *unstructured.Unstructured,
gvr schema.GroupVersionResource,
recorder events.Recorder) (*unstructured.Unstructured, bool, error) {
if existing == nil {
actual, err := m.spokeDynamicClient.Resource(gvr).Namespace(required.GetNamespace()).Create(
ctx, resourcemerge.WithCleanLabelsAndAnnotations(required).(*unstructured.Unstructured), metav1.CreateOptions{})
recorder.Eventf(fmt.Sprintf(
"%s Created", required.GetKind()), "Created %s/%s because it was missing", required.GetNamespace(), required.GetName())
return actual, true, err
case err != nil:
return nil, false, err
}
// Merge OwnerRefs, Labels, and Annotations.

View File

@@ -9,6 +9,7 @@ import (
"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -97,6 +98,7 @@ func assertManifestCondition(
type testCase struct {
name string
workManifest []*unstructured.Unstructured
workManifestConfig []workapiv1.ManifestConfigOption
spokeObject []runtime.Object
spokeDynamicObject []runtime.Object
expectedWorkAction []string
@@ -116,6 +118,7 @@ func newTestCase(name string) *testCase {
return &testCase{
name: name,
workManifest: []*unstructured.Unstructured{},
workManifestConfig: []workapiv1.ManifestConfigOption{},
spokeObject: []runtime.Object{},
spokeDynamicObject: []runtime.Object{},
expectedWorkAction: []string{},
@@ -132,6 +135,11 @@ func (t *testCase) withWorkManifest(objects ...*unstructured.Unstructured) *test
return t
}
// withManifestConfig sets the per-manifest configuration options (update
// strategy, feedback rules, ...) placed on the test case's ManifestWork spec.
func (t *testCase) withManifestConfig(configs ...workapiv1.ManifestConfigOption) *testCase {
	t.workManifestConfig = configs
	return t
}
func (t *testCase) withSpokeObject(objects ...runtime.Object) *testCase {
t.spokeObject = objects
return t
@@ -270,6 +278,7 @@ func TestSync(t *testing.T) {
withWorkManifest(spoketesting.NewUnstructured("v1", "Secret", "ns1", "test")).
withExpectedWorkAction("update").
withAppliedWorkAction("create").
withExpectedDynamicAction("get").
withExpectedKubeAction("get", "create").
withExpectedManifestCondition(expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue}).
withExpectedWorkCondition(expectedCondition{string(workapiv1.WorkApplied), metav1.ConditionTrue}),
@@ -283,6 +292,7 @@ func TestSync(t *testing.T) {
newTestCase("update single resource").
withWorkManifest(spoketesting.NewUnstructured("v1", "Secret", "ns1", "test")).
withSpokeObject(spoketesting.NewSecret("test", "ns1", "value2")).
withExpectedDynamicAction("get").
withExpectedWorkAction("update").
withAppliedWorkAction("create").
withExpectedKubeAction("get", "delete", "create").
@@ -308,6 +318,7 @@ func TestSync(t *testing.T) {
withSpokeObject(spoketesting.NewSecret("test", "ns1", "value2")).
withExpectedWorkAction("update").
withAppliedWorkAction("create").
withExpectedDynamicAction("get", "get").
withExpectedKubeAction("get", "delete", "create", "get", "create").
withExpectedManifestCondition(expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue}, expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue}).
withExpectedWorkCondition(expectedCondition{string(workapiv1.WorkApplied), metav1.ConditionTrue}),
@@ -338,6 +349,7 @@ func TestFailedToApplyResource(t *testing.T) {
withSpokeObject(spoketesting.NewSecret("test", "ns1", "value2")).
withExpectedWorkAction("update").
withAppliedWorkAction("create").
withExpectedDynamicAction("get", "get").
withExpectedKubeAction("get", "delete", "create", "get", "create").
withExpectedManifestCondition(expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue}, expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionFalse}).
withExpectedWorkCondition(expectedCondition{string(workapiv1.WorkApplied), metav1.ConditionFalse})
@@ -369,6 +381,130 @@ func TestFailedToApplyResource(t *testing.T) {
tc.validate(t, controller.dynamicClient, controller.workClient, controller.kubeClient)
}
// TestUpdateStrategy verifies that the controller honors the per-manifest
// updateStrategy (Update, CreateOnly, ServerSideApply) configured through
// manifestConfigs, by asserting the sequence of actions recorded by the fake
// dynamic client for each scenario.
func TestUpdateStrategy(t *testing.T) {
	cases := []*testCase{
		// nil strategy falls back to the default Update behavior: get + update.
		newTestCase("update single resource with nil updateStrategy").
			withWorkManifest(spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}})).
			withSpokeDynamicObject(spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val2"}})).
			withManifestConfig(newManifestConfigOption("", "newobjects", "ns1", "n1", nil)).
			withExpectedWorkAction("update").
			withAppliedWorkAction("create").
			withExpectedDynamicAction("get", "update").
			withExpectedManifestCondition(expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue}).
			withExpectedWorkCondition(expectedCondition{string(workapiv1.WorkApplied), metav1.ConditionTrue}),
		// Explicit Update strategy behaves the same as the default.
		newTestCase("update single resource with update updateStrategy").
			withWorkManifest(spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}})).
			withSpokeDynamicObject(spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val2"}})).
			withManifestConfig(newManifestConfigOption("", "newobjects", "ns1", "n1", &workapiv1.UpdateStrategy{Type: workapiv1.UpdateStrategyTypeUpdate})).
			withExpectedWorkAction("update").
			withAppliedWorkAction("create").
			withExpectedDynamicAction("get", "update").
			withExpectedManifestCondition(expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue}).
			withExpectedWorkCondition(expectedCondition{string(workapiv1.WorkApplied), metav1.ConditionTrue}),
		// Config targets "n2" while the manifest is "n1", so the strategy is
		// not found and the default Update path is used.
		newTestCase("create single resource with updateStrategy not found").
			withWorkManifest(spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}})).
			withSpokeDynamicObject(spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val2"}})).
			withManifestConfig(newManifestConfigOption("", "newobjects", "ns1", "n2", &workapiv1.UpdateStrategy{Type: workapiv1.UpdateStrategyTypeServerSideApply})).
			withExpectedWorkAction("update").
			withAppliedWorkAction("create").
			withExpectedDynamicAction("get", "update").
			withExpectedManifestCondition(expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue}).
			withExpectedWorkCondition(expectedCondition{string(workapiv1.WorkApplied), metav1.ConditionTrue}),
		// SSA path: one patch for the apply itself, one for the ownerrefs.
		newTestCase("create single resource with server side apply updateStrategy").
			withWorkManifest(spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}})).
			withManifestConfig(newManifestConfigOption("", "newobjects", "ns1", "n1", &workapiv1.UpdateStrategy{Type: workapiv1.UpdateStrategyTypeServerSideApply})).
			withExpectedWorkAction("update").
			withAppliedWorkAction("create").
			withExpectedDynamicAction("get", "patch", "patch").
			withExpectedManifestCondition(expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue}).
			withExpectedWorkCondition(expectedCondition{string(workapiv1.WorkApplied), metav1.ConditionTrue}),
		newTestCase("update single resource with server side apply updateStrategy").
			withWorkManifest(spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}})).
			withSpokeDynamicObject(spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val2"}})).
			withManifestConfig(newManifestConfigOption("", "newobjects", "ns1", "n1", &workapiv1.UpdateStrategy{Type: workapiv1.UpdateStrategyTypeServerSideApply})).
			withExpectedWorkAction("update").
			withAppliedWorkAction("create").
			withExpectedDynamicAction("get", "patch", "patch").
			withExpectedManifestCondition(expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue}).
			withExpectedWorkCondition(expectedCondition{string(workapiv1.WorkApplied), metav1.ConditionTrue}),
		// CreateOnly: existing object is left untouched; only the ownerref
		// patch is issued after the get.
		newTestCase("update single resource with create only updateStrategy").
			withWorkManifest(spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}})).
			withSpokeDynamicObject(spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val2"}})).
			withManifestConfig(newManifestConfigOption("", "newobjects", "ns1", "n1", &workapiv1.UpdateStrategy{Type: workapiv1.UpdateStrategyTypeCreateOnly})).
			withExpectedWorkAction("update").
			withAppliedWorkAction("create").
			withExpectedDynamicAction("get", "patch").
			withExpectedManifestCondition(expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue}).
			withExpectedWorkCondition(expectedCondition{string(workapiv1.WorkApplied), metav1.ConditionTrue}),
	}

	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			work, workKey := spoketesting.NewManifestWork(0, c.workManifest...)
			work.Spec.ManifestConfigs = c.workManifestConfig
			work.Finalizers = []string{controllers.ManifestWorkFinalizer}
			controller := newController(t, work, nil, spoketesting.NewFakeRestMapper()).
				withKubeObject(c.spokeObject...).
				withUnstructuredObject(c.spokeDynamicObject...)

			// The default reactor doesn't support apply, so we need our own (trivial) reactor
			controller.dynamicClient.PrependReactor("patch", "newobjects", func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) {
				return true, spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}}), nil // clusterroleaggregator drops returned objects so no point in constructing them
			})

			syncContext := spoketesting.NewFakeSyncContext(t, workKey)
			err := controller.controller.sync(context.TODO(), syncContext)
			if err != nil {
				t.Errorf("Should be success with no err: %v", err)
			}

			c.validate(t, controller.dynamicClient, controller.workClient, controller.kubeClient)
		})
	}
}
// TestServerSideApplyConflict verifies that when the dynamic client rejects a
// server-side apply with a conflict error, the manifest and work conditions
// are both reported as ConditionFalse while the sync call itself returns no
// error.
func TestServerSideApplyConflict(t *testing.T) {
	tc := newTestCase("update single resource with server side apply updateStrategy").
		withWorkManifest(spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}})).
		withSpokeDynamicObject(spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val2"}})).
		withManifestConfig(newManifestConfigOption("", "newobjects", "ns1", "n1", &workapiv1.UpdateStrategy{Type: workapiv1.UpdateStrategyTypeServerSideApply})).
		withExpectedWorkAction("update").
		withAppliedWorkAction("create").
		withExpectedDynamicAction("get", "patch").
		withExpectedManifestCondition(expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionFalse}).
		withExpectedWorkCondition(expectedCondition{string(workapiv1.WorkApplied), metav1.ConditionFalse})

	work, workKey := spoketesting.NewManifestWork(0, tc.workManifest...)
	work.Finalizers = []string{controllers.ManifestWorkFinalizer}
	work.Spec.ManifestConfigs = tc.workManifestConfig

	controller := newController(t, work, nil, spoketesting.NewFakeRestMapper()).
		withKubeObject(tc.spokeObject...).
		withUnstructuredObject(tc.spokeDynamicObject...)

	// The fake dynamic client has no built-in apply support; this reactor
	// stands in for the API server and answers every patch on "newobjects"
	// with a conflict error.
	controller.dynamicClient.PrependReactor("patch", "newobjects", func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) {
		return true, nil, errors.NewConflict(schema.GroupResource{Resource: "newobjects"}, "n1", fmt.Errorf("conflict error"))
	})

	syncContext := spoketesting.NewFakeSyncContext(t, workKey)
	if err := controller.controller.sync(context.TODO(), syncContext); err != nil {
		t.Errorf("Should be success with no err: %v", err)
	}

	tc.validate(t, controller.dynamicClient, controller.workClient, controller.kubeClient)
}
// newManifestConfigOption builds a ManifestConfigOption for the resource
// identified by group/resource/namespace/name, carrying the supplied update
// strategy.
func newManifestConfigOption(group, resource, namespace, name string, strategy *workapiv1.UpdateStrategy) workapiv1.ManifestConfigOption {
	identifier := workapiv1.ResourceIdentifier{
		Group:     group,
		Resource:  resource,
		Namespace: namespace,
		Name:      name,
	}
	return workapiv1.ManifestConfigOption{
		ResourceIdentifier: identifier,
		UpdateStrategy:     strategy,
	}
}
// Test unstructured compare
func TestIsSameUnstructured(t *testing.T) {
cases := []struct {
@@ -742,29 +878,27 @@ func TestApplyUnstructred(t *testing.T) {
cases := []struct {
name string
owner metav1.OwnerReference
existingObject []runtime.Object
existing *unstructured.Unstructured
required *unstructured.Unstructured
gvr schema.GroupVersionResource
validateActions func(t *testing.T, actions []clienttesting.Action)
}{
{
name: "create a new object with owner",
existingObject: []runtime.Object{},
owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: "testowner"},
required: spoketesting.NewUnstructured("v1", "Secret", "ns1", "test"),
gvr: schema.GroupVersionResource{Version: "v1", Resource: "secrets"},
name: "create a new object with owner",
owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: "testowner"},
required: spoketesting.NewUnstructured("v1", "Secret", "ns1", "test"),
gvr: schema.GroupVersionResource{Version: "v1", Resource: "secrets"},
validateActions: func(t *testing.T, actions []clienttesting.Action) {
if len(actions) != 2 {
t.Errorf("Expect 2 actions, but have %d", len(actions))
if len(actions) != 1 {
t.Errorf("Expect 1 actions, but have %d", len(actions))
}
spoketesting.AssertAction(t, actions[0], "get")
spoketesting.AssertAction(t, actions[1], "create")
spoketesting.AssertAction(t, actions[0], "create")
obj := actions[1].(clienttesting.CreateActionImpl).Object.(*unstructured.Unstructured)
obj := actions[0].(clienttesting.CreateActionImpl).Object.(*unstructured.Unstructured)
owners := obj.GetOwnerReferences()
if len(owners) != 1 {
t.Errorf("Expect 2 owners, but have %d", len(owners))
t.Errorf("Expect 1 owners, but have %d", len(owners))
}
if owners[0].UID != "testowner" {
@@ -773,20 +907,18 @@ func TestApplyUnstructred(t *testing.T) {
},
},
{
name: "create a new object without owner",
existingObject: []runtime.Object{},
owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: "testowner-"},
required: spoketesting.NewUnstructured("v1", "Secret", "ns1", "test"),
gvr: schema.GroupVersionResource{Version: "v1", Resource: "secrets"},
name: "create a new object without owner",
owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: "testowner-"},
required: spoketesting.NewUnstructured("v1", "Secret", "ns1", "test"),
gvr: schema.GroupVersionResource{Version: "v1", Resource: "secrets"},
validateActions: func(t *testing.T, actions []clienttesting.Action) {
if len(actions) != 2 {
t.Errorf("Expect 2 actions, but have %d", len(actions))
if len(actions) != 1 {
t.Errorf("Expect 1 actions, but have %d", len(actions))
}
spoketesting.AssertAction(t, actions[0], "get")
spoketesting.AssertAction(t, actions[1], "create")
spoketesting.AssertAction(t, actions[0], "create")
obj := actions[1].(clienttesting.CreateActionImpl).Object.(*unstructured.Unstructured)
obj := actions[0].(clienttesting.CreateActionImpl).Object.(*unstructured.Unstructured)
owners := obj.GetOwnerReferences()
if len(owners) != 0 {
t.Errorf("Expect 1 owners, but have %d", len(owners))
@@ -795,20 +927,19 @@ func TestApplyUnstructred(t *testing.T) {
},
{
name: "update an object owner",
existingObject: []runtime.Object{spoketesting.NewUnstructured(
"v1", "Secret", "ns1", "test", metav1.OwnerReference{APIVersion: "v1", Name: "test1", UID: "testowner1"})},
existing: spoketesting.NewUnstructured(
"v1", "Secret", "ns1", "test", metav1.OwnerReference{APIVersion: "v1", Name: "test1", UID: "testowner1"}),
owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: "testowner"},
required: spoketesting.NewUnstructured("v1", "Secret", "ns1", "test"),
gvr: schema.GroupVersionResource{Version: "v1", Resource: "secrets"},
validateActions: func(t *testing.T, actions []clienttesting.Action) {
if len(actions) != 2 {
t.Errorf("Expect 2 actions, but have %d", len(actions))
if len(actions) != 1 {
t.Errorf("Expect 1 actions, but have %d", len(actions))
}
spoketesting.AssertAction(t, actions[0], "get")
spoketesting.AssertAction(t, actions[1], "update")
spoketesting.AssertAction(t, actions[0], "update")
obj := actions[1].(clienttesting.UpdateActionImpl).Object.(*unstructured.Unstructured)
obj := actions[0].(clienttesting.UpdateActionImpl).Object.(*unstructured.Unstructured)
owners := obj.GetOwnerReferences()
if len(owners) != 2 {
t.Errorf("Expect 2 owners, but have %d", len(owners))
@@ -824,35 +955,32 @@ func TestApplyUnstructred(t *testing.T) {
},
{
name: "update an object without owner",
existingObject: []runtime.Object{spoketesting.NewUnstructured(
"v1", "Secret", "ns1", "test", metav1.OwnerReference{APIVersion: "v1", Name: "test1", UID: "testowner1"})},
existing: spoketesting.NewUnstructured(
"v1", "Secret", "ns1", "test", metav1.OwnerReference{APIVersion: "v1", Name: "test1", UID: "testowner1"}),
owner: metav1.OwnerReference{Name: "test", UID: "testowner-"},
required: spoketesting.NewUnstructured("v1", "Secret", "ns1", "test"),
gvr: schema.GroupVersionResource{Version: "v1", Resource: "secrets"},
validateActions: func(t *testing.T, actions []clienttesting.Action) {
if len(actions) != 0 {
t.Errorf("Expect 0 actions, but have %d", len(actions))
}
},
},
{
name: "remove an object owner",
existing: spoketesting.NewUnstructured(
"v1", "Secret", "ns1", "test", metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: "testowner"}),
owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: "testowner-"},
required: spoketesting.NewUnstructured("v1", "Secret", "ns1", "test"),
gvr: schema.GroupVersionResource{Version: "v1", Resource: "secrets"},
validateActions: func(t *testing.T, actions []clienttesting.Action) {
if len(actions) != 1 {
t.Errorf("Expect 1 actions, but have %d", len(actions))
}
spoketesting.AssertAction(t, actions[0], "get")
},
},
{
name: "remove an object owner",
existingObject: []runtime.Object{spoketesting.NewUnstructured(
"v1", "Secret", "ns1", "test", metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: "testowner"})},
owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: "testowner-"},
required: spoketesting.NewUnstructured("v1", "Secret", "ns1", "test"),
gvr: schema.GroupVersionResource{Version: "v1", Resource: "secrets"},
validateActions: func(t *testing.T, actions []clienttesting.Action) {
if len(actions) != 2 {
t.Errorf("Expect 2 actions, but have %d", len(actions))
}
spoketesting.AssertAction(t, actions[0], "update")
spoketesting.AssertAction(t, actions[0], "get")
spoketesting.AssertAction(t, actions[1], "update")
obj := actions[1].(clienttesting.UpdateActionImpl).Object.(*unstructured.Unstructured)
obj := actions[0].(clienttesting.UpdateActionImpl).Object.(*unstructured.Unstructured)
owners := obj.GetOwnerReferences()
if len(owners) != 0 {
t.Errorf("Expect 0 owner, but have %d", len(owners))
@@ -861,14 +989,12 @@ func TestApplyUnstructred(t *testing.T) {
},
{
name: "merge labels",
existingObject: []runtime.Object{
func() runtime.Object {
obj := spoketesting.NewUnstructured(
"v1", "Secret", "ns1", "test", metav1.OwnerReference{APIVersion: "v1", Name: "test1", UID: "testowner1"})
obj.SetLabels(map[string]string{"foo": "bar"})
return obj
}(),
},
existing: func() *unstructured.Unstructured {
obj := spoketesting.NewUnstructured(
"v1", "Secret", "ns1", "test", metav1.OwnerReference{APIVersion: "v1", Name: "test1", UID: "testowner1"})
obj.SetLabels(map[string]string{"foo": "bar"})
return obj
}(),
owner: metav1.OwnerReference{APIVersion: "v1", Name: "test1", UID: "testowner1"},
required: func() *unstructured.Unstructured {
obj := spoketesting.NewUnstructured(
@@ -878,14 +1004,13 @@ func TestApplyUnstructred(t *testing.T) {
}(),
gvr: schema.GroupVersionResource{Version: "v1", Resource: "secrets"},
validateActions: func(t *testing.T, actions []clienttesting.Action) {
if len(actions) != 2 {
t.Errorf("Expect 2 actions, but have %d", len(actions))
if len(actions) != 1 {
t.Errorf("Expect 1 actions, but have %d", len(actions))
}
spoketesting.AssertAction(t, actions[0], "get")
spoketesting.AssertAction(t, actions[1], "update")
spoketesting.AssertAction(t, actions[0], "update")
obj := actions[1].(clienttesting.UpdateActionImpl).Object.(*unstructured.Unstructured)
obj := actions[0].(clienttesting.UpdateActionImpl).Object.(*unstructured.Unstructured)
labels := obj.GetLabels()
if len(labels) != 2 {
t.Errorf("Expect 2 labels, but have %d", len(labels))
@@ -894,14 +1019,12 @@ func TestApplyUnstructred(t *testing.T) {
},
{
name: "merge annotation",
existingObject: []runtime.Object{
func() runtime.Object {
obj := spoketesting.NewUnstructured(
"v1", "Secret", "ns1", "test", metav1.OwnerReference{APIVersion: "v1", Name: "test1", UID: "testowner1"})
obj.SetAnnotations(map[string]string{"foo": "bar"})
return obj
}(),
},
existing: func() *unstructured.Unstructured {
obj := spoketesting.NewUnstructured(
"v1", "Secret", "ns1", "test", metav1.OwnerReference{APIVersion: "v1", Name: "test1", UID: "testowner1"})
obj.SetAnnotations(map[string]string{"foo": "bar"})
return obj
}(),
owner: metav1.OwnerReference{APIVersion: "v1", Name: "test1", UID: "testowner1"},
required: func() *unstructured.Unstructured {
obj := spoketesting.NewUnstructured(
@@ -911,14 +1034,13 @@ func TestApplyUnstructred(t *testing.T) {
}(),
gvr: schema.GroupVersionResource{Version: "v1", Resource: "secrets"},
validateActions: func(t *testing.T, actions []clienttesting.Action) {
if len(actions) != 2 {
t.Errorf("Expect 2 actions, but have %d", len(actions))
if len(actions) != 1 {
t.Errorf("Expect 1 actions, but have %d", len(actions))
}
spoketesting.AssertAction(t, actions[0], "get")
spoketesting.AssertAction(t, actions[1], "update")
spoketesting.AssertAction(t, actions[0], "update")
obj := actions[1].(clienttesting.UpdateActionImpl).Object.(*unstructured.Unstructured)
obj := actions[0].(clienttesting.UpdateActionImpl).Object.(*unstructured.Unstructured)
annotations := obj.GetAnnotations()
if len(annotations) != 2 {
t.Errorf("Expect 2 annotations, but have %d", len(annotations))
@@ -927,14 +1049,12 @@ func TestApplyUnstructred(t *testing.T) {
},
{
name: "set existing finalizer",
existingObject: []runtime.Object{
func() runtime.Object {
obj := spoketesting.NewUnstructured(
"v1", "Secret", "ns1", "test", metav1.OwnerReference{APIVersion: "v1", Name: "test1", UID: "testowner1"})
obj.SetFinalizers([]string{"foo"})
return obj
}(),
},
existing: func() *unstructured.Unstructured {
obj := spoketesting.NewUnstructured(
"v1", "Secret", "ns1", "test", metav1.OwnerReference{APIVersion: "v1", Name: "test1", UID: "testowner1"})
obj.SetFinalizers([]string{"foo"})
return obj
}(),
owner: metav1.OwnerReference{APIVersion: "v1", Name: "test1", UID: "testowner1"},
required: func() *unstructured.Unstructured {
obj := spoketesting.NewUnstructured(
@@ -944,24 +1064,20 @@ func TestApplyUnstructred(t *testing.T) {
}(),
gvr: schema.GroupVersionResource{Version: "v1", Resource: "secrets"},
validateActions: func(t *testing.T, actions []clienttesting.Action) {
if len(actions) != 1 {
t.Errorf("Expect 1 actions, but have %d", len(actions))
if len(actions) != 0 {
t.Errorf("Expect 0 actions, but have %d", len(actions))
}
spoketesting.AssertAction(t, actions[0], "get")
},
},
{
name: "nothing to update",
existingObject: []runtime.Object{
func() runtime.Object {
obj := spoketesting.NewUnstructured(
"v1", "Secret", "ns1", "test", metav1.OwnerReference{APIVersion: "v1", Name: "test1", UID: "testowner1"})
obj.SetLabels(map[string]string{"foo": "bar"})
obj.SetAnnotations(map[string]string{"foo": "bar"})
return obj
}(),
},
existing: func() *unstructured.Unstructured {
obj := spoketesting.NewUnstructured(
"v1", "Secret", "ns1", "test", metav1.OwnerReference{APIVersion: "v1", Name: "test1", UID: "testowner1"})
obj.SetLabels(map[string]string{"foo": "bar"})
obj.SetAnnotations(map[string]string{"foo": "bar"})
return obj
}(),
owner: metav1.OwnerReference{APIVersion: "v1", Name: "test1", UID: "testowner1"},
required: func() *unstructured.Unstructured {
obj := spoketesting.NewUnstructured(
@@ -972,11 +1088,9 @@ func TestApplyUnstructred(t *testing.T) {
}(),
gvr: schema.GroupVersionResource{Version: "v1", Resource: "secrets"},
validateActions: func(t *testing.T, actions []clienttesting.Action) {
if len(actions) != 1 {
t.Errorf("Expect 1 actions, but have %v", actions)
if len(actions) != 0 {
t.Errorf("Expect 0 actions, but have %v", actions)
}
spoketesting.AssertAction(t, actions[0], "get")
},
},
}
@@ -985,13 +1099,16 @@ func TestApplyUnstructred(t *testing.T) {
t.Run(c.name, func(t *testing.T) {
work, workKey := spoketesting.NewManifestWork(0)
work.Finalizers = []string{controllers.ManifestWorkFinalizer}
controller := newController(t, work, nil, spoketesting.NewFakeRestMapper()).
withUnstructuredObject(c.existingObject...)
objects := []runtime.Object{}
if c.existing != nil {
objects = append(objects, c.existing)
}
controller := newController(t, work, nil, spoketesting.NewFakeRestMapper()).withUnstructuredObject(objects...)
syncContext := spoketesting.NewFakeSyncContext(t, workKey)
c.required.SetOwnerReferences([]metav1.OwnerReference{c.owner})
_, _, err := controller.controller.applyUnstructured(
context.TODO(), c.required, c.gvr, syncContext.Recorder())
context.TODO(), c.existing, c.required, c.gvr, syncContext.Recorder())
if err != nil {
t.Errorf("expect no error, but got %v", err)

View File

@@ -22,6 +22,7 @@ import (
workinformer "open-cluster-management.io/api/client/work/informers/externalversions/work/v1"
worklister "open-cluster-management.io/api/client/work/listers/work/v1"
workapiv1 "open-cluster-management.io/api/work/v1"
"open-cluster-management.io/work/pkg/helper"
"open-cluster-management.io/work/pkg/spoke/statusfeedback"
)
@@ -199,26 +200,23 @@ func (c *AvailableStatusController) getFeedbackValues(
errs := []error{}
values := []workapiv1.FeedbackValue{}
identifier := workapiv1.ResourceIdentifier{
Group: resourceMeta.Group,
Resource: resourceMeta.Resource,
Namespace: resourceMeta.Namespace,
Name: resourceMeta.Name,
option := helper.FindManifestConiguration(resourceMeta, manifestOptions)
if option == nil || len(option.FeedbackRules) == 0 {
return values, metav1.Condition{
Type: statusFeedbackConditionType,
Reason: "NoStatusFeedbackSynced",
Status: metav1.ConditionTrue,
}
}
for _, field := range manifestOptions {
if field.ResourceIdentifier != identifier {
continue
for _, rule := range option.FeedbackRules {
valuesByRule, err := c.statusReader.GetValuesByRule(obj, rule)
if err != nil {
errs = append(errs, err)
}
for _, rule := range field.FeedbackRules {
valuesByRule, err := c.statusReader.GetValuesByRule(obj, rule)
if err != nil {
errs = append(errs, err)
}
if len(valuesByRule) > 0 {
values = append(values, valuesByRule...)
}
if len(valuesByRule) > 0 {
values = append(values, valuesByRule...)
}
}
@@ -233,14 +231,6 @@ func (c *AvailableStatusController) getFeedbackValues(
}
}
if len(values) == 0 {
return values, metav1.Condition{
Type: statusFeedbackConditionType,
Reason: "NoStatusFeedbackSynced",
Status: metav1.ConditionTrue,
}
}
return values, metav1.Condition{
Type: statusFeedbackConditionType,
Reason: "StatusFeedbackSynced",

View File

@@ -0,0 +1,434 @@
package integration
import (
"context"
"fmt"
"time"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
utilrand "k8s.io/apimachinery/pkg/util/rand"
"k8s.io/utils/pointer"
workapiv1 "open-cluster-management.io/api/work/v1"
"open-cluster-management.io/work/pkg/spoke"
"open-cluster-management.io/work/test/integration/util"
)
var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
var o *spoke.WorkloadAgentOptions
var cancel context.CancelFunc
var work *workapiv1.ManifestWork
var manifests []workapiv1.Manifest
var err error
ginkgo.BeforeEach(func() {
o = spoke.NewWorkloadAgentOptions()
o.HubKubeconfigFile = hubKubeconfigFileName
o.SpokeClusterName = utilrand.String(5)
o.StatusSyncInterval = 3 * time.Second
ns := &corev1.Namespace{}
ns.Name = o.SpokeClusterName
_, err := spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
var ctx context.Context
ctx, cancel = context.WithCancel(context.Background())
go startWorkAgent(ctx, o)
// reset manifests
manifests = nil
})
ginkgo.JustBeforeEach(func() {
work = util.NewManifestWork(o.SpokeClusterName, "", manifests)
})
ginkgo.AfterEach(func() {
if cancel != nil {
cancel()
}
err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), o.SpokeClusterName, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
})
ginkgo.Context("Create only strategy", func() {
var object *unstructured.Unstructured
ginkgo.BeforeEach(func() {
object, _, err = util.NewDeployment(o.SpokeClusterName, "deploy1", "sa")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
manifests = append(manifests, util.ToManifest(object))
})
ginkgo.It("deployed resource should not be updated when work is updated", func() {
work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{
{
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
Namespace: o.SpokeClusterName,
Name: "deploy1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
Type: workapiv1.UpdateStrategyTypeCreateOnly,
},
},
}
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
// update work
err = unstructured.SetNestedField(object.Object, int64(3), "spec", "replicas")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
gomega.Eventually(func() error {
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
work.Spec.Workload.Manifests[0] = util.ToManifest(object)
_, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
gomega.Eventually(func() error {
deploy, err := spokeKubeClient.AppsV1().Deployments(o.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
if err != nil {
return err
}
if *deploy.Spec.Replicas != 1 {
return fmt.Errorf("Replicas should not be changed")
}
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
})
})
ginkgo.Context("Server side apply strategy", func() {
var object *unstructured.Unstructured
ginkgo.BeforeEach(func() {
object, _, err = util.NewDeployment(o.SpokeClusterName, "deploy1", "sa")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
manifests = append(manifests, util.ToManifest(object))
})
ginkgo.It("deployed resource should be applied when work is updated", func() {
work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{
{
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
Namespace: o.SpokeClusterName,
Name: "deploy1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
Type: workapiv1.UpdateStrategyTypeServerSideApply,
},
},
}
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
// update work
err = unstructured.SetNestedField(object.Object, int64(3), "spec", "replicas")
gomega.Eventually(func() error {
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
work.Spec.Workload.Manifests[0] = util.ToManifest(object)
_, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
gomega.Eventually(func() error {
deploy, err := spokeKubeClient.AppsV1().Deployments(o.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
if err != nil {
return err
}
if *deploy.Spec.Replicas != 3 {
return fmt.Errorf("Replicas should be updated to 3 but got %d", *deploy.Spec.Replicas)
}
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
})
ginkgo.It("should get conflict if a field is taken by another manager", func() {
work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{
{
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
Namespace: o.SpokeClusterName,
Name: "deploy1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
Type: workapiv1.UpdateStrategyTypeServerSideApply,
},
},
}
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
// update deployment with another field manager
err = unstructured.SetNestedField(object.Object, int64(2), "spec", "replicas")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
patch, err := object.MarshalJSON()
gomega.Expect(err).ToNot(gomega.HaveOccurred())
_, err = spokeKubeClient.AppsV1().Deployments(o.SpokeClusterName).Patch(
context.Background(), "deploy1", types.ApplyPatchType, []byte(patch), metav1.PatchOptions{Force: pointer.Bool(true), FieldManager: "test-integration"})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
// Update deployment by work
err = unstructured.SetNestedField(object.Object, int64(3), "spec", "replicas")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
gomega.Eventually(func() error {
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
work.Spec.Workload.Manifests[0] = util.ToManifest(object)
_, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// Failed to apply due to conflict
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionFalse,
[]metav1.ConditionStatus{metav1.ConditionFalse}, eventuallyTimeout, eventuallyInterval)
// remove the replica field and the apply should work
unstructured.RemoveNestedField(object.Object, "spec", "replicas")
gomega.Eventually(func() error {
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
work.Spec.Workload.Manifests[0] = util.ToManifest(object)
_, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
})
ginkgo.It("two manifest works with different field manager", func() {
work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{
{
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
Namespace: o.SpokeClusterName,
Name: "deploy1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
Type: workapiv1.UpdateStrategyTypeServerSideApply,
},
},
}
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
// Create another work with different fieldmanager
objCopy := object.DeepCopy()
// work1 does not want to own replica field
unstructured.RemoveNestedField(objCopy.Object, "spec", "replicas")
work1 := util.NewManifestWork(o.SpokeClusterName, "another", []workapiv1.Manifest{util.ToManifest(objCopy)})
work1.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{
{
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
Namespace: o.SpokeClusterName,
Name: "deploy1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
Type: workapiv1.UpdateStrategyTypeServerSideApply,
ServerSideApply: &workapiv1.ServerSideApplyConfig{
Force: true,
FieldManager: "work-agent-another",
},
},
},
}
_, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work1, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(work1.Namespace, work1.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
// Update deployment replica by work should work since this work still owns the replicas field
err = unstructured.SetNestedField(object.Object, int64(3), "spec", "replicas")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
gomega.Eventually(func() error {
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
work.Spec.Workload.Manifests[0] = util.ToManifest(object)
_, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// This should work since this work still own replicas
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
gomega.Eventually(func() error {
deploy, err := spokeKubeClient.AppsV1().Deployments(o.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
if err != nil {
return err
}
if *deploy.Spec.Replicas != 3 {
return fmt.Errorf("expected replica is not correct, got %d", *deploy.Spec.Replicas)
}
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// Update sa field will not work
err = unstructured.SetNestedField(object.Object, "another-sa", "spec", "template", "spec", "serviceAccountName")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
gomega.Eventually(func() error {
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
work.Spec.Workload.Manifests[0] = util.ToManifest(object)
_, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
// This should work since this work still own replicas
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionFalse,
[]metav1.ConditionStatus{metav1.ConditionFalse}, eventuallyTimeout, eventuallyInterval)
})
// Verifies that two ManifestWorks can co-own one resource via the
// ServerSideApply update strategy (distinct field managers), and that
// switching the first work's DeleteOption to Orphan drops only that
// work's owner reference from the deployed resource.
ginkgo.It("with delete options", func() {
	// First work applies deploy1 with server-side apply using the
	// default field manager.
	work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{
		{
			ResourceIdentifier: workapiv1.ResourceIdentifier{
				Group:     "apps",
				Resource:  "deployments",
				Namespace: o.SpokeClusterName,
				Name:      "deploy1",
			},
			UpdateStrategy: &workapiv1.UpdateStrategy{
				Type: workapiv1.UpdateStrategyTypeServerSideApply,
			},
		},
	}
	work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
	gomega.Expect(err).ToNot(gomega.HaveOccurred())

	util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
		[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)

	// Create another work with a different fieldmanager so both works can
	// server-side-apply disjoint fields of the same deployment.
	objCopy := object.DeepCopy()
	// work1 does not want to own replica field, so it is removed from its manifest.
	unstructured.RemoveNestedField(objCopy.Object, "spec", "replicas")
	work1 := util.NewManifestWork(o.SpokeClusterName, "another", []workapiv1.Manifest{util.ToManifest(objCopy)})
	work1.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{
		{
			ResourceIdentifier: workapiv1.ResourceIdentifier{
				Group:     "apps",
				Resource:  "deployments",
				Namespace: o.SpokeClusterName,
				Name:      "deploy1",
			},
			UpdateStrategy: &workapiv1.UpdateStrategy{
				Type: workapiv1.UpdateStrategyTypeServerSideApply,
				// Force takeover of fields already managed by the first
				// work's field manager.
				ServerSideApply: &workapiv1.ServerSideApplyConfig{
					Force:        true,
					FieldManager: "work-agent-another",
				},
			},
		},
	}

	_, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work1, metav1.CreateOptions{})
	gomega.Expect(err).ToNot(gomega.HaveOccurred())

	util.AssertWorkCondition(work1.Namespace, work1.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
		[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)

	// Both works applied, so the deployment should carry an owner
	// reference from each of them.
	gomega.Eventually(func() error {
		deploy, err := spokeKubeClient.AppsV1().Deployments(o.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
		if err != nil {
			return err
		}

		if len(deploy.OwnerReferences) != 2 {
			return fmt.Errorf("expected ownerrefs is not correct, got %v", deploy.OwnerReferences)
		}

		return nil
	}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())

	// update deleteOption of the first work to Orphan; wrapped in
	// Eventually to retry through optimistic-concurrency conflicts.
	gomega.Eventually(func() error {
		work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		work.Spec.DeleteOption = &workapiv1.DeleteOption{PropagationPolicy: workapiv1.DeletePropagationPolicyTypeOrphan}

		_, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
		return err
	}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())

	// Orphaning should remove only the first work's owner reference,
	// leaving the one from work1.
	gomega.Eventually(func() error {
		deploy, err := spokeKubeClient.AppsV1().Deployments(o.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
		if err != nil {
			return err
		}

		if len(deploy.OwnerReferences) != 1 {
			return fmt.Errorf("expected ownerrefs is not correct, got %v", deploy.OwnerReferences)
		}

		return nil
	}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
})
})
})

4
vendor/modules.txt vendored
View File

@@ -1186,8 +1186,8 @@ k8s.io/utils/net
k8s.io/utils/path
k8s.io/utils/pointer
k8s.io/utils/trace
# open-cluster-management.io/api v0.7.0
## explicit; go 1.17
# open-cluster-management.io/api v0.7.1-0.20220629035306-4907911fd551
## explicit; go 1.18
open-cluster-management.io/api/client/work/clientset/versioned
open-cluster-management.io/api/client/work/clientset/versioned/fake
open-cluster-management.io/api/client/work/clientset/versioned/scheme

View File

@@ -68,6 +68,40 @@ spec:
resource:
description: Resource is the resource name of the Kubernetes resource.
type: string
executor:
description: Executor is the configuration that makes the work agent to perform some pre-request processing/checking. e.g. the executor identity tells the work agent to check the executor has sufficient permission to write the workloads to the local managed cluster. Note that nil executor is still supported for backward-compatibility which indicates that the work agent will not perform any additional actions before applying resources.
type: object
properties:
subject:
description: Subject is the subject identity which the work agent uses to talk to the local cluster when applying the resources.
type: object
required:
- type
properties:
serviceAccount:
description: ServiceAccount is for identifying which service account to use by the work agent. Only required if the type is "ServiceAccount".
type: object
required:
- name
- namespace
properties:
name:
description: Name is the name of the service account.
type: string
maxLength: 253
minLength: 1
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)$
namespace:
description: Namespace is the namespace of the service account.
type: string
maxLength: 253
minLength: 1
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)$
type:
description: 'Type is the type of the subject identity. Supported types are: "ServiceAccount".'
type: string
enum:
- ServiceAccount
manifestConfigs:
description: ManifestConfigs represents the configurations of manifests defined in workload field.
type: array
@@ -75,11 +109,10 @@ spec:
description: ManifestConfigOption represents the configurations of a manifest defined in workload field.
type: object
required:
- feedbackRules
- resourceIdentifier
properties:
feedbackRules:
description: FeedbackRules defines what resource status field should be returned.
description: FeedbackRules defines what resource status field should be returned. If it is not set or empty, no feedback rules will be honored.
type: array
items:
type: object
@@ -129,6 +162,32 @@ spec:
resource:
description: Resource is the resource name of the Kubernetes resource.
type: string
updateStrategy:
description: UpdateStrategy defines the strategy to update this manifest. UpdateStrategy is Update if it is not set.
type: object
required:
- type
properties:
serverSideApply:
description: serverSideApply defines the configuration for server side apply. It is honored only when type of updateStrategy is ServerSideApply
type: object
properties:
fieldManager:
description: FieldManager is the manager to apply the resource. It is work-agent by default, but can be other name with work-agent as the prefix.
type: string
default: work-agent
pattern: ^work-agent
force:
description: Force represents to force apply the manifest.
type: boolean
type:
description: type defines the strategy to update this manifest, default value is Update. Update type means to update resource by an update call. CreateOnly type means do not update resource based on current manifest. ServerSideApply type means to update resource using server side apply with work-controller as the field manager. If there is conflict, the related Applied condition of manifest will be in the status of False with the reason of ApplyConflict.
type: string
default: Update
enum:
- Update
- CreateOnly
- ServerSideApply
workload:
description: Workload represents the manifest workload to be deployed on a managed cluster.
type: object

View File

@@ -39,6 +39,14 @@ type ManifestWorkSpec struct {
// ManifestConfigs represents the configurations of manifests defined in workload field.
// +optional
ManifestConfigs []ManifestConfigOption `json:"manifestConfigs,omitempty"`
// Executor is the configuration that makes the work agent to perform some pre-request processing/checking.
// e.g. the executor identity tells the work agent to check the executor has sufficient permission to write
// the workloads to the local managed cluster.
// Note that nil executor is still supported for backward-compatibility which indicates that the work agent
// will not perform any additional actions before applying resources.
// +optional
Executor *ManifestWorkExecutor `json:"executor,omitempty"`
}
// Manifest represents a resource to be deployed on managed cluster.
@@ -78,12 +86,121 @@ type ManifestConfigOption struct {
// +required
ResourceIdentifier ResourceIdentifier `json:"resourceIdentifier"`
// FeedbackRules defines what resource status field should be returned.
// FeedbackRules defines what resource status field should be returned. If it is not set or empty,
// no feedback rules will be honored.
// +optional
FeedbackRules []FeedbackRule `json:"feedbackRules,omitempty"`
// UpdateStrategy defines the strategy to update this manifest. UpdateStrategy is Update
// if it is not set.
// +optional
UpdateStrategy *UpdateStrategy `json:"updateStrategy,omitempty"`
}
// ManifestWorkExecutor is the executor that applies the resources to the managed cluster. i.e. the
// work agent.
type ManifestWorkExecutor struct {
// Subject is the subject identity which the work agent uses to talk to the
// local cluster when applying the resources.
Subject ManifestWorkExecutorSubject `json:"subject"`
}
// ManifestWorkExecutorSubject is the subject identity used by the work agent to apply the resources.
// The work agent should check whether the applying resources are out-of-scope of the permission held
// by the executor identity.
type ManifestWorkExecutorSubject struct {
// Type is the type of the subject identity.
// Supported types are: "ServiceAccount".
// +kubebuilder:validation:Enum=ServiceAccount
// +kubebuilder:validation:Required
// +required
Type ManifestWorkExecutorSubjectType `json:"type"`
// ServiceAccount is for identifying which service account to use by the work agent.
// Only required if the type is "ServiceAccount".
// +optional
ServiceAccount *ManifestWorkSubjectServiceAccount `json:"serviceAccount,omitempty"`
}
// ManifestWorkExecutorSubjectType is the type of the subject.
type ManifestWorkExecutorSubjectType string
const (
// ExecutorSubjectTypeServiceAccount indicates that the workload resources belong to a ServiceAccount
// in the managed cluster.
ExecutorSubjectTypeServiceAccount ManifestWorkExecutorSubjectType = "ServiceAccount"
)
// ManifestWorkSubjectServiceAccount references service account in the managed clusters.
type ManifestWorkSubjectServiceAccount struct {
// Namespace is the namespace of the service account.
// +kubebuilder:validation:Required
// +kubebuilder:validation:MinLength=1
// +kubebuilder:validation:MaxLength=253
// +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)$`
// +required
Namespace string `json:"namespace"`
// Name is the name of the service account.
// +kubebuilder:validation:Required
// +kubebuilder:validation:MinLength=1
// +kubebuilder:validation:MaxLength=253
// +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)$`
// +required
Name string `json:"name"`
}
// UpdateStrategy defines the strategy to update this manifest
type UpdateStrategy struct {
// type defines the strategy to update this manifest, default value is Update.
// Update type means to update resource by an update call.
// CreateOnly type means do not update resource based on current manifest.
// ServerSideApply type means to update resource using server side apply with work-controller as the field manager.
// If there is conflict, the related Applied condition of manifest will be in the status of False with the
// reason of ApplyConflict.
// +kubebuilder:default=Update
// +kubebuilder:validation:Enum=Update;CreateOnly;ServerSideApply
// +kubebuilder:validation:Required
// +required
Type UpdateStrategyType `json:"type,omitempty"`
// serverSideApply defines the configuration for server side apply. It is honored only when
// type of updateStrategy is ServerSideApply
// +optional
ServerSideApply *ServerSideApplyConfig `json:"serverSideApply,omitempty"`
}
type UpdateStrategyType string
const (
// Update type means to update resource by an update call.
UpdateStrategyTypeUpdate UpdateStrategyType = "Update"
// CreateOnly type means do not update resource based on current manifest. This should be used only when
// ServerSideApply type is not support on the spoke, and the user on hub would like some other controller
// on the spoke to own the control of the resource.
UpdateStrategyTypeCreateOnly UpdateStrategyType = "CreateOnly"
// ServerSideApply type means to update resource using server side apply with work-controller as the field manager.
// If there is conflict, the related Applied condition of manifest will be in the status of False with the
// reason of ApplyConflict. This type allows another controller on the spoke to control certain field of the resource.
UpdateStrategyTypeServerSideApply UpdateStrategyType = "ServerSideApply"
)
type ServerSideApplyConfig struct {
// Force represents to force apply the manifest.
// +optional
Force bool `json:"force"`
// FieldManager is the manager to apply the resource. It is work-agent by default, but can be other name with work-agent
// as the prefix.
// +kubebuilder:default=work-agent
// +kubebuilder:validation:Pattern=`^work-agent`
// +optional
FieldManager string `json:"fieldManager,omitempty"`
}
// DefaultFieldManager is the default field manager of the manifestwork when the field manager is not set.
const DefaultFieldManager = "work-agent"
type FeedbackRule struct {
// Type defines the option of how status can be returned.
// It can be jsonPaths or wellKnownStatus.

View File

@@ -284,6 +284,11 @@ func (in *ManifestConfigOption) DeepCopyInto(out *ManifestConfigOption) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.UpdateStrategy != nil {
in, out := &in.UpdateStrategy, &out.UpdateStrategy
*out = new(UpdateStrategy)
(*in).DeepCopyInto(*out)
}
return
}
@@ -364,6 +369,44 @@ func (in *ManifestWork) DeepCopyObject() runtime.Object {
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ManifestWorkExecutor) DeepCopyInto(out *ManifestWorkExecutor) {
*out = *in
in.Subject.DeepCopyInto(&out.Subject)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManifestWorkExecutor.
func (in *ManifestWorkExecutor) DeepCopy() *ManifestWorkExecutor {
if in == nil {
return nil
}
out := new(ManifestWorkExecutor)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ManifestWorkExecutorSubject) DeepCopyInto(out *ManifestWorkExecutorSubject) {
*out = *in
if in.ServiceAccount != nil {
in, out := &in.ServiceAccount, &out.ServiceAccount
*out = new(ManifestWorkSubjectServiceAccount)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManifestWorkExecutorSubject.
func (in *ManifestWorkExecutorSubject) DeepCopy() *ManifestWorkExecutorSubject {
if in == nil {
return nil
}
out := new(ManifestWorkExecutorSubject)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ManifestWorkList) DeepCopyInto(out *ManifestWorkList) {
*out = *in
@@ -413,6 +456,11 @@ func (in *ManifestWorkSpec) DeepCopyInto(out *ManifestWorkSpec) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Executor != nil {
in, out := &in.Executor, &out.Executor
*out = new(ManifestWorkExecutor)
(*in).DeepCopyInto(*out)
}
return
}
@@ -450,6 +498,22 @@ func (in *ManifestWorkStatus) DeepCopy() *ManifestWorkStatus {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ManifestWorkSubjectServiceAccount) DeepCopyInto(out *ManifestWorkSubjectServiceAccount) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManifestWorkSubjectServiceAccount.
func (in *ManifestWorkSubjectServiceAccount) DeepCopy() *ManifestWorkSubjectServiceAccount {
if in == nil {
return nil
}
out := new(ManifestWorkSubjectServiceAccount)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ManifestsTemplate) DeepCopyInto(out *ManifestsTemplate) {
*out = *in
@@ -526,6 +590,22 @@ func (in *SelectivelyOrphan) DeepCopy() *SelectivelyOrphan {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServerSideApplyConfig) DeepCopyInto(out *ServerSideApplyConfig) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerSideApplyConfig.
func (in *ServerSideApplyConfig) DeepCopy() *ServerSideApplyConfig {
if in == nil {
return nil
}
out := new(ServerSideApplyConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatusFeedbackResult) DeepCopyInto(out *StatusFeedbackResult) {
*out = *in
@@ -548,3 +628,24 @@ func (in *StatusFeedbackResult) DeepCopy() *StatusFeedbackResult {
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *UpdateStrategy) DeepCopyInto(out *UpdateStrategy) {
*out = *in
if in.ServerSideApply != nil {
in, out := &in.ServerSideApply, &out.ServerSideApply
*out = new(ServerSideApplyConfig)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateStrategy.
func (in *UpdateStrategy) DeepCopy() *UpdateStrategy {
if in == nil {
return nil
}
out := new(UpdateStrategy)
in.DeepCopyInto(out)
return out
}

View File

@@ -131,7 +131,8 @@ func (ManifestCondition) SwaggerDoc() map[string]string {
var map_ManifestConfigOption = map[string]string{
"": "ManifestConfigOption represents the configurations of a manifest defined in workload field.",
"resourceIdentifier": "ResourceIdentifier represents the group, resource, name and namespace of a resource. If this refers to a resource not created by this manifest work, the related rules will not be executed.",
"feedbackRules": "FeedbackRules defines what resource status field should be returned.",
"feedbackRules": "FeedbackRules defines what resource status field should be returned. If it is not set or empty, no feedback rules will be honored.",
"updateStrategy": "UpdateStrategy defines the strategy to update this manifest. UpdateStrategy is Update if it is not set.",
}
func (ManifestConfigOption) SwaggerDoc() map[string]string {
@@ -172,6 +173,25 @@ func (ManifestWork) SwaggerDoc() map[string]string {
return map_ManifestWork
}
var map_ManifestWorkExecutor = map[string]string{
"": "ManifestWorkExecutor is the executor that applies the resources to the managed cluster. i.e. the work agent.",
"subject": "Subject is the subject identity which the work agent uses to talk to the local cluster when applying the resources.",
}
func (ManifestWorkExecutor) SwaggerDoc() map[string]string {
return map_ManifestWorkExecutor
}
var map_ManifestWorkExecutorSubject = map[string]string{
"": "ManifestWorkExecutorSubject is the subject identity used by the work agent to apply the resources. The work agent should check whether the applying resources are out-of-scope of the permission held by the executor identity.",
"type": "Type is the type of the subject identity. Supported types are: \"ServiceAccount\".",
"serviceAccount": "ServiceAccount is for identifying which service account to use by the work agent. Only required if the type is \"ServiceAccount\".",
}
func (ManifestWorkExecutorSubject) SwaggerDoc() map[string]string {
return map_ManifestWorkExecutorSubject
}
var map_ManifestWorkList = map[string]string{
"": "ManifestWorkList is a collection of manifestworks.",
"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
@@ -187,6 +207,7 @@ var map_ManifestWorkSpec = map[string]string{
"workload": "Workload represents the manifest workload to be deployed on a managed cluster.",
"deleteOption": "DeleteOption represents deletion strategy when the manifestwork is deleted. Foreground deletion strategy is applied to all the resource in this manifestwork if it is not set.",
"manifestConfigs": "ManifestConfigs represents the configurations of manifests defined in workload field.",
"executor": "Executor is the configuration that makes the work agent to perform some pre-request processing/checking. e.g. the executor identity tells the work agent to check the executor has sufficient permission to write the workloads to the local managed cluster. Note that nil executor is still supported for backward-compatibility which indicates that the work agent will not perform any additional actions before applying resources.",
}
func (ManifestWorkSpec) SwaggerDoc() map[string]string {
@@ -203,6 +224,16 @@ func (ManifestWorkStatus) SwaggerDoc() map[string]string {
return map_ManifestWorkStatus
}
var map_ManifestWorkSubjectServiceAccount = map[string]string{
"": "ManifestWorkSubjectServiceAccount references service account in the managed clusters.",
"namespace": "Namespace is the namespace of the service account.",
"name": "Name is the name of the service account.",
}
func (ManifestWorkSubjectServiceAccount) SwaggerDoc() map[string]string {
return map_ManifestWorkSubjectServiceAccount
}
var map_ManifestsTemplate = map[string]string{
"": "ManifestsTemplate represents the manifest workload to be deployed on a managed cluster.",
"manifests": "Manifests represents a list of kuberenetes resources to be deployed on a managed cluster.",
@@ -233,6 +264,15 @@ func (SelectivelyOrphan) SwaggerDoc() map[string]string {
return map_SelectivelyOrphan
}
var map_ServerSideApplyConfig = map[string]string{
"force": "Force represents to force apply the manifest.",
"fieldManager": "FieldManager is the manager to apply the resource. It is work-agent by default, but can be other name with work-agent as the prefix.",
}
func (ServerSideApplyConfig) SwaggerDoc() map[string]string {
return map_ServerSideApplyConfig
}
var map_StatusFeedbackResult = map[string]string{
"": "StatusFeedbackResult represents the values of the feild synced back defined in statusFeedbacks",
"values": "Values represents the synced value of the interested field.",
@@ -242,4 +282,14 @@ func (StatusFeedbackResult) SwaggerDoc() map[string]string {
return map_StatusFeedbackResult
}
var map_UpdateStrategy = map[string]string{
"": "UpdateStrategy defines the strategy to update this manifest",
"type": "type defines the strategy to update this manifest, default value is Update. Update type means to update resource by an update call. CreateOnly type means do not update resource based on current manifest. ServerSideApply type means to update resource using server side apply with work-controller as the field manager. If there is conflict, the related Applied condition of manifest will be in the status of False with the reason of ApplyConflict.",
"serverSideApply": "serverSideApply defines the configuration for server side apply. It is honored only when type of updateStrategy is ServerSideApply",
}
func (UpdateStrategy) SwaggerDoc() map[string]string {
return map_UpdateStrategy
}
// AUTO-GENERATED FUNCTIONS END HERE