Mirror of https://github.com/kubevela/kubevela.git, synced 2026-02-14 18:10:21 +00:00
Support create resources (workloads/traits) in different namespace with application (#1197)
[WIP] create tracker if needed and update components
[WIP] add finalizer code and add logic for ac controller
[WIP] refactor handle func to app handler
[WIP] support helm cross namespace
[WIP] rewrite application controller logic
rebase master and solve conflict
[WIP] add base cross namespace workload and fix some bugs
add more e2e-tests and fix some bugs
rebase master and resolve conflict
move resourceTracker to v1beta1
add v1beta1 to test scheme to fix test
modify e2e test, change v1alpha2 application to v1beta1
delete resourceTracker crossplane category
add controller logic test
add unit test for finalizer related funcs
add more unit tests
reorder imports to work around check-diff error
add docs
add some comments in AC controller as the review suggested
fix some issues
@@ -203,6 +203,9 @@ type AppStatus struct {
	// Services record the status of the application services
	Services []ApplicationComponentStatus `json:"services,omitempty"`

	// ResourceTracker records the status of the ResourceTracker
	ResourceTracker *v1alpha12.TypedReference `json:"resourceTracker,omitempty"`

	// LatestRevision of the application configuration it generates
	// +optional
	LatestRevision *Revision `json:"latestRevision,omitempty"`
@@ -40,6 +40,11 @@ func (in *AppStatus) DeepCopyInto(out *AppStatus) {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.ResourceTracker != nil {
|
||||
in, out := &in.ResourceTracker, &out.ResourceTracker
|
||||
*out = new(v1alpha1.TypedReference)
|
||||
**out = **in
|
||||
}
|
||||
if in.LatestRevision != nil {
|
||||
in, out := &in.LatestRevision, &out.LatestRevision
|
||||
*out = new(Revision)
|
||||
|
||||
@@ -43,6 +43,9 @@ type AppStatus struct {
	// Services record the status of the application services
	Services []common.ApplicationComponentStatus `json:"services,omitempty"`

	// ResourceTracker records the status of the ResourceTracker
	ResourceTracker *runtimev1alpha1.TypedReference `json:"resourceTracker,omitempty"`

	// LatestRevision of the application configuration it generates
	// +optional
	LatestRevision *common.Revision `json:"latestRevision,omitempty"`
@@ -144,6 +144,11 @@ func (in *AppStatus) DeepCopyInto(out *AppStatus) {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.ResourceTracker != nil {
|
||||
in, out := &in.ResourceTracker, &out.ResourceTracker
|
||||
*out = new(v1alpha1.TypedReference)
|
||||
**out = **in
|
||||
}
|
||||
if in.LatestRevision != nil {
|
||||
in, out := &in.LatestRevision, &out.LatestRevision
|
||||
*out = new(common.Revision)
|
||||
|
||||
@@ -230,3 +230,21 @@ type ScopeDefinitionList struct {
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []ScopeDefinition `json:"items"`
}

// +kubebuilder:object:root=true

// A ResourceTracker represents a tracker for cross-namespace resources
// +kubebuilder:resource:scope=Cluster,categories={oam}
type ResourceTracker struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
}

// +kubebuilder:object:root=true

// ResourceTrackerList contains a list of ResourceTracker
type ResourceTrackerList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []ResourceTracker `json:"items"`
}
@@ -93,6 +93,14 @@ var (
|
||||
ScopeDefinitionGroupVersionKind = SchemeGroupVersion.WithKind(ScopeDefinitionKind)
|
||||
)
|
||||
|
||||
// ResourceTracker type metadata.
|
||||
var (
|
||||
ResourceTrackerKind = reflect.TypeOf(ResourceTracker{}).Name()
|
||||
ResourceTrackerGroupKind = schema.GroupKind{Group: Group, Kind: ResourceTrackerKind}.String()
|
||||
ResourceTrackerKindAPIVersion = ResourceTrackerKind + "." + SchemeGroupVersion.String()
|
||||
ResourceTrackerKindVersionKind = SchemeGroupVersion.WithKind(ResourceTrackerKind)
|
||||
)
|
||||
|
||||
// AppDeployment type metadata.
|
||||
var (
|
||||
AppDeploymentKind = reflect.TypeOf(AppDeployment{}).Name()
|
||||
@@ -110,4 +118,5 @@ func init() {
|
||||
SchemeBuilder.Register(&AppRollout{}, &AppRolloutList{})
|
||||
SchemeBuilder.Register(&ApplicationRevision{}, &ApplicationRevisionList{})
|
||||
SchemeBuilder.Register(&AppDeployment{}, &AppDeploymentList{})
|
||||
SchemeBuilder.Register(&ResourceTracker{}, &ResourceTrackerList{})
|
||||
}
|
||||
|
||||
@@ -753,6 +753,63 @@ func (in *PlacementStatus) DeepCopy() *PlacementStatus {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ResourceTracker) DeepCopyInto(out *ResourceTracker) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceTracker.
|
||||
func (in *ResourceTracker) DeepCopy() *ResourceTracker {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ResourceTracker)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *ResourceTracker) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ResourceTrackerList) DeepCopyInto(out *ResourceTrackerList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]ResourceTracker, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceTrackerList.
|
||||
func (in *ResourceTrackerList) DeepCopy() *ResourceTrackerList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ResourceTrackerList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *ResourceTrackerList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ScopeDefinition) DeepCopyInto(out *ScopeDefinition) {
|
||||
*out = *in
|
||||
|
||||
@@ -381,6 +381,26 @@ spec:
|
||||
- name
|
||||
- revision
|
||||
type: object
|
||||
resourceTracker:
|
||||
description: ResourceTracker records the status of the ResourceTracker
|
||||
properties:
|
||||
apiVersion:
|
||||
description: APIVersion of the referenced object.
|
||||
type: string
|
||||
kind:
|
||||
description: Kind of the referenced object.
|
||||
type: string
|
||||
name:
|
||||
description: Name of the referenced object.
|
||||
type: string
|
||||
uid:
|
||||
description: UID of the referenced object.
|
||||
type: string
|
||||
required:
|
||||
- apiVersion
|
||||
- kind
|
||||
- name
|
||||
type: object
|
||||
rollingState:
|
||||
description: RollingState is the Rollout State
|
||||
type: string
|
||||
@@ -1404,6 +1424,26 @@ spec:
|
||||
- name
|
||||
- revision
|
||||
type: object
|
||||
resourceTracker:
|
||||
description: ResourceTracker records the status of the ResourceTracker
|
||||
properties:
|
||||
apiVersion:
|
||||
description: APIVersion of the referenced object.
|
||||
type: string
|
||||
kind:
|
||||
description: Kind of the referenced object.
|
||||
type: string
|
||||
name:
|
||||
description: Name of the referenced object.
|
||||
type: string
|
||||
uid:
|
||||
description: UID of the referenced object.
|
||||
type: string
|
||||
required:
|
||||
- apiVersion
|
||||
- kind
|
||||
- name
|
||||
type: object
|
||||
rollingState:
|
||||
description: RollingState is the Rollout State
|
||||
type: string
|
||||
|
||||
@@ -380,6 +380,26 @@ spec:
|
||||
- name
|
||||
- revision
|
||||
type: object
|
||||
resourceTracker:
|
||||
description: ResourceTracker records the status of the ResourceTracker
|
||||
properties:
|
||||
apiVersion:
|
||||
description: APIVersion of the referenced object.
|
||||
type: string
|
||||
kind:
|
||||
description: Kind of the referenced object.
|
||||
type: string
|
||||
name:
|
||||
description: Name of the referenced object.
|
||||
type: string
|
||||
uid:
|
||||
description: UID of the referenced object.
|
||||
type: string
|
||||
required:
|
||||
- apiVersion
|
||||
- kind
|
||||
- name
|
||||
type: object
|
||||
rollingState:
|
||||
description: RollingState is the Rollout State
|
||||
type: string
|
||||
@@ -795,6 +815,26 @@ spec:
|
||||
- name
|
||||
- revision
|
||||
type: object
|
||||
resourceTracker:
|
||||
description: ResourceTracker records the status of the ResourceTracker
|
||||
properties:
|
||||
apiVersion:
|
||||
description: APIVersion of the referenced object.
|
||||
type: string
|
||||
kind:
|
||||
description: Kind of the referenced object.
|
||||
type: string
|
||||
name:
|
||||
description: Name of the referenced object.
|
||||
type: string
|
||||
uid:
|
||||
description: UID of the referenced object.
|
||||
type: string
|
||||
required:
|
||||
- apiVersion
|
||||
- kind
|
||||
- name
|
||||
type: object
|
||||
rollingState:
|
||||
description: RollingState is the Rollout State
|
||||
type: string
|
||||
|
||||
charts/vela-core/crds/core.oam.dev_resourcetrackers.yaml (new file, 41 lines)
@@ -0,0 +1,41 @@
|
||||
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.2.4
|
||||
name: resourcetrackers.core.oam.dev
|
||||
spec:
|
||||
group: core.oam.dev
|
||||
names:
|
||||
categories:
|
||||
- oam
|
||||
kind: ResourceTracker
|
||||
listKind: ResourceTrackerList
|
||||
plural: resourcetrackers
|
||||
singular: resourcetracker
|
||||
scope: Cluster
|
||||
versions:
|
||||
- name: v1beta1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
description: A ResourceTracker represents a tracker for cross-namespace resources
|
||||
properties:
|
||||
apiVersion:
|
||||
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
|
||||
type: string
|
||||
kind:
|
||||
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
type: object
|
||||
served: true
|
||||
storage: true
|
||||
status:
|
||||
acceptedNames:
|
||||
kind: ""
|
||||
plural: ""
|
||||
conditions: []
|
||||
storedVersions: []
|
||||
docs/en/cue/cross-namespace-resource.md (new file, 54 lines)
@@ -0,0 +1,54 @@
# Define resources located in a different namespace from the application

In this section, we will introduce how to use a CUE template to create resources (workload/trait) in a namespace different from the application's.

By default, the `metadata.namespace` of a K8s resource in a CUE template is automatically filled with the namespace of the application.

If you want to create K8s resources in a specific namespace that is different from the application's, you can set the `metadata.namespace` field.
KubeVela will create the resources in the specified namespace, and create a resourceTracker object as the owner of those resources.


## Usage

```yaml
apiVersion: core.oam.dev/v1beta1
kind: ComponentDefinition
metadata:
  name: worker
spec:
  definitionRef:
    name: deployments.apps
  schematic:
    cue:
      template: |
        parameter: {
          name:  string
          image: string
          namespace: string // the namespace parameter marks that the resource may be located in a namespace different from the application's
        }
        output: {
          apiVersion: "apps/v1"
          kind:       "Deployment"
          metadata: {
            namespace: parameter.namespace
          }
          spec: {
            selector: matchLabels: {
              "app.oam.dev/component": parameter.name
            }
            template: {
              metadata: labels: {
                "app.oam.dev/component": parameter.name
              }
              spec: {
                containers: [{
                  name:  parameter.name
                  image: parameter.image
                }]
              }}}
        }
```
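
For illustration, an Application that consumes the `worker` definition above might look like the following. The application name, namespaces, and image here are hypothetical placeholders, not part of this change:

```yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
  name: cross-ns-app        # hypothetical application name
  namespace: default        # the application's own namespace
spec:
  components:
    - name: backend
      type: worker          # the ComponentDefinition shown above
      properties:
        name: backend
        image: busybox
        namespace: demo-ns  # the Deployment is rendered into this namespace instead of "default"
```

Because the rendered Deployment lands in a namespace different from the application's, the application controller creates a cluster-scoped ResourceTracker named `<app-namespace>-<app-name>` (here `default-cross-ns-app`) and sets it as the owner of the Deployment, so the Deployment is cleaned up when the application is deleted.
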
## Limitations
If you update a definition by changing the `metadata.namespace` field, KubeVela will create new resources in the new namespace but will not delete the old resources.
We will fix this limitation in the near future.
@@ -379,6 +379,26 @@ spec:
|
||||
- name
|
||||
- revision
|
||||
type: object
|
||||
resourceTracker:
|
||||
description: ResourceTracker records the status of the ResourceTracker
|
||||
properties:
|
||||
apiVersion:
|
||||
description: APIVersion of the referenced object.
|
||||
type: string
|
||||
kind:
|
||||
description: Kind of the referenced object.
|
||||
type: string
|
||||
name:
|
||||
description: Name of the referenced object.
|
||||
type: string
|
||||
uid:
|
||||
description: UID of the referenced object.
|
||||
type: string
|
||||
required:
|
||||
- apiVersion
|
||||
- kind
|
||||
- name
|
||||
type: object
|
||||
rollingState:
|
||||
description: RollingState is the Rollout State
|
||||
type: string
|
||||
|
||||
@@ -367,6 +367,26 @@ spec:
|
||||
- name
|
||||
- revision
|
||||
type: object
|
||||
resourceTracker:
|
||||
description: ResourceTracker records the status of the ResourceTracker
|
||||
properties:
|
||||
apiVersion:
|
||||
description: APIVersion of the referenced object.
|
||||
type: string
|
||||
kind:
|
||||
description: Kind of the referenced object.
|
||||
type: string
|
||||
name:
|
||||
description: Name of the referenced object.
|
||||
type: string
|
||||
uid:
|
||||
description: UID of the referenced object.
|
||||
type: string
|
||||
required:
|
||||
- apiVersion
|
||||
- kind
|
||||
- name
|
||||
type: object
|
||||
rollingState:
|
||||
description: RollingState is the Rollout State
|
||||
type: string
|
||||
|
||||
@@ -0,0 +1,56 @@
|
||||
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.2.4
|
||||
name: resourcetrackerlists.core.oam.dev
|
||||
spec:
|
||||
group: core.oam.dev
|
||||
names:
|
||||
kind: ResourceTrackerList
|
||||
listKind: ResourceTrackerListList
|
||||
plural: resourcetrackerlists
|
||||
singular: resourcetrackerlist
|
||||
scope: Namespaced
|
||||
validation:
|
||||
openAPIV3Schema:
|
||||
description: ResourceTrackerList contains a list of ResourceTracker
|
||||
properties:
|
||||
apiVersion:
|
||||
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
|
||||
type: string
|
||||
items:
|
||||
items:
|
||||
description: A ResourceTracker represents a tracker for cross-namespace resources
|
||||
properties:
|
||||
apiVersion:
|
||||
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
|
||||
type: string
|
||||
kind:
|
||||
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
type: object
|
||||
type: array
|
||||
kind:
|
||||
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
required:
|
||||
- items
|
||||
type: object
|
||||
version: v1alpha2
|
||||
versions:
|
||||
- name: v1alpha2
|
||||
served: true
|
||||
storage: true
|
||||
status:
|
||||
acceptedNames:
|
||||
kind: ""
|
||||
plural: ""
|
||||
conditions: []
|
||||
storedVersions: []
|
||||
@@ -0,0 +1,42 @@
|
||||
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.2.4
|
||||
name: resourcetrackers.core.oam.dev
|
||||
spec:
|
||||
group: core.oam.dev
|
||||
names:
|
||||
categories:
|
||||
- oam
|
||||
kind: ResourceTracker
|
||||
listKind: ResourceTrackerList
|
||||
plural: resourcetrackers
|
||||
singular: resourcetracker
|
||||
scope: Cluster
|
||||
validation:
|
||||
openAPIV3Schema:
|
||||
description: A ResourceTracker represents a tracker for cross-namespace resources
|
||||
properties:
|
||||
apiVersion:
|
||||
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
|
||||
type: string
|
||||
kind:
|
||||
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
type: object
|
||||
version: v1beta1
|
||||
versions:
|
||||
- name: v1beta1
|
||||
served: true
|
||||
storage: true
|
||||
status:
|
||||
acceptedNames:
|
||||
kind: ""
|
||||
plural: ""
|
||||
conditions: []
|
||||
storedVersions: []
|
||||
@@ -324,8 +324,9 @@ func generateComponentFromCUEModule(c client.Client, wl *Workload, appName, revi
			Name: sc.Name,
		}})
	}

	comp.Namespace = ns
	if len(comp.Namespace) == 0 {
		comp.Namespace = ns
	}
	if comp.Labels == nil {
		comp.Labels = map[string]string{}
	}
@@ -22,6 +22,7 @@ import (
|
||||
|
||||
"github.com/crossplane/crossplane-runtime/apis/core/v1alpha1"
|
||||
"github.com/crossplane/crossplane-runtime/pkg/logging"
|
||||
"github.com/crossplane/crossplane-runtime/pkg/meta"
|
||||
"github.com/go-logr/logr"
|
||||
"github.com/pkg/errors"
|
||||
kerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
@@ -30,6 +31,7 @@ import (
|
||||
"k8s.io/client-go/util/retry"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
|
||||
@@ -42,7 +44,12 @@ import (
|
||||
)
|
||||
|
||||
// RolloutReconcileWaitTime is the time to wait before reconcile again an application still in rollout phase
|
||||
const RolloutReconcileWaitTime = time.Second * 3
|
||||
const (
|
||||
RolloutReconcileWaitTime = time.Second * 3
|
||||
resourceTrackerFinalizer = "resourceTracker.finalizer.core.oam.dev"
|
||||
errUpdateApplicationStatus = "cannot update application status"
|
||||
errUpdateApplicationFinalizer = "cannot update application finalizer"
|
||||
)
|
||||
|
||||
// Reconciler reconciles a Application object
|
||||
type Reconciler struct {
|
||||
@@ -72,22 +79,36 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
// TODO: check finalizer
|
||||
if app.DeletionTimestamp != nil {
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
applog.Info("Start Rendering")
|
||||
|
||||
app.Status.Phase = common.ApplicationRendering
|
||||
|
||||
// by default, we regard the spec as changed
|
||||
handler := &appHandler{
|
||||
r: r,
|
||||
app: app,
|
||||
logger: applog,
|
||||
}
|
||||
|
||||
if app.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||
if registerFinalizers(app) {
|
||||
applog.Info("Register new finalizer", "application", app.Namespace+"/"+app.Name, "finalizers", app.ObjectMeta.Finalizers)
|
||||
return reconcile.Result{}, errors.Wrap(r.Client.Update(ctx, app), errUpdateApplicationFinalizer)
|
||||
}
|
||||
} else {
|
||||
needUpdate, err := handler.removeResourceTracker(ctx)
|
||||
if err != nil {
|
||||
applog.Error(err, "Failed to remove application resourceTracker")
|
||||
app.Status.SetConditions(v1alpha1.ReconcileError(errors.Wrap(err, "error to remove finalizer")))
|
||||
return reconcile.Result{}, errors.Wrap(r.UpdateStatus(ctx, app), errUpdateApplicationStatus)
|
||||
}
|
||||
if needUpdate {
|
||||
applog.Info("remove finalizer of application", "application", app.Namespace+"/"+app.Name, "finalizers", app.ObjectMeta.Finalizers)
|
||||
return ctrl.Result{}, errors.Wrap(r.Update(ctx, app), errUpdateApplicationFinalizer)
|
||||
}
|
||||
// the app is being deleted and there is no finalizer left to handle
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
applog.Info("Start Rendering")
|
||||
|
||||
app.Status.Phase = common.ApplicationRendering
|
||||
|
||||
applog.Info("parse template")
|
||||
// parse template
|
||||
appParser := appfile.NewApplicationParser(r.Client, r.dm, r.pd)
|
||||
@@ -120,6 +141,13 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
|
||||
app.Status.SetConditions(errorCondition("Built", err))
|
||||
return handler.handleErr(err)
|
||||
}
|
||||
|
||||
err = handler.handleResourceTracker(ctx, comps, ac)
|
||||
if err != nil {
|
||||
applog.Error(err, "[Handle resourceTracker]")
|
||||
return handler.handleErr(err)
|
||||
}
|
||||
|
||||
// pass the App label and annotation to ac except some app specific ones
|
||||
oamutil.PassLabelAndAnnotation(app, ac)
|
||||
|
||||
@@ -166,6 +194,15 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
|
||||
return ctrl.Result{}, r.UpdateStatus(ctx, app)
|
||||
}
|
||||
|
||||
// registerFinalizers returns true if any finalizer is newly registered
|
||||
func registerFinalizers(app *v1beta1.Application) bool {
|
||||
if !meta.FinalizerExists(&app.ObjectMeta, resourceTrackerFinalizer) && app.Status.ResourceTracker != nil {
|
||||
meta.AddFinalizer(&app.ObjectMeta, resourceTrackerFinalizer)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// SetupWithManager install to manager
|
||||
func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
// If Application owned these two child objects, an AC status change would notify the application controller, recursively update the AC again, and trigger the application event again...
|
||||
|
||||
@@ -0,0 +1,445 @@
|
||||
/*
|
||||
Copyright 2021 The KubeVela Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package application
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
runtimev1alpha1 "github.com/crossplane/crossplane-runtime/apis/core/v1alpha1"
|
||||
"github.com/crossplane/crossplane-runtime/pkg/meta"
|
||||
"github.com/ghodss/yaml"
|
||||
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
|
||||
"github.com/oam-dev/kubevela/pkg/oam/util"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
)
|
||||
|
||||
var _ = Describe("Test application controller finalizer logic", func() {
|
||||
ctx := context.TODO()
|
||||
namespace := "cross-ns-namespace"
|
||||
|
||||
cd := &v1beta1.ComponentDefinition{}
|
||||
cDDefJson, _ := yaml.YAMLToJSON([]byte(crossCompDefYaml))
|
||||
|
||||
ncd := &v1beta1.ComponentDefinition{}
|
||||
ncdDefJson, _ := yaml.YAMLToJSON([]byte(normalCompDefYaml))
|
||||
|
||||
td := &v1beta1.TraitDefinition{}
|
||||
tdDefJson, _ := yaml.YAMLToJSON([]byte(crossNsTdYaml))
|
||||
|
||||
BeforeEach(func() {
|
||||
ns := v1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: namespace,
|
||||
},
|
||||
}
|
||||
Expect(k8sClient.Create(ctx, &ns)).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
|
||||
|
||||
Expect(json.Unmarshal(cDDefJson, cd)).Should(BeNil())
|
||||
Expect(k8sClient.Create(ctx, cd.DeepCopy())).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
|
||||
|
||||
Expect(json.Unmarshal(tdDefJson, td)).Should(BeNil())
|
||||
Expect(k8sClient.Create(ctx, td.DeepCopy())).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
|
||||
|
||||
Expect(json.Unmarshal(ncdDefJson, ncd)).Should(BeNil())
|
||||
Expect(k8sClient.Create(ctx, ncd.DeepCopy())).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
By("[TEST] Clean up resources after an integration test")
|
||||
})
|
||||
|
||||
It("Test component have normal workload", func() {
|
||||
appName := "app-1"
|
||||
appKey := types.NamespacedName{Namespace: namespace, Name: appName}
|
||||
app := getApp(appName, namespace, "normal-worker")
|
||||
Expect(k8sClient.Create(ctx, app)).Should(BeNil())
|
||||
|
||||
By("Create a normal workload app")
|
||||
checkApp := &v1beta1.Application{}
|
||||
_, err := reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
|
||||
Expect(err).Should(BeNil())
|
||||
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
|
||||
Expect(checkApp.Status.Phase).Should(Equal(common.ApplicationRunning))
|
||||
Expect(len(checkApp.Finalizers)).Should(BeEquivalentTo(0))
|
||||
|
||||
rt := &v1beta1.ResourceTracker{}
|
||||
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name), rt)).Should(util.NotFoundMatcher{})
|
||||
|
||||
By("add a cross namespace trait for application")
|
||||
updateApp := checkApp.DeepCopy()
|
||||
updateApp.Spec.Components[0].Traits = []v1beta1.ApplicationTrait{
|
||||
{
|
||||
Type: "cross-scaler",
|
||||
Properties: runtime.RawExtension{Raw: []byte(`{"replicas": 1}`)},
|
||||
},
|
||||
}
|
||||
Expect(k8sClient.Update(ctx, updateApp)).Should(BeNil())
|
||||
// first reconcile will create resourceTracker and set resourceTracker for app status
|
||||
_, err = reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
|
||||
Expect(err).Should(BeNil())
|
||||
checkApp = new(v1beta1.Application)
|
||||
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
|
||||
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name), rt)).Should(BeNil())
|
||||
Expect(checkApp.Status.ResourceTracker.UID).Should(BeEquivalentTo(rt.UID))
|
||||
Expect(len(checkApp.Finalizers)).Should(BeEquivalentTo(0))
|
||||
|
||||
// second reconcile will set finalizer for app
|
||||
_, err = reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
|
||||
checkApp = new(v1beta1.Application)
|
||||
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
|
||||
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name), rt)).Should(BeNil())
|
||||
Expect(err).Should(BeNil())
|
||||
Expect(len(checkApp.Finalizers)).Should(BeEquivalentTo(1))
|
||||
Expect(checkApp.Finalizers[0]).Should(BeEquivalentTo(resourceTrackerFinalizer))
|
||||
|
||||
By("update app by delete cross namespace trait, will delete resourceTracker and the status of app will flush")
|
||||
checkApp = &v1beta1.Application{}
|
||||
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
|
||||
updateApp = checkApp.DeepCopy()
|
||||
updateApp.Spec.Components[0].Traits = nil
|
||||
Expect(k8sClient.Update(ctx, updateApp)).Should(BeNil())
|
||||
_, err = reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
|
||||
Expect(err).Should(BeNil())
|
||||
checkApp = new(v1beta1.Application)
|
||||
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
|
||||
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name), rt)).Should(util.NotFoundMatcher{})
|
||||
Expect(checkApp.Status.ResourceTracker).Should(BeNil())
|
||||
})
|
||||
|
||||
It("Test cross namespace workload, then delete the app", func() {
|
||||
appName := "app-2"
|
||||
appKey := types.NamespacedName{Namespace: namespace, Name: appName}
|
||||
app := getApp(appName, namespace, "cross-worker")
|
||||
Expect(k8sClient.Create(ctx, app)).Should(BeNil())
|
||||
|
||||
By("Create a cross workload app")
|
||||
_, err := reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
|
||||
Expect(err).Should(BeNil())
|
||||
checkApp := &v1beta1.Application{}
|
||||
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
|
||||
Expect(checkApp.Status.Phase).Should(Equal(common.ApplicationRunning))
|
||||
Expect(len(checkApp.Finalizers)).Should(BeEquivalentTo(0))
|
||||
rt := &v1beta1.ResourceTracker{}
|
||||
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name), rt)).Should(BeNil())
|
||||
_, err = reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
|
||||
Expect(err).Should(BeNil())
|
||||
checkApp = new(v1beta1.Application)
|
||||
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
|
||||
Expect(len(checkApp.Finalizers)).Should(BeEquivalentTo(1))
|
||||
Expect(checkApp.Finalizers[0]).Should(BeEquivalentTo(resourceTrackerFinalizer))
|
||||
By("delete this cross workload app")
|
||||
Expect(k8sClient.Delete(ctx, checkApp)).Should(BeNil())
|
||||
By("delete app will delete resourceTracker")
|
||||
// reconcile will delete resourceTracker and unset app's finalizer
|
||||
_, err = reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
|
||||
Expect(err).Should(BeNil())
|
||||
checkApp = new(v1beta1.Application)
|
||||
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(util.NotFoundMatcher{})
|
||||
checkRt := new(v1beta1.ResourceTracker)
|
||||
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name), checkRt)).Should(util.NotFoundMatcher{})
|
||||
})
|
||||
})
|
||||
|
||||
var _ = Describe("Test finalizer related func", func() {
|
||||
ctx := context.TODO()
|
||||
namespace := "cross-ns-namespace"
|
||||
var handler appHandler
|
||||
|
||||
BeforeEach(func() {
|
||||
ns := v1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: namespace,
|
||||
},
|
||||
}
|
||||
Expect(k8sClient.Create(ctx, &ns)).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
By("[TEST] Clean up resources after an integration test")
|
||||
})
|
||||
|
||||
It("Test getResourceTrackerAndOwnReference func", func() {
|
||||
app := getApp("app-1", namespace, "worker")
|
||||
handler = appHandler{
|
||||
r: reconciler,
|
||||
app: app,
|
||||
logger: reconciler.Log.WithValues("application", "finalizer-func-test"),
|
||||
}
|
||||
checkRt := new(v1beta1.ResourceTracker)
|
||||
Expect(k8sClient.Get(ctx, getTrackerKey(namespace, app.Name), checkRt)).Should(util.NotFoundMatcher{})
|
||||
rt, owner, err := handler.getResourceTrackerAndOwnReference(ctx)
|
||||
Expect(err).Should(BeNil())
|
||||
Expect(rt.UID).Should(BeEquivalentTo(owner.UID))
|
||||
Expect(owner.Kind).Should(BeEquivalentTo(v1beta1.ResourceTrackerKind))
|
||||
checkRt = new(v1beta1.ResourceTracker)
|
||||
Expect(k8sClient.Get(ctx, getTrackerKey(namespace, app.Name), checkRt)).Should(BeNil())
|
||||
Expect(checkRt.UID).Should(BeEquivalentTo(rt.UID))
|
||||
Expect(k8sClient.Delete(ctx, checkRt)).Should(BeNil())
|
||||
})
|
||||
|
||||
It("Test getResourceTrackerAndOwnReference func with already exsit resourceTracker", func() {
|
||||
app := getApp("app-2", namespace, "worker")
|
||||
handler = appHandler{
|
||||
r: reconciler,
|
||||
app: app,
|
||||
logger: reconciler.Log.WithValues("application", "finalizer-func-test"),
|
||||
}
|
||||
rt := &v1beta1.ResourceTracker{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: namespace + "-" + app.GetName(),
|
||||
},
|
||||
}
|
||||
Expect(k8sClient.Create(ctx, rt)).Should(BeNil())
|
||||
checkRt, owner, err := handler.getResourceTrackerAndOwnReference(ctx)
|
||||
Expect(err).Should(BeNil())
|
||||
Expect(rt.UID).Should(BeEquivalentTo(checkRt.UID))
|
||||
Expect(owner.Kind).Should(BeEquivalentTo(v1beta1.ResourceTrackerKind))
|
||||
Expect(checkRt.UID).Should(BeEquivalentTo(owner.UID))
|
||||
Expect(k8sClient.Delete(ctx, checkRt)).Should(BeNil())
|
||||
})
|
||||
|
||||
It("Test finalizeResourceTracker func with need update ", func() {
|
||||
app := getApp("app-3", namespace, "worker")
|
||||
rt := &v1beta1.ResourceTracker{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: namespace + "-" + app.GetName(),
|
||||
},
|
||||
}
|
||||
Expect(k8sClient.Create(ctx, rt)).Should(BeNil())
|
||||
app.Status.ResourceTracker = &runtimev1alpha1.TypedReference{
|
||||
Name: rt.Name,
|
||||
Kind: v1beta1.ResourceTrackerGroupKind,
|
||||
APIVersion: v1beta1.ResourceTrackerKindAPIVersion,
|
||||
UID: rt.UID}
|
||||
meta.AddFinalizer(&app.ObjectMeta, resourceTrackerFinalizer)
|
||||
handler = appHandler{
|
||||
r: reconciler,
|
||||
app: app,
|
||||
logger: reconciler.Log.WithValues("application", "finalizer-func-test"),
|
||||
}
|
||||
need, err := handler.removeResourceTracker(ctx)
|
||||
Expect(err).Should(BeNil())
|
||||
Expect(need).Should(BeEquivalentTo(true))
|
||||
Eventually(func() error {
|
||||
err := k8sClient.Get(ctx, getTrackerKey(namespace, app.Name), rt)
|
||||
if err == nil || !apierrors.IsNotFound(err) {
|
||||
return fmt.Errorf("resourceTracker still exsit")
|
||||
}
|
||||
return nil
|
||||
}, time.Second*60, time.Microsecond*300).Should(BeNil())
|
||||
Expect(app.Status.ResourceTracker).Should(BeNil())
|
||||
Expect(meta.FinalizerExists(app, resourceTrackerFinalizer)).Should(BeEquivalentTo(false))
|
||||
})
|
||||
|
||||
It("Test finalizeResourceTracker func without need ", func() {
|
||||
app := getApp("app-4", namespace, "worker")
|
||||
handler = appHandler{
|
||||
r: reconciler,
|
||||
app: app,
|
||||
logger: reconciler.Log.WithValues("application", "finalizer-func-test"),
|
||||
}
|
||||
need, err := handler.removeResourceTracker(ctx)
|
||||
Expect(err).Should(BeNil())
|
||||
Expect(need).Should(BeEquivalentTo(false))
|
||||
})
|
||||
})
|
||||
|
||||
func getApp(appName, namespace, comptype string) *v1beta1.Application {
|
||||
return &v1beta1.Application{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Application",
|
||||
APIVersion: "core.oam.dev/v1beta1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: appName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1beta1.ApplicationSpec{
|
||||
Components: []v1beta1.ApplicationComponent{
|
||||
{
|
||||
Name: "comp1",
|
||||
Type: comptype,
|
||||
Properties: runtime.RawExtension{Raw: []byte(`{"cmd":["sleep","1000"],"image":"busybox"}`)},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func getTrackerKey(namespace, name string) types.NamespacedName {
|
||||
return types.NamespacedName{Name: fmt.Sprintf("%s-%s", namespace, name)}
|
||||
}
|
||||
|
||||
const (
|
||||
crossCompDefYaml = `
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: ComponentDefinition
|
||||
metadata:
|
||||
name: cross-worker
|
||||
namespace: vela-system
|
||||
annotations:
|
||||
definition.oam.dev/description: "Long-running scalable backend worker without network endpoint"
|
||||
spec:
|
||||
workload:
|
||||
definition:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
extension:
|
||||
healthPolicy: |
|
||||
isHealth: context.output.status.readyReplicas == context.output.status.replicas
|
||||
template: |
|
||||
output: {
|
||||
apiVersion: "apps/v1"
|
||||
kind: "Deployment"
|
||||
metadata: {
|
||||
namespace: "cross-namespace"
|
||||
}
|
||||
spec: {
|
||||
replicas: 0
|
||||
template: {
|
||||
metadata: labels: {
|
||||
"app.oam.dev/component": context.name
|
||||
}
|
||||
|
||||
spec: {
|
||||
containers: [{
|
||||
name: context.name
|
||||
image: parameter.image
|
||||
|
||||
if parameter["cmd"] != _|_ {
|
||||
command: parameter.cmd
|
||||
}
|
||||
}]
|
||||
}
|
||||
}
|
||||
|
||||
selector:
|
||||
matchLabels:
|
||||
"app.oam.dev/component": context.name
|
||||
}
|
||||
}
|
||||
|
||||
parameter: {
|
||||
// +usage=Which image would you like to use for your service
|
||||
// +short=i
|
||||
image: string
|
||||
|
||||
cmd?: [...string]
|
||||
}
|
||||
`
|
||||
|
||||
crossNsTdYaml = `
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: TraitDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
definition.oam.dev/description: "Manually scale the app"
|
||||
name: cross-scaler
|
||||
namespace: vela-system
|
||||
spec:
|
||||
appliesToWorkloads:
|
||||
- webservice
|
||||
- worker
|
||||
definitionRef:
|
||||
name: manualscalertraits.core.oam.dev
|
||||
workloadRefPath: spec.workloadRef
|
||||
extension:
|
||||
template: |-
|
||||
outputs: scaler: {
|
||||
apiVersion: "core.oam.dev/v1alpha2"
|
||||
kind: "ManualScalerTrait"
|
||||
metadata: {
|
||||
namespace: "cross-namespace"
|
||||
}
|
||||
spec: {
|
||||
replicaCount: parameter.replicas
|
||||
}
|
||||
}
|
||||
parameter: {
|
||||
//+short=r
|
||||
replicas: *1 | int
|
||||
}
|
||||
`
|
||||
|
||||
normalCompDefYaml = `
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: ComponentDefinition
|
||||
metadata:
|
||||
name: normal-worker
|
||||
namespace: vela-system
|
||||
annotations:
|
||||
definition.oam.dev/description: "Long-running scalable backend worker without network endpoint"
|
||||
spec:
|
||||
workload:
|
||||
definition:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
extension:
|
||||
healthPolicy: |
|
||||
isHealth: context.output.status.readyReplicas == context.output.status.replicas
|
||||
template: |
|
||||
output: {
|
||||
apiVersion: "apps/v1"
|
||||
kind: "Deployment"
|
||||
spec: {
|
||||
replicas: 0
|
||||
template: {
|
||||
metadata: labels: {
|
||||
"app.oam.dev/component": context.name
|
||||
}
|
||||
|
||||
spec: {
|
||||
containers: [{
|
||||
name: context.name
|
||||
image: parameter.image
|
||||
|
||||
if parameter["cmd"] != _|_ {
|
||||
command: parameter.cmd
|
||||
}
|
||||
}]
|
||||
}
|
||||
}
|
||||
|
||||
selector:
|
||||
matchLabels:
|
||||
"app.oam.dev/component": context.name
|
||||
}
|
||||
}
|
||||
|
||||
parameter: {
|
||||
// +usage=Which image would you like to use for your service
|
||||
// +short=i
|
||||
image: string
|
||||
|
||||
cmd?: [...string]
|
||||
}
|
||||
`
|
||||
)
|
||||
@@ -24,11 +24,13 @@ import (
|
||||
|
||||
runtimev1alpha1 "github.com/crossplane/crossplane-runtime/apis/core/v1alpha1"
|
||||
"github.com/crossplane/crossplane-runtime/pkg/logging"
|
||||
"github.com/crossplane/crossplane-runtime/pkg/meta"
|
||||
"github.com/go-logr/logr"
|
||||
"github.com/pkg/errors"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
ctypes "k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/klog/v2"
|
||||
@@ -376,3 +378,134 @@ func (h *appHandler) applyHelmModuleResources(ctx context.Context, comp *v1alpha
|
||||
klog.InfoS("Apply a HelmRelease", "namespace", release.GetNamespace(), "name", release.GetName())
|
||||
return nil
|
||||
}
|
||||
|
||||
// handleResourceTracker checks the namespace of all components and traits; if a namespace differs from the application's, the tracker will own those resources
|
||||
func (h *appHandler) handleResourceTracker(ctx context.Context, components []*v1alpha2.Component, ac *v1alpha2.ApplicationConfiguration) error {
|
||||
ref := new(metav1.OwnerReference)
|
||||
// resourceTracker caches the tracker object to avoid getting it from k8s every time
|
||||
resourceTracker := new(v1beta1.ResourceTracker)
|
||||
needTracker := false
|
||||
for i, c := range components {
|
||||
u, err := oamutil.RawExtension2Unstructured(&c.Spec.Workload)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if checkResourceDiffWithApp(u, h.app.Namespace) {
|
||||
needTracker = true
|
||||
if len(resourceTracker.Name) == 0 {
|
||||
resourceTracker, ref, err = h.getResourceTrackerAndOwnReference(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
u.SetOwnerReferences([]metav1.OwnerReference{*ref})
|
||||
raw := oamutil.Object2RawExtension(u)
|
||||
components[i].Spec.Workload = raw
|
||||
}
|
||||
}
|
||||
for _, acComponent := range ac.Spec.Components {
|
||||
for i, t := range acComponent.Traits {
|
||||
u, err := oamutil.RawExtension2Unstructured(&t.Trait)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if checkResourceDiffWithApp(u, h.app.Namespace) {
|
||||
needTracker = true
|
||||
if len(resourceTracker.Name) == 0 {
|
||||
resourceTracker, ref, err = h.getResourceTrackerAndOwnReference(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
u.SetOwnerReferences([]metav1.OwnerReference{*ref})
|
||||
raw := oamutil.Object2RawExtension(u)
|
||||
acComponent.Traits[i].Trait = raw
|
||||
}
|
||||
}
|
||||
}
|
||||
if !needTracker {
|
||||
h.app.Status.ResourceTracker = nil
|
||||
// check whether a related resourceTracker exists; if so, delete it
|
||||
err := h.r.Get(ctx, ctypes.NamespacedName{Name: h.generateResourceTrackerName()}, resourceTracker)
|
||||
if err == nil {
|
||||
return h.r.Delete(ctx, resourceTracker)
|
||||
}
|
||||
if !apierrors.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
h.app.Status.ResourceTracker = &runtimev1alpha1.TypedReference{
|
||||
Name: resourceTracker.Name,
|
||||
Kind: v1beta1.ResourceTrackerGroupKind,
|
||||
APIVersion: v1beta1.ResourceTrackerKindAPIVersion,
|
||||
UID: resourceTracker.UID}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *appHandler) getResourceTrackerAndOwnReference(ctx context.Context) (*v1beta1.ResourceTracker, *metav1.OwnerReference, error) {
|
||||
resourceTracker := new(v1beta1.ResourceTracker)
|
||||
key := ctypes.NamespacedName{Name: h.generateResourceTrackerName()}
|
||||
err := h.r.Get(ctx, key, resourceTracker)
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
resourceTracker = &v1beta1.ResourceTracker{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: h.generateResourceTrackerName(),
|
||||
},
|
||||
}
|
||||
if err = h.r.Client.Create(ctx, resourceTracker); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return resourceTracker, metav1.NewControllerRef(resourceTracker, v1beta1.ResourceTrackerKindVersionKind), nil
|
||||
}
|
||||
return nil, nil, err
|
||||
}
|
||||
return resourceTracker, metav1.NewControllerRef(resourceTracker, v1beta1.ResourceTrackerKindVersionKind), nil
|
||||
}
|
||||
|
||||
func (h *appHandler) generateResourceTrackerName() string {
|
||||
return fmt.Sprintf("%s-%s", h.app.Namespace, h.app.Name)
|
||||
}
|
||||
|
||||
func checkResourceDiffWithApp(u *unstructured.Unstructured, appNs string) bool {
|
||||
return len(u.GetNamespace()) != 0 && u.GetNamespace() != appNs
|
||||
}
|
||||
|
||||
// removeResourceTracker returns whether the application needs to be updated
|
||||
func (h *appHandler) removeResourceTracker(ctx context.Context) (bool, error) {
|
||||
client := h.r.Client
|
||||
rt := new(v1beta1.ResourceTracker)
|
||||
trackerName := h.generateResourceTrackerName()
|
||||
key := ctypes.NamespacedName{Name: trackerName}
|
||||
err := client.Get(ctx, key, rt)
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
// in some cases the resourceTracker has already been deleted but the finalizer still exists
|
||||
if meta.FinalizerExists(h.app, resourceTrackerFinalizer) {
|
||||
meta.RemoveFinalizer(h.app, resourceTrackerFinalizer)
|
||||
return true, nil
|
||||
}
|
||||
// in some cases the informer cache has not synced the resourceTracker from k8s yet; return an error to trigger another reconcile
|
||||
if h.app.Status.ResourceTracker != nil {
|
||||
return false, fmt.Errorf("application status has resouceTracker but cannot get from k8s ")
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
rt = &v1beta1.ResourceTracker{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: trackerName,
|
||||
},
|
||||
}
|
||||
err = h.r.Client.Delete(ctx, rt)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
h.logger.Info("delete application resourceTracker")
|
||||
meta.RemoveFinalizer(h.app, resourceTrackerFinalizer)
|
||||
h.app.Status.ResourceTracker = nil
|
||||
return true, nil
|
||||
}
|
||||
|
||||
@@ -203,8 +203,10 @@ func (r *components) renderComponent(ctx context.Context, acc v1alpha2.Applicati
|
||||
// don't pass the following annotation as those are for appConfig only
|
||||
util.RemoveAnnotations(w, []string{oam.AnnotationAppRollout, oam.AnnotationRollingComponent, oam.AnnotationInplaceUpgrade})
|
||||
ref := metav1.NewControllerRef(ac, v1alpha2.ApplicationConfigurationGroupVersionKind)
|
||||
w.SetNamespace(ac.GetNamespace())
|
||||
|
||||
// Don't override the namespace if the resource already has one; it was set by the user or by the application controller, which is by design.
|
||||
if len(w.GetNamespace()) == 0 {
|
||||
w.SetNamespace(ac.GetNamespace())
|
||||
}
|
||||
traits := make([]*Trait, 0, len(acc.Traits))
|
||||
traitDefs := make([]v1alpha2.TraitDefinition, 0, len(acc.Traits))
|
||||
compInfoLabels[oam.LabelOAMResourceType] = oam.ResourceTypeTrait
|
||||
@@ -270,7 +272,10 @@ func (r *components) renderComponent(ctx context.Context, acc v1alpha2.Applicati
|
||||
}
|
||||
}
|
||||
// set the owner reference after its ref is edited
|
||||
w.SetOwnerReferences([]metav1.OwnerReference{*ref})
|
||||
// Only set the ownerReference if the workload is in the same namespace as the application; otherwise the application controller has already set a resourceTracker as the owner.
|
||||
if ac.GetNamespace() == w.GetNamespace() {
|
||||
w.SetOwnerReferences([]metav1.OwnerReference{*ref})
|
||||
}
|
||||
|
||||
// create the ref after the workload name is set
|
||||
workloadRef := runtimev1alpha1.TypedReference{
|
||||
@@ -346,9 +351,15 @@ func setTraitProperties(t *unstructured.Unstructured, traitName, namespace strin
|
||||
if t.GetName() == "" {
|
||||
t.SetName(traitName)
|
||||
}
|
||||
// Don't override the namespace if the resource already has one; it was set by the user or by the application controller, which is by design.
|
||||
if len(t.GetNamespace()) == 0 {
|
||||
t.SetNamespace(namespace)
|
||||
}
|
||||
// Only set the ownerReference if the trait is in the same namespace as the application; otherwise the application controller has already set a resourceTracker as the owner.
|
||||
if t.GetNamespace() == namespace {
|
||||
t.SetOwnerReferences([]metav1.OwnerReference{*ref})
|
||||
}
|
||||
|
||||
t.SetOwnerReferences([]metav1.OwnerReference{*ref})
|
||||
t.SetNamespace(namespace)
|
||||
}
|
||||
|
||||
// setWorkloadInstanceName will set metadata.name for workload CR according to createRevision flag in traitDefinition
|
||||
|
||||
@@ -1681,6 +1681,21 @@ func TestSetTraitProperties(t *testing.T) {
|
||||
expU.SetName("comp1")
|
||||
expU.SetNamespace("ns")
|
||||
expU.SetOwnerReferences([]metav1.OwnerReference{{Name: "comp1"}})
|
||||
assert.Equal(t, expU, u)
|
||||
|
||||
u = &unstructured.Unstructured{}
|
||||
u.SetOwnerReferences([]metav1.OwnerReference{
|
||||
{
|
||||
Name: "resourceTracker",
|
||||
},
|
||||
})
|
||||
u.SetNamespace("another-ns")
|
||||
setTraitProperties(u, "comp1", "ns", &metav1.OwnerReference{Name: "comp1"})
|
||||
expU = &unstructured.Unstructured{}
|
||||
expU.SetName("comp1")
|
||||
expU.SetNamespace("another-ns")
|
||||
expU.SetOwnerReferences([]metav1.OwnerReference{{Name: "resourceTracker"}})
|
||||
assert.Equal(t, expU, u)
|
||||
}
|
||||
|
||||
func TestRenderTraitName(t *testing.T) {
|
||||
|
||||
test/e2e-test/app_resourcetracker_test.go (new file, 702 lines)
@@ -0,0 +1,702 @@
|
||||
/*
|
||||
Copyright 2021 The KubeVela Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package controllers_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
|
||||
"github.com/oam-dev/kubevela/pkg/oam"
|
||||
"github.com/oam-dev/kubevela/pkg/oam/util"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/yaml"
|
||||
)
|
||||
|
||||
var _ = Describe("Test application cross namespace resource", func() {
|
||||
ctx := context.Background()
|
||||
var (
|
||||
namespace = "app-resource-tracker-test-ns"
|
||||
crossNamespace = "cross-namespace"
|
||||
)
|
||||
|
||||
BeforeEach(func() {
|
||||
crossNs := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: crossNamespace}}
|
||||
ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
|
||||
Expect(k8sClient.Create(ctx, &ns)).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
|
||||
Expect(k8sClient.Create(ctx, &crossNs)).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
|
||||
|
||||
Eventually(func() error {
|
||||
ns := new(corev1.Namespace)
|
||||
return k8sClient.Get(ctx, types.NamespacedName{Name: namespace}, ns)
|
||||
}, time.Second*60, time.Microsecond*300).Should(BeNil())
|
||||
Eventually(func() error {
|
||||
ns := new(corev1.Namespace)
|
||||
return k8sClient.Get(ctx, types.NamespacedName{Name: crossNamespace}, ns)
|
||||
}, time.Second*60, time.Microsecond*300).Should(BeNil())
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
By("Clean up resources after a test")
|
||||
Expect(k8sClient.Delete(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, client.PropagationPolicy(metav1.DeletePropagationForeground))).Should(Succeed())
|
||||
Expect(k8sClient.Delete(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: crossNamespace}}, client.PropagationPolicy(metav1.DeletePropagationForeground))).Should(Succeed())
|
||||
// guarantee namespace have been deleted
|
||||
Eventually(func() error {
|
||||
ns := new(corev1.Namespace)
|
||||
err := k8sClient.Get(ctx, types.NamespacedName{Name: namespace}, ns)
|
||||
if err == nil {
|
||||
return fmt.Errorf("namespace still exist")
|
||||
}
|
||||
err = k8sClient.Get(ctx, types.NamespacedName{Name: crossNamespace}, ns)
|
||||
if err == nil {
|
||||
return fmt.Errorf("namespace still exist")
|
||||
}
|
||||
return nil
|
||||
}, time.Second*60, time.Microsecond*300).Should(BeNil())
|
||||
})
|
||||
|
||||
It("Test application have cross-namespace workload", func() {
|
||||
// install component definition
|
||||
crossCdJson, _ := yaml.YAMLToJSON([]byte(crossCompDefYaml))
|
||||
ccd := new(v1beta1.ComponentDefinition)
|
||||
Expect(json.Unmarshal(crossCdJson, ccd)).Should(BeNil())
|
||||
Expect(k8sClient.Create(ctx, ccd)).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
|
||||
var (
|
||||
appName = "test-app-1"
|
||||
app = new(v1beta1.Application)
|
||||
componentName = "test-app-1-comp"
|
||||
)
|
||||
app = &v1beta1.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: appName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1beta1.ApplicationSpec{
|
||||
Components: []v1beta1.ApplicationComponent{
|
||||
v1beta1.ApplicationComponent{
|
||||
Name: componentName,
|
||||
Type: "cross-worker",
|
||||
Properties: runtime.RawExtension{Raw: []byte(`{"cmd":["sleep","1000"],"image":"busybox"}`)},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
Expect(k8sClient.Create(ctx, app)).Should(BeNil())
|
||||
By("check resource tracker has been created and app status ")
|
||||
resourceTracker := new(v1beta1.ResourceTracker)
|
||||
Eventually(func() error {
|
||||
app := new(v1beta1.Application)
|
||||
if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appName}, app); err != nil {
|
||||
return fmt.Errorf("app not found %v", err)
|
||||
}
|
||||
if err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name), resourceTracker); err != nil {
|
||||
return err
|
||||
}
|
||||
if app.Status.Phase != common.ApplicationRunning {
|
||||
return fmt.Errorf("application status is not running")
|
||||
}
|
||||
if app.Status.ResourceTracker == nil || app.Status.ResourceTracker.UID != resourceTracker.UID {
|
||||
return fmt.Errorf("appication status error ")
|
||||
}
|
||||
return nil
|
||||
}, time.Second*600, time.Microsecond*300).Should(BeNil())
|
||||
By("check resource is generated correctly")
|
||||
Expect(k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appName}, app)).Should(BeNil())
|
||||
var workload appsv1.Deployment
|
||||
Eventually(func() error {
|
||||
appContext := &v1alpha2.ApplicationContext{}
|
||||
if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appName}, appContext); err != nil {
|
||||
return fmt.Errorf("cannot generate AppContext %v", err)
|
||||
}
|
||||
component := &v1alpha2.Component{}
|
||||
if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: componentName}, component); err != nil {
|
||||
return fmt.Errorf("cannot generate component %v", err)
|
||||
}
|
||||
if component.ObjectMeta.Labels[oam.LabelAppName] != appName {
|
||||
return fmt.Errorf("component error label ")
|
||||
}
|
||||
depolys := new(appsv1.DeploymentList)
|
||||
opts := []client.ListOption{
|
||||
client.InNamespace(crossNamespace),
|
||||
client.MatchingLabels{
|
||||
oam.LabelAppName: appName,
|
||||
},
|
||||
}
|
||||
err := k8sClient.List(ctx, depolys, opts...)
|
||||
if err != nil || len(depolys.Items) != 1 {
|
||||
return fmt.Errorf("error workload number %v", err)
|
||||
}
|
||||
workload = depolys.Items[0]
|
||||
if len(workload.OwnerReferences) != 1 || workload.OwnerReferences[0].UID != resourceTracker.UID {
|
||||
return fmt.Errorf("wrokload ownerreference error")
|
||||
}
|
||||
return nil
|
||||
}, time.Second*50, time.Microsecond*300).Should(BeNil())
|
||||
|
||||
By("deleting application will remove resourceTracker and related workload will be removed")
|
||||
time.Sleep(3 * time.Second) // wait informer cache to be synced
|
||||
Expect(k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appName}, app)).Should(BeNil())
|
||||
Expect(k8sClient.Delete(ctx, app)).Should(BeNil())
|
||||
Eventually(func() error {
|
||||
err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name), resourceTracker)
|
||||
if err == nil {
|
||||
return fmt.Errorf("resourceTracker still exist")
|
||||
}
|
||||
if !apierrors.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
err = k8sClient.Get(ctx, types.NamespacedName{Namespace: crossNamespace, Name: workload.GetName()}, &workload)
|
||||
if err == nil {
|
||||
return fmt.Errorf("wrokload still exist")
|
||||
}
|
||||
if !apierrors.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}, time.Second*30, time.Microsecond*300).Should(BeNil())
|
||||
})
|
||||
|
||||
	It("Test update application by add a cross namespace trait resource", func() {
		var (
			appName       = "test-app-2"
			app           = new(v1beta1.Application)
			componentName = "test-app-2-comp"
		)
		// install component definition
		normalCdJson, _ := yaml.YAMLToJSON([]byte(normalCompDefYaml))
		ncd := new(v1beta1.ComponentDefinition)
		Expect(json.Unmarshal(normalCdJson, ncd)).Should(BeNil())
		Expect(k8sClient.Create(ctx, ncd)).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))

		crossTdJson, err := yaml.YAMLToJSON([]byte(crossNsTdYaml))
		Expect(err).Should(BeNil())
		ctd := new(v1beta1.TraitDefinition)
		Expect(json.Unmarshal(crossTdJson, ctd)).Should(BeNil())
		Expect(k8sClient.Create(ctx, ctd)).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))

		app = &v1beta1.Application{
			ObjectMeta: metav1.ObjectMeta{
				Name:      appName,
				Namespace: namespace,
			},
			Spec: v1beta1.ApplicationSpec{
				Components: []v1beta1.ApplicationComponent{
					v1beta1.ApplicationComponent{
						Name:       componentName,
						Type:       "normal-worker",
						Properties: runtime.RawExtension{Raw: []byte(`{"cmd":["sleep","1000"],"image":"busybox"}`)},
					},
				},
			},
		}

		Expect(k8sClient.Create(ctx, app)).Should(BeNil())
		resourceTracker := new(v1beta1.ResourceTracker)
		By("application contains a normal workload, check application and workload status")
		Eventually(func() error {
			app := new(v1beta1.Application)
			if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appName}, app); err != nil {
				return fmt.Errorf("failed to get application %v", err)
			}
			if app.Status.Phase != common.ApplicationRunning {
				return fmt.Errorf("application status not running")
			}
			deploys := new(appsv1.DeploymentList)
			opts := []client.ListOption{
				client.InNamespace(namespace),
				client.MatchingLabels{
					oam.LabelAppName: appName,
				},
			}
			err := k8sClient.List(ctx, deploys, opts...)
			if err != nil || len(deploys.Items) != 1 {
				return fmt.Errorf("error workload number %v", err)
			}
			workload := deploys.Items[0]
			if len(workload.OwnerReferences) != 1 || workload.OwnerReferences[0].Kind != v1alpha2.ApplicationConfigurationKind {
				return fmt.Errorf("workload ownerReference error")
			}
			err = k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name), resourceTracker)
			if err == nil {
				return fmt.Errorf("resourceTracker should not be created")
			}
			if !apierrors.IsNotFound(err) {
				return err
			}
			return nil
		}, time.Second*60, time.Microsecond*300).Should(BeNil())

		Eventually(func() error {
			err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appName}, app)
			if err != nil {
				return err
			}
			app.Spec.Components[0].Traits = []v1beta1.ApplicationTrait{
				v1beta1.ApplicationTrait{
					Type:       "cross-scaler",
					Properties: runtime.RawExtension{Raw: []byte(`{"replicas": 1}`)},
				},
			}
			return k8sClient.Update(ctx, app)
		}, time.Second*30, time.Microsecond*300).Should(BeNil())

		By("add a cross namespace trait, check resourceTracker and trait status")
		Eventually(func() error {
			app := new(v1beta1.Application)
			if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appName}, app); err != nil {
				return fmt.Errorf("failed to get application %v", err)
			}
			if app.Status.Phase != common.ApplicationRunning {
				return fmt.Errorf("application status not running")
			}
			err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name), resourceTracker)
			if err != nil {
				return fmt.Errorf("resourceTracker not generated %v", err)
			}
			mts := new(v1alpha2.ManualScalerTraitList)
			opts := []client.ListOption{
				client.InNamespace(crossNamespace),
				client.MatchingLabels{
					oam.LabelAppName: appName,
				},
			}
			err = k8sClient.List(ctx, mts, opts...)
			if err != nil || len(mts.Items) != 1 {
				return fmt.Errorf("failed to generate cross namespace trait")
			}
			trait := mts.Items[0]
			if len(trait.OwnerReferences) != 1 || trait.OwnerReferences[0].UID != resourceTracker.UID {
				return fmt.Errorf("trait owner reference mismatch")
			}
			return nil
		}, time.Second*60, time.Microsecond*300).Should(BeNil())
	})

	It("Test update application by delete a cross namespace trait resource", func() {
		var (
			appName       = "test-app-3"
			app           = new(v1beta1.Application)
			componentName = "test-app-3-comp"
		)
		By("install component definition")
		normalCdJson, _ := yaml.YAMLToJSON([]byte(normalCompDefYaml))
		ncd := new(v1beta1.ComponentDefinition)
		Expect(json.Unmarshal(normalCdJson, ncd)).Should(BeNil())
		Expect(k8sClient.Create(ctx, ncd)).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))

		crossTdJson, err := yaml.YAMLToJSON([]byte(crossNsTdYaml))
		Expect(err).Should(BeNil())
		ctd := new(v1beta1.TraitDefinition)
		Expect(json.Unmarshal(crossTdJson, ctd)).Should(BeNil())
		Expect(k8sClient.Create(ctx, ctd)).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))

		app = &v1beta1.Application{
			ObjectMeta: metav1.ObjectMeta{
				Name:      appName,
				Namespace: namespace,
			},
			Spec: v1beta1.ApplicationSpec{
				Components: []v1beta1.ApplicationComponent{
					v1beta1.ApplicationComponent{
						Name:       componentName,
						Type:       "normal-worker",
						Properties: runtime.RawExtension{Raw: []byte(`{"cmd":["sleep","1000"],"image":"busybox"}`)},
						Traits: []v1beta1.ApplicationTrait{
							v1beta1.ApplicationTrait{
								Type:       "cross-scaler",
								Properties: runtime.RawExtension{Raw: []byte(`{"replicas": 1}`)},
							},
						},
					},
				},
			},
		}
		Expect(k8sClient.Create(ctx, app)).Should(BeNil())
		time.Sleep(3 * time.Second) // wait for informer cache to sync
		resourceTracker := new(v1beta1.ResourceTracker)
		By("create application will create a cross ns trait and a resourceTracker, check their status")
		Eventually(func() error {
			app = new(v1beta1.Application)
			if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appName}, app); err != nil {
				return fmt.Errorf("failed to get application %v", err)
			}
			if app.Status.Phase != common.ApplicationRunning {
				return fmt.Errorf("application status not running")
			}
			err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name), resourceTracker)
			if err != nil {
				return fmt.Errorf("failed to get resourceTracker %v", err)
			}
			mts := new(v1alpha2.ManualScalerTraitList)
			opts := []client.ListOption{
				client.InNamespace(crossNamespace),
				client.MatchingLabels{
					oam.LabelAppName: appName,
				},
			}
			err = k8sClient.List(ctx, mts, opts...)
			if err != nil || len(mts.Items) != 1 {
				return fmt.Errorf("failed to generate cross namespace trait")
			}
			trait := mts.Items[0]
			if len(trait.OwnerReferences) != 1 || trait.OwnerReferences[0].UID != resourceTracker.UID {
				return fmt.Errorf("trait owner reference mismatch")
			}
			return nil
		}, time.Second*60, time.Microsecond*300).Should(BeNil())

		By("update application by deleting the cross ns trait, which should delete the resourceTracker and the related trait resource")
		Eventually(func() error {
			app = new(v1beta1.Application)
			Expect(k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appName}, app)).Should(BeNil())
			app.Spec.Components[0].Traits = []v1beta1.ApplicationTrait{}
			return k8sClient.Update(ctx, app)
		}, time.Second*30, time.Microsecond*300).Should(BeNil())
		fmt.Println(app.ResourceVersion)
		Eventually(func() error {
			app = new(v1beta1.Application)
			if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appName}, app); err != nil {
				return fmt.Errorf("failed to get application %v", err)
			}
			if app.Status.Phase != common.ApplicationRunning {
				return fmt.Errorf("application status not running")
			}
			err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name), resourceTracker)
			if err == nil {
				return fmt.Errorf("resourceTracker still exists")
			}
			mts := new(v1alpha2.ManualScalerTraitList)
			opts := []client.ListOption{
				client.InNamespace(crossNamespace),
				client.MatchingLabels{
					oam.LabelAppName: appName,
				},
			}
			err = k8sClient.List(ctx, mts, opts...)
			if err != nil || len(mts.Items) != 0 {
				return fmt.Errorf("cross ns trait still exists")
			}
			if app.Status.ResourceTracker != nil {
				return fmt.Errorf("application status resourceTracker field still exists %s", string(util.JSONMarshal(app.Status.ResourceTracker)))
			}
			return nil
		}, time.Second*60, time.Microsecond*300).Should(BeNil())
	})

	It("Test application have two different workload", func() {
		var (
			appName        = "test-app-4"
			app            = new(v1beta1.Application)
			component1Name = "test-app-4-comp-1"
			component2Name = "test-app-4-comp-2"
		)
		By("install component definition")
		normalCdJson, _ := yaml.YAMLToJSON([]byte(normalCompDefYaml))
		ncd := new(v1beta1.ComponentDefinition)
		Expect(json.Unmarshal(normalCdJson, ncd)).Should(BeNil())
		Expect(k8sClient.Create(ctx, ncd)).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))

		crossCdJson, err := yaml.YAMLToJSON([]byte(crossCompDefYaml))
		Expect(err).Should(BeNil())
		ctd := new(v1beta1.ComponentDefinition)
		Expect(json.Unmarshal(crossCdJson, ctd)).Should(BeNil())
		Expect(k8sClient.Create(ctx, ctd)).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))

		app = &v1beta1.Application{
			ObjectMeta: metav1.ObjectMeta{
				Name:      appName,
				Namespace: namespace,
			},
			Spec: v1beta1.ApplicationSpec{
				Components: []v1beta1.ApplicationComponent{
					v1beta1.ApplicationComponent{
						Name:       component1Name,
						Type:       "normal-worker",
						Properties: runtime.RawExtension{Raw: []byte(`{"cmd":["sleep","1000"],"image":"busybox"}`)},
					},
					v1beta1.ApplicationComponent{
						Name:       component2Name,
						Type:       "cross-worker",
						Properties: runtime.RawExtension{Raw: []byte(`{"cmd":["sleep","1000"],"image":"busybox"}`)},
					},
				},
			},
		}

		Expect(k8sClient.Create(ctx, app)).Should(BeNil())
		time.Sleep(3 * time.Second) // wait for informer cache to sync
		resourceTracker := new(v1beta1.ResourceTracker)

		By("create application will generate two workloads and a resourceTracker")
		Eventually(func() error {
			app = new(v1beta1.Application)
			if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appName}, app); err != nil {
				return fmt.Errorf("failed to get application %v", err)
			}
			if app.Status.Phase != common.ApplicationRunning {
				return fmt.Errorf("application status not running")
			}
			err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name), resourceTracker)
			if err != nil {
				return fmt.Errorf("failed to get resourceTracker %v", err)
			}
			sameOpts := []client.ListOption{
				client.InNamespace(namespace),
				client.MatchingLabels{
					oam.LabelAppName: appName,
				},
			}
			crossOpts := []client.ListOption{
				client.InNamespace(crossNamespace),
				client.MatchingLabels{
					oam.LabelAppName: appName,
				},
			}
			same, cross := new(appsv1.DeploymentList), new(appsv1.DeploymentList)
			err = k8sClient.List(ctx, same, sameOpts...)
			if err != nil || len(same.Items) != 1 {
				return fmt.Errorf("failed to generate same namespace workload")
			}
			sameDeploy := same.Items[0]
			if len(sameDeploy.OwnerReferences) != 1 || sameDeploy.OwnerReferences[0].Kind != v1alpha2.ApplicationConfigurationKind {
				return fmt.Errorf("same ns deploy has wrong ownerReference")
			}
			err = k8sClient.List(ctx, cross, crossOpts...)
			if err != nil || len(cross.Items) != 1 {
				return fmt.Errorf("failed to generate cross namespace workload")
			}
			crossDeploy := cross.Items[0]
			if len(crossDeploy.OwnerReferences) != 1 || crossDeploy.OwnerReferences[0].UID != resourceTracker.UID {
				return fmt.Errorf("cross ns deploy has wrong ownerReference")
			}
			if app.Status.ResourceTracker == nil || app.Status.ResourceTracker.UID != resourceTracker.UID {
				return fmt.Errorf("app status resourceTracker error")
			}
			return nil
		}, time.Second*60, time.Microsecond*300).Should(BeNil())
		By("update application by deleting the cross namespace workload, the resourceTracker will be deleted, then check app status")
		Eventually(func() error {
			app = new(v1beta1.Application)
			Expect(k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appName}, app)).Should(BeNil())
			app.Spec.Components = app.Spec.Components[:1] // delete a component
			return k8sClient.Update(ctx, app)
		}, time.Second*30, time.Microsecond*300).Should(BeNil())
		Eventually(func() error {
			app = new(v1beta1.Application)
			if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appName}, app); err != nil {
				return fmt.Errorf("failed to get application %v", err)
			}
			if app.Status.Phase != common.ApplicationRunning {
				return fmt.Errorf("application status not running")
			}
			err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name), resourceTracker)
			if err == nil {
				return fmt.Errorf("resourceTracker still exists")
			}
			sameOpts := []client.ListOption{
				client.InNamespace(namespace),
				client.MatchingLabels{
					oam.LabelAppName: appName,
				},
			}
			crossOpts := []client.ListOption{
				client.InNamespace(crossNamespace),
				client.MatchingLabels{
					oam.LabelAppName: appName,
				},
			}
			same, cross := new(appsv1.DeploymentList), new(appsv1.DeploymentList)
			err = k8sClient.List(ctx, same, sameOpts...)
			if err != nil || len(same.Items) != 1 {
				return fmt.Errorf("failed to generate same namespace workload")
			}
			sameDeploy := same.Items[0]
			if len(sameDeploy.OwnerReferences) != 1 || sameDeploy.OwnerReferences[0].Kind != v1alpha2.ApplicationConfigurationKind {
				return fmt.Errorf("same ns deploy has wrong ownerReference")
			}
			err = k8sClient.List(ctx, cross, crossOpts...)
			if err != nil || len(cross.Items) != 0 {
				return fmt.Errorf("cross namespace workload still exists")
			}
			if app.Status.ResourceTracker != nil {
				return fmt.Errorf("app status resourceTracker should be empty")
			}
			return nil
		}, time.Second*60, time.Microsecond*300).Should(BeNil())
	})
})

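// generateResourceTrackerKey returns the key used to look up an application's ResourceTracker.
// The tracker is cluster-scoped, so the key carries no namespace; its name joins the
// application's namespace and name with a hyphen.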
func generateResourceTrackerKey(namespace string, name string) types.NamespacedName {
	return types.NamespacedName{Name: fmt.Sprintf("%s-%s", namespace, name)}
}

const (
	crossCompDefYaml = `
apiVersion: core.oam.dev/v1beta1
kind: ComponentDefinition
metadata:
  name: cross-worker
  namespace: app-resource-tracker-test-ns
  annotations:
    definition.oam.dev/description: "Long-running scalable backend worker without network endpoint"
spec:
  workload:
    definition:
      apiVersion: apps/v1
      kind: Deployment
  extension:
    healthPolicy: |
      isHealth: context.output.status.readyReplicas == context.output.status.replicas
    template: |
      output: {
        apiVersion: "apps/v1"
        kind: "Deployment"
        metadata: {
          namespace: "cross-namespace"
        }
        spec: {
          replicas: 0
          template: {
            metadata: labels: {
              "app.oam.dev/component": context.name
            }

            spec: {
              containers: [{
                name: context.name
                image: parameter.image

                if parameter["cmd"] != _|_ {
                  command: parameter.cmd
                }
              }]
            }
          }

          selector:
            matchLabels:
              "app.oam.dev/component": context.name
        }
      }

      parameter: {
        // +usage=Which image would you like to use for your service
        // +short=i
        image: string

        cmd?: [...string]
      }
`
	normalCompDefYaml = `
apiVersion: core.oam.dev/v1beta1
kind: ComponentDefinition
metadata:
  name: normal-worker
  namespace: app-resource-tracker-test-ns
  annotations:
    definition.oam.dev/description: "Long-running scalable backend worker without network endpoint"
spec:
  workload:
    definition:
      apiVersion: apps/v1
      kind: Deployment
  extension:
    healthPolicy: |
      isHealth: context.output.status.readyReplicas == context.output.status.replicas
    template: |
      output: {
        apiVersion: "apps/v1"
        kind: "Deployment"
        spec: {
          replicas: 0
          template: {
            metadata: labels: {
              "app.oam.dev/component": context.name
            }

            spec: {
              containers: [{
                name: context.name
                image: parameter.image

                if parameter["cmd"] != _|_ {
                  command: parameter.cmd
                }
              }]
            }
          }

          selector:
            matchLabels:
              "app.oam.dev/component": context.name
        }
      }

      parameter: {
        // +usage=Which image would you like to use for your service
        // +short=i
        image: string

        cmd?: [...string]
      }
`
	crossNsTdYaml = `
apiVersion: core.oam.dev/v1beta1
kind: TraitDefinition
metadata:
  annotations:
    definition.oam.dev/description: "Manually scale the app"
  name: cross-scaler
  namespace: app-resource-tracker-test-ns
spec:
  appliesToWorkloads:
    - webservice
    - worker
  definitionRef:
    name: manualscalertraits.core.oam.dev
  workloadRefPath: spec.workloadRef
  extension:
    template: |-
      outputs: scaler: {
        apiVersion: "core.oam.dev/v1alpha2"
        kind: "ManualScalerTrait"
        metadata: {
          namespace: "cross-namespace"
        }
        spec: {
          replicaCount: parameter.replicas
        }
      }
      parameter: {
        //+short=r
        replicas: *1 | int
      }
`
)