Feat: move envbinding policy into workflow (#2556)

Fix: add more tests
This commit is contained in:
Somefive
2021-11-05 17:29:05 +08:00
committed by GitHub
parent 50cfe0c68f
commit bf3a1cddf9
43 changed files with 1701 additions and 3492 deletions

View File

@@ -1,3 +1,5 @@
SHELL := /bin/bash
# Vela version
VELA_VERSION ?= master
# Repo info

View File

@@ -306,6 +306,17 @@ type AppStatus struct {
// AppliedResources record the resources that the workflow step apply.
AppliedResources []ClusterObjectReference `json:"appliedResources,omitempty"`
// PolicyStatus records the status of policy
PolicyStatus []PolicyStatus `json:"policy,omitempty"`
}
// PolicyStatus records the observed status of a single policy applied to the application.
type PolicyStatus struct {
// Name is the name of the policy instance.
Name string `json:"name"`
// Type is the policy type (e.g. which policy definition it instantiates).
Type string `json:"type"`
// Status holds an arbitrary, policy-specific status payload.
// +kubebuilder:pruning:PreserveUnknownFields
Status *runtime.RawExtension `json:"status,omitempty"`
}
// WorkflowStatus record the status of workflow

View File

@@ -83,6 +83,13 @@ func (in *AppStatus) DeepCopyInto(out *AppStatus) {
*out = make([]ClusterObjectReference, len(*in))
copy(*out, *in)
}
if in.PolicyStatus != nil {
in, out := &in.PolicyStatus, &out.PolicyStatus
*out = make([]PolicyStatus, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppStatus.
@@ -401,6 +408,26 @@ func (in *KubeParameter) DeepCopy() *KubeParameter {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE: generated by deepcopy-gen/controller-gen — regenerate rather than hand-edit.
func (in *PolicyStatus) DeepCopyInto(out *PolicyStatus) {
*out = *in
if in.Status != nil {
// Status is a pointer: allocate a fresh RawExtension so the copy does not alias the original.
in, out := &in.Status, &out.Status
*out = new(runtime.RawExtension)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyStatus.
// NOTE: generated code — a nil receiver yields a nil result by convention.
func (in *PolicyStatus) DeepCopy() *PolicyStatus {
if in == nil {
return nil
}
out := new(PolicyStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RawComponent) DeepCopyInto(out *RawComponent) {
*out = *in

View File

@@ -17,43 +17,12 @@
package v1alpha1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/condition"
)
// ClusterManagementEngine represents a multi-cluster management solution
type ClusterManagementEngine string
const (
// OCMEngine represents Open-Cluster-Management multi-cluster management solution
OCMEngine ClusterManagementEngine = "ocm"
// SingleClusterEngine represents single cluster ClusterManagerEngine
SingleClusterEngine ClusterManagementEngine = "single-cluster"
// ClusterGatewayEngine represents multi-cluster management solution with cluster-gateway
ClusterGatewayEngine ClusterManagementEngine = "cluster-gateway"
)
// EnvBindingPhase is a label for the condition of a EnvBinding at the current time
type EnvBindingPhase string
const (
// EnvBindingPrepare means EnvBinding is preparing the pre-work for cluster scheduling
EnvBindingPrepare EnvBindingPhase = "preparing"
// EnvBindingRendering means EnvBinding is rendering the apps in different envs
EnvBindingRendering EnvBindingPhase = "rendering"
// EnvBindingScheduling means EnvBinding is deciding which cluster the apps is scheduled to.
EnvBindingScheduling EnvBindingPhase = "scheduling"
// EnvBindingFinished means EnvBinding finished env binding
EnvBindingFinished EnvBindingPhase = "finished"
// EnvBindingPolicyType refers to the type of EnvBinding
EnvBindingPolicyType = "env-binding"
)
// EnvPatch specify the parameter configuration for different environments
@@ -89,87 +58,24 @@ type EnvConfig struct {
Patch EnvPatch `json:"patch"`
}
// AppTemplate represents a application to be configured.
type AppTemplate struct {
// +kubebuilder:validation:EmbeddedResource
// +kubebuilder:pruning:PreserveUnknownFields
runtime.RawExtension `json:",inline"`
}
// ClusterDecision recorded the mapping of environment and cluster
type ClusterDecision struct {
Env string `json:"env"`
Cluster string `json:"cluster,omitempty"`
Namespace string `json:"namespace,omitempty"`
}
// A ConfigMapReference is a reference to a configMap in an arbitrary namespace.
type ConfigMapReference struct {
// Name of the secret.
Name string `json:"name"`
// Namespace of the secret.
Namespace string `json:"namespace,omitempty"`
}
// An EnvBindingSpec defines the desired state of an EnvBinding.
// EnvBindingSpec defines a list of envs
type EnvBindingSpec struct {
Engine ClusterManagementEngine `json:"engine,omitempty"`
// AppTemplate indicates the application template.
AppTemplate AppTemplate `json:"appTemplate"`
Envs []EnvConfig `json:"envs"`
// OutputResourcesTo specifies the namespace and name of a ConfigMap
// which store the resources rendered after differentiated configuration
// +optional
OutputResourcesTo *ConfigMapReference `json:"outputResourcesTo,omitempty"`
}
// An EnvBindingStatus is the status of an EnvBinding
// PlacementDecision describes the placement of one application instance
type PlacementDecision struct {
// Cluster is the target cluster the instance is scheduled to.
Cluster string `json:"cluster"`
// Namespace is the target namespace within that cluster.
Namespace string `json:"namespace"`
}
// EnvStatus records the status of one env
type EnvStatus struct {
// Env is the name of the environment this status describes.
Env string `json:"env"`
// Placements lists where the env's application instances were scheduled.
Placements []PlacementDecision `json:"placements"`
}
// EnvBindingStatus records the status of all env
type EnvBindingStatus struct {
// ConditionedStatus reflects the observed status of a resource
condition.ConditionedStatus `json:",inline"`
Phase EnvBindingPhase `json:"phase,omitempty"`
ClusterDecisions []ClusterDecision `json:"clusterDecisions,omitempty"`
// ResourceTracker record the status of the ResourceTracker
ResourceTracker *corev1.ObjectReference `json:"resourceTracker,omitempty"`
}
// EnvBinding is the Schema for the EnvBinding API
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:scope=Namespaced,categories={oam},shortName=envbind
// +kubebuilder:printcolumn:name="ENGINE",type=string,JSONPath=`.spec.engine`
// +kubebuilder:printcolumn:name="PHASE",type=string,JSONPath=`.status.phase`
// +kubebuilder:printcolumn:name="AGE",type=date,JSONPath=".metadata.creationTimestamp"
type EnvBinding struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec EnvBindingSpec `json:"spec,omitempty"`
Status EnvBindingStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// EnvBindingList contains a list of EnvBinding.
type EnvBindingList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []EnvBinding `json:"items"`
}
// SetConditions set condition for EnvBinding
func (e *EnvBinding) SetConditions(c ...condition.Condition) {
e.Status.SetConditions(c...)
}
// GetCondition gets condition from EnvBinding
func (e *EnvBinding) GetCondition(conditionType condition.ConditionType) condition.Condition {
return e.Status.GetCondition(conditionType)
Envs []EnvStatus `json:"envs"`
}

View File

@@ -17,8 +17,6 @@
package v1alpha1
import (
"reflect"
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
@@ -37,14 +35,5 @@ var (
SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
)
// EnvBinding type metadata.
var (
EnvBindingKind = reflect.TypeOf(EnvBinding{}).Name()
EnvBindingGroupKind = schema.GroupKind{Group: Group, Kind: EnvBindingKind}.String()
EnvBindingKindAPIVersion = EnvBindingKind + "." + SchemeGroupVersion.String()
EnvBindingKindVersionKind = SchemeGroupVersion.WithKind(EnvBindingKind)
)
func init() {
SchemeBuilder.Register(&EnvBinding{}, &EnvBindingList{})
}

View File

@@ -21,121 +21,12 @@ limitations under the License.
package v1alpha1
import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AppTemplate) DeepCopyInto(out *AppTemplate) {
*out = *in
in.RawExtension.DeepCopyInto(&out.RawExtension)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppTemplate.
func (in *AppTemplate) DeepCopy() *AppTemplate {
if in == nil {
return nil
}
out := new(AppTemplate)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterDecision) DeepCopyInto(out *ClusterDecision) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDecision.
func (in *ClusterDecision) DeepCopy() *ClusterDecision {
if in == nil {
return nil
}
out := new(ClusterDecision)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConfigMapReference) DeepCopyInto(out *ConfigMapReference) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapReference.
func (in *ConfigMapReference) DeepCopy() *ConfigMapReference {
if in == nil {
return nil
}
out := new(ConfigMapReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EnvBinding) DeepCopyInto(out *EnvBinding) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvBinding.
func (in *EnvBinding) DeepCopy() *EnvBinding {
if in == nil {
return nil
}
out := new(EnvBinding)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *EnvBinding) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EnvBindingList) DeepCopyInto(out *EnvBindingList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]EnvBinding, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvBindingList.
func (in *EnvBindingList) DeepCopy() *EnvBindingList {
if in == nil {
return nil
}
out := new(EnvBindingList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *EnvBindingList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EnvBindingSpec) DeepCopyInto(out *EnvBindingSpec) {
*out = *in
in.AppTemplate.DeepCopyInto(&out.AppTemplate)
if in.Envs != nil {
in, out := &in.Envs, &out.Envs
*out = make([]EnvConfig, len(*in))
@@ -143,11 +34,6 @@ func (in *EnvBindingSpec) DeepCopyInto(out *EnvBindingSpec) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.OutputResourcesTo != nil {
in, out := &in.OutputResourcesTo, &out.OutputResourcesTo
*out = new(ConfigMapReference)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvBindingSpec.
@@ -163,16 +49,12 @@ func (in *EnvBindingSpec) DeepCopy() *EnvBindingSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EnvBindingStatus) DeepCopyInto(out *EnvBindingStatus) {
*out = *in
in.ConditionedStatus.DeepCopyInto(&out.ConditionedStatus)
if in.ClusterDecisions != nil {
in, out := &in.ClusterDecisions, &out.ClusterDecisions
*out = make([]ClusterDecision, len(*in))
copy(*out, *in)
}
if in.ResourceTracker != nil {
in, out := &in.ResourceTracker, &out.ResourceTracker
*out = new(v1.ObjectReference)
**out = **in
if in.Envs != nil {
in, out := &in.Envs, &out.Envs
*out = make([]EnvStatus, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
@@ -275,6 +157,26 @@ func (in *EnvSelector) DeepCopy() *EnvSelector {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE: generated code — regenerate rather than hand-edit.
func (in *EnvStatus) DeepCopyInto(out *EnvStatus) {
*out = *in
if in.Placements != nil {
// PlacementDecision contains only value fields, so a shallow element copy is a deep copy.
in, out := &in.Placements, &out.Placements
*out = make([]PlacementDecision, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvStatus.
// NOTE: generated code — a nil receiver yields a nil result by convention.
func (in *EnvStatus) DeepCopy() *EnvStatus {
if in == nil {
return nil
}
out := new(EnvStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamespaceSelector) DeepCopyInto(out *NamespaceSelector) {
*out = *in
@@ -296,3 +198,18 @@ func (in *NamespaceSelector) DeepCopy() *NamespaceSelector {
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE: generated code — PlacementDecision has only value fields, so assignment is a deep copy.
func (in *PlacementDecision) DeepCopyInto(out *PlacementDecision) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementDecision.
// NOTE: generated code — a nil receiver yields a nil result by convention.
func (in *PlacementDecision) DeepCopy() *PlacementDecision {
if in == nil {
return nil
}
out := new(PlacementDecision)
in.DeepCopyInto(out)
return out
}

View File

@@ -622,6 +622,23 @@ spec:
description: The generation observed by the application controller.
format: int64
type: integer
policy:
description: PolicyStatus records the status of policy
items:
description: PolicyStatus records the status of policy
properties:
name:
type: string
status:
type: object
x-kubernetes-preserve-unknown-fields: true
type:
type: string
required:
- name
- type
type: object
type: array
resourceTracker:
description: ResourceTracker record the status of the ResourceTracker
properties:
@@ -2778,6 +2795,23 @@ spec:
description: The generation observed by the application controller.
format: int64
type: integer
policy:
description: PolicyStatus records the status of policy
items:
description: PolicyStatus records the status of policy
properties:
name:
type: string
status:
type: object
x-kubernetes-preserve-unknown-fields: true
type:
type: string
required:
- name
- type
type: object
type: array
resourceTracker:
description: ResourceTracker record the status of the ResourceTracker
properties:

View File

@@ -443,6 +443,23 @@ spec:
description: The generation observed by the application controller.
format: int64
type: integer
policy:
description: PolicyStatus records the status of policy
items:
description: PolicyStatus records the status of policy
properties:
name:
type: string
status:
type: object
x-kubernetes-preserve-unknown-fields: true
type:
type: string
required:
- name
- type
type: object
type: array
resourceTracker:
description: ResourceTracker record the status of the ResourceTracker
properties:
@@ -1250,6 +1267,23 @@ spec:
description: The generation observed by the application controller.
format: int64
type: integer
policy:
description: PolicyStatus records the status of policy
items:
description: PolicyStatus records the status of policy
properties:
name:
type: string
status:
type: object
x-kubernetes-preserve-unknown-fields: true
type:
type: string
required:
- name
- type
type: object
type: array
resourceTracker:
description: ResourceTracker record the status of the ResourceTracker
properties:

View File

@@ -622,6 +622,23 @@ spec:
description: The generation observed by the application controller.
format: int64
type: integer
policy:
description: PolicyStatus records the status of policy
items:
description: PolicyStatus records the status of policy
properties:
name:
type: string
status:
type: object
x-kubernetes-preserve-unknown-fields: true
type:
type: string
required:
- name
- type
type: object
type: array
resourceTracker:
description: ResourceTracker record the status of the ResourceTracker
properties:
@@ -2778,6 +2795,23 @@ spec:
description: The generation observed by the application controller.
format: int64
type: integer
policy:
description: PolicyStatus records the status of policy
items:
description: PolicyStatus records the status of policy
properties:
name:
type: string
status:
type: object
x-kubernetes-preserve-unknown-fields: true
type:
type: string
required:
- name
- type
type: object
type: array
resourceTracker:
description: ResourceTracker record the status of the ResourceTracker
properties:

View File

@@ -443,6 +443,23 @@ spec:
description: The generation observed by the application controller.
format: int64
type: integer
policy:
description: PolicyStatus records the status of policy
items:
description: PolicyStatus records the status of policy
properties:
name:
type: string
status:
type: object
x-kubernetes-preserve-unknown-fields: true
type:
type: string
required:
- name
- type
type: object
type: array
resourceTracker:
description: ResourceTracker record the status of the ResourceTracker
properties:
@@ -1250,6 +1267,23 @@ spec:
description: The generation observed by the application controller.
format: int64
type: integer
policy:
description: PolicyStatus records the status of policy
items:
description: PolicyStatus records the status of policy
properties:
name:
type: string
status:
type: object
x-kubernetes-preserve-unknown-fields: true
type:
type: string
required:
- name
- type
type: object
type: array
resourceTracker:
description: ResourceTracker record the status of the ResourceTracker
properties:

View File

@@ -622,6 +622,23 @@ spec:
description: The generation observed by the application controller.
format: int64
type: integer
policy:
description: PolicyStatus records the status of policy
items:
description: PolicyStatus records the status of policy
properties:
name:
type: string
status:
type: object
type:
type: string
required:
- name
- type
type: object
type: array
resourceTracker:
description: ResourceTracker record the status of the ResourceTracker
properties:
@@ -2778,6 +2795,23 @@ spec:
description: The generation observed by the application controller.
format: int64
type: integer
policy:
description: PolicyStatus records the status of policy
items:
description: PolicyStatus records the status of policy
properties:
name:
type: string
status:
type: object
type:
type: string
required:
- name
- type
type: object
type: array
resourceTracker:
description: ResourceTracker record the status of the ResourceTracker
properties:

View File

@@ -564,6 +564,23 @@ spec:
description: The generation observed by the application controller.
format: int64
type: integer
policy:
description: PolicyStatus records the status of policy
items:
description: PolicyStatus records the status of policy
properties:
name:
type: string
status:
type: object
type:
type: string
required:
- name
- type
type: object
type: array
resourceTracker:
description: ResourceTracker record the status of the ResourceTracker
properties:
@@ -1638,6 +1655,23 @@ spec:
description: The generation observed by the application controller.
format: int64
type: integer
policy:
description: PolicyStatus records the status of policy
items:
description: PolicyStatus records the status of policy
properties:
name:
type: string
status:
type: object
type:
type: string
required:
- name
- type
type: object
type: array
resourceTracker:
description: ResourceTracker record the status of the ResourceTracker
properties:

View File

@@ -37,6 +37,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/appfile/helm"
@@ -180,6 +181,9 @@ func (af *Appfile) PrepareWorkflowAndPolicy() (policies []*unstructured.Unstruct
func (af *Appfile) generateUnstructureds(workloads []*Workload) ([]*unstructured.Unstructured, error) {
var uns []*unstructured.Unstructured
for _, wl := range workloads {
if wl.Type == v1alpha1.EnvBindingPolicyType {
continue
}
un, err := generateUnstructuredFromCUEModule(wl, af.Name, af.AppRevisionName, af.Namespace, af.Components, af.Artifacts)
if err != nil {
return nil, err

View File

@@ -1,327 +0,0 @@
/*
Copyright 2021. The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package envbinding
import (
"context"
"encoding/json"
"github.com/imdario/mergo"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/appfile"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/util"
)
// EnvBindApp describes the app bound to the environment
type EnvBindApp struct {
// baseApp is the original Application before any env-specific patching.
baseApp *v1beta1.Application
// PatchedApp is baseApp with the env's component patches and selector applied
// (set by GenerateConfiguredApplication).
PatchedApp *v1beta1.Application
// envConfig is the environment this app is bound to.
envConfig *v1alpha1.EnvConfig
// componentManifests is the rendered output of PatchedApp (set by render).
componentManifests []*types.ComponentManifest
// assembledManifests maps component name to its workload/trait resources (set by assemble).
assembledManifests map[string][]*unstructured.Unstructured
// ScheduledManifests holds the manifests after scheduling; presumably keyed by
// resource name — TODO confirm with the scheduling code that populates it.
ScheduledManifests map[string]*unstructured.Unstructured
}
// NewEnvBindApp builds an EnvBindApp that binds the given base Application to
// one environment configuration. Patching, rendering and assembly happen in
// later, separate steps.
func NewEnvBindApp(base *v1beta1.Application, envConfig *v1alpha1.EnvConfig) *EnvBindApp {
	app := new(EnvBindApp)
	app.baseApp = base
	app.envConfig = envConfig
	return app
}
// GenerateConfiguredApplication builds PatchedApp by deep-copying the base
// Application and merging the env's component patches into it:
//   - a patch component matching an existing component (same name AND type)
//     is merged into that component in place;
//   - unmatched patch components are appended as brand-new components;
//   - finally, if the env config carries a selector, only selected components
//     are kept.
func (e *EnvBindApp) GenerateConfiguredApplication() error {
newApp := e.baseApp.DeepCopy()
var baseComponent *common.ApplicationComponent
var misMatchedIdxs []int
for patchIdx := range e.envConfig.Patch.Components {
var matchedIdx int
isMatched := false
patchComponent := e.envConfig.Patch.Components[patchIdx]
for baseIdx := range e.baseApp.Spec.Components {
// component is a fresh per-iteration copy, so taking its address below is safe.
component := e.baseApp.Spec.Components[baseIdx]
if patchComponent.Name == component.Name && patchComponent.Type == component.Type {
matchedIdx, baseComponent = baseIdx, &component
isMatched = true
break
}
}
if !isMatched || baseComponent == nil {
// Remember unmatched patches; they are appended as new components below.
misMatchedIdxs = append(misMatchedIdxs, patchIdx)
continue
}
targetComponent, err := PatchComponent(baseComponent, &patchComponent)
if err != nil {
return err
}
newApp.Spec.Components[matchedIdx] = *targetComponent
}
for _, idx := range misMatchedIdxs {
newApp.Spec.Components = append(newApp.Spec.Components, e.envConfig.Patch.Components[idx])
}
// select which components to use
// NOTE(review): a non-nil selector with an empty Components list keeps no
// components at all — confirm this "select none" behavior is intended.
if e.envConfig.Selector != nil {
compMap := make(map[string]bool)
if len(e.envConfig.Selector.Components) > 0 {
for _, comp := range e.envConfig.Selector.Components {
compMap[comp] = true
}
}
comps := make([]common.ApplicationComponent, 0)
for _, comp := range newApp.Spec.Components {
if _, ok := compMap[comp.Name]; ok {
comps = append(comps, comp)
}
}
newApp.Spec.Components = comps
}
e.PatchedApp = newApp
return nil
}
// render parses the patched Application into an appfile and generates its
// component manifests, storing them in e.componentManifests.
// GenerateConfiguredApplication must have been called first so that
// e.PatchedApp is populated; otherwise an error is returned.
func (e *EnvBindApp) render(ctx context.Context, appParser *appfile.Parser) error {
	if e.PatchedApp == nil {
		// Guard against calling render before GenerateConfiguredApplication.
		// (Grammar fixed: was "EnvBindApp must has been generated a configured Application".)
		return errors.New("EnvBindApp must have generated a configured Application first")
	}
	// Make sure downstream parsing sees the patched application's namespace.
	ctx = util.SetNamespaceInCtx(ctx, e.PatchedApp.Namespace)
	appFile, err := appParser.GenerateAppFile(ctx, e.PatchedApp)
	if err != nil {
		return err
	}
	comps, err := appFile.GenerateComponentManifests()
	if err != nil {
		return err
	}
	e.componentManifests = comps
	return nil
}
// assemble turns the rendered component manifests into per-component resource
// lists (workload first, then traits, then any packaged workload resources),
// normalizing names, namespaces, and OAM resource-type labels along the way.
// render must have been called first so that e.componentManifests is set.
func (e *EnvBindApp) assemble() error {
	if e.componentManifests == nil {
		// Guard: render() must run before assemble().
		// (Grammar fixed: was "EnvBindApp must has been rendered".)
		return errors.New("EnvBindApp must have been rendered")
	}
	assembledManifests := make(map[string][]*unstructured.Unstructured, len(e.componentManifests))
	for _, comp := range e.componentManifests {
		// One slot for the workload plus one per trait.
		resources := make([]*unstructured.Unstructured, len(comp.Traits)+1)
		workload := comp.StandardWorkload
		workload.SetName(comp.Name)
		e.SetNamespace(workload)
		util.AddLabels(workload, map[string]string{oam.LabelOAMResourceType: oam.ResourceTypeWorkload})
		resources[0] = workload
		for i := 0; i < len(comp.Traits); i++ {
			trait := comp.Traits[i]
			util.AddLabels(trait, map[string]string{oam.LabelOAMResourceType: oam.ResourceTypeTrait})
			e.SetTraitName(comp.Name, trait)
			e.SetNamespace(trait)
			resources[i+1] = trait
		}
		assembledManifests[comp.Name] = resources
		// Helm/packaged resources ride along after the workload and traits.
		if len(comp.PackagedWorkloadResources) != 0 {
			assembledManifests[comp.Name] = append(assembledManifests[comp.Name], comp.PackagedWorkloadResources...)
		}
	}
	e.assembledManifests = assembledManifests
	return nil
}
// SetTraitName assigns a deterministic name to an unnamed trait, derived from
// the owning component's name and the trait's type label. Traits that already
// carry a name are left untouched.
func (e *EnvBindApp) SetTraitName(compName string, trait *unstructured.Unstructured) {
	if trait.GetName() != "" {
		return
	}
	traitType := trait.GetLabels()[oam.TraitTypeLabel]
	trait.SetName(util.GenTraitNameCompatible(compName, trait, traitType))
}
// SetNamespace defaults the resource's namespace to the patched application's
// namespace, falling back to "default" when the app has none. A namespace that
// is already set on the resource is left untouched.
func (e *EnvBindApp) SetNamespace(resource *unstructured.Unstructured) {
	if resource.GetNamespace() != "" {
		return
	}
	ns := e.PatchedApp.Namespace
	if ns == "" {
		ns = "default"
	}
	resource.SetNamespace(ns)
}
// CreateEnvBindApps builds one EnvBindApp per env config in the EnvBinding and
// immediately patches each against the base Application. It fails fast on the
// first env whose patch cannot be applied.
func CreateEnvBindApps(envBinding *v1alpha1.EnvBinding, baseApp *v1beta1.Application) ([]*EnvBindApp, error) {
envBindApps := make([]*EnvBindApp, len(envBinding.Spec.Envs))
for i := range envBinding.Spec.Envs {
// env is a fresh per-iteration copy, so &env is stable for this EnvBindApp.
env := envBinding.Spec.Envs[i]
envBindApp := NewEnvBindApp(baseApp, &env)
err := envBindApp.GenerateConfiguredApplication()
if err != nil {
return nil, errors.WithMessagef(err, "failed to patch parameter for env %s", env.Name)
}
envBindApps[i] = envBindApp
}
return envBindApps, nil
}
// RenderEnvBindApps renders every EnvBindApp in the list, stopping at and
// wrapping the first failure with the name of the env whose rendering failed.
func RenderEnvBindApps(ctx context.Context, envBindApps []*EnvBindApp, appParser *appfile.Parser) error {
	for _, app := range envBindApps {
		if err := app.render(ctx, appParser); err != nil {
			return errors.WithMessagef(err, "fail to render application for env %s", app.envConfig.Name)
		}
	}
	return nil
}
// AssembleEnvBindApps assembles the resources of every EnvBindApp in the list,
// stopping at and wrapping the first failure with the offending env's name.
func AssembleEnvBindApps(envBindApps []*EnvBindApp) error {
	for _, app := range envBindApps {
		if err := app.assemble(); err != nil {
			return errors.WithMessagef(err, "fail to assemble resource for application in env %s", app.envConfig.Name)
		}
	}
	return nil
}
// PatchComponent merges a patch component into a deep copy of the base
// component and returns the result. Properties are merged via PatchProperties
// (patch values override base values). Traits are matched by type: a matching
// trait has its properties merged in place, while unmatched patch traits are
// appended to the component's trait list. Neither input is mutated.
func PatchComponent(baseComponent *common.ApplicationComponent, patchComponent *common.ApplicationComponent) (*common.ApplicationComponent, error) {
targetComponent := baseComponent.DeepCopy()
mergedProperties, err := PatchProperties(baseComponent.Properties, patchComponent.Properties)
if err != nil {
return nil, errors.WithMessagef(err, "fail to patch properties for component %s", baseComponent.Name)
}
targetComponent.Properties = util.Object2RawExtension(mergedProperties)
var baseTrait *common.ApplicationTrait
var misMatchedIdxs []int
for patchIdx := range patchComponent.Traits {
var matchedIdx int
isMatched := false
patchTrait := patchComponent.Traits[patchIdx]
for index := range targetComponent.Traits {
// trait is a fresh per-iteration copy, so taking its address below is safe.
trait := targetComponent.Traits[index]
if patchTrait.Type == trait.Type {
matchedIdx, baseTrait = index, &trait
isMatched = true
break
}
}
if !isMatched || baseTrait == nil {
// Traits with no matching type are appended verbatim after the loop.
misMatchedIdxs = append(misMatchedIdxs, patchIdx)
continue
}
mergedProperties, err = PatchProperties(baseTrait.Properties, patchTrait.Properties)
if err != nil {
return nil, err
}
targetComponent.Traits[matchedIdx].Properties = util.Object2RawExtension(mergedProperties)
}
for _, idx := range misMatchedIdxs {
targetComponent.Traits = append(targetComponent.Traits, patchComponent.Traits[idx])
}
return targetComponent, nil
}
// PatchProperties decodes both RawExtensions into maps and deep-merges the
// patch map into the base map, with patch values overriding non-empty base
// values (mergo.WithOverride). A nil base is treated as an empty map. The
// merged map is returned; neither input RawExtension is mutated.
func PatchProperties(dst *runtime.RawExtension, patch *runtime.RawExtension) (map[string]interface{}, error) {
patchParameter, err := util.RawExtension2Map(patch)
if err != nil {
return nil, err
}
baseParameter, err := util.RawExtension2Map(dst)
if err != nil {
return nil, err
}
if baseParameter == nil {
baseParameter = make(map[string]interface{})
}
opts := []func(*mergo.Config){
// WithOverride will make merge override non-empty dst attributes with non-empty src attributes values.
mergo.WithOverride,
}
err = mergo.Merge(&baseParameter, patchParameter, opts...)
if err != nil {
return nil, err
}
return baseParameter, nil
}
// StoreManifest2ConfigMap stores the scheduled manifests of each env-bound app
// into the ConfigMap named by envBinding.Spec.OutputResourcesTo (one JSON blob
// per env, keyed by env name), creating the ConfigMap when it does not exist
// and updating it otherwise. The ConfigMap is owned by the EnvBinding so it is
// garbage-collected along with it.
func StoreManifest2ConfigMap(ctx context.Context, cli client.Client, envBinding *v1alpha1.EnvBinding, apps []*EnvBindApp) error {
	// Robustness fix: fail with a clear error instead of a nil-pointer panic
	// when no output ConfigMap reference was configured on the spec.
	if envBinding.Spec.OutputResourcesTo == nil {
		return errors.New("no outputResourcesTo specified in EnvBinding spec")
	}
	cm := new(corev1.ConfigMap)
	data := make(map[string]string)
	for _, app := range apps {
		// Serialize this env's scheduled manifests as a single JSON document.
		m := make(map[string]map[string]interface{})
		for name, manifest := range app.ScheduledManifests {
			m[name] = manifest.UnstructuredContent()
		}
		d, err := json.Marshal(m)
		if err != nil {
			return errors.Wrapf(err, "fail to marshal patched application for env %s", app.envConfig.Name)
		}
		data[app.envConfig.Name] = string(d)
	}
	cm.Data = data
	cm.SetName(envBinding.Spec.OutputResourcesTo.Name)
	if len(envBinding.Spec.OutputResourcesTo.Namespace) == 0 {
		// Fall back to the "default" namespace when none is given.
		cm.SetNamespace("default")
	} else {
		cm.SetNamespace(envBinding.Spec.OutputResourcesTo.Namespace)
	}
	ownerReference := []metav1.OwnerReference{{
		APIVersion:         envBinding.APIVersion,
		Kind:               envBinding.Kind,
		Name:               envBinding.Name,
		UID:                envBinding.GetUID(),
		Controller:         pointer.BoolPtr(true),
		BlockOwnerDeletion: pointer.BoolPtr(true),
	}}
	cm.SetOwnerReferences(ownerReference)
	// Create when absent, update when present (get uses a copy so cm stays clean).
	cmCopy := cm.DeepCopy()
	if err := cli.Get(ctx, client.ObjectKey{Namespace: cmCopy.Namespace, Name: cmCopy.Name}, cmCopy); err != nil {
		if kerrors.IsNotFound(err) {
			return cli.Create(ctx, cm)
		}
		return err
	}
	return cli.Update(ctx, cm)
}

View File

@@ -1,131 +0,0 @@
/*
Copyright 2021. The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package envbinding
import (
"context"
"github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/appfile"
"github.com/oam-dev/kubevela/pkg/multicluster"
)
const (
// OverrideNamespaceLabelKey identifies the override namespace for patched Application
OverrideNamespaceLabelKey = "envbinding.oam.dev/override-namespace"
)
// ClusterGatewayEngine construct the multicluster engine of using cluster-gateway
type ClusterGatewayEngine struct {
client.Client
// envBindingName is the name of the EnvBinding this engine serves.
envBindingName string
// clusterDecisions caches scheduling decisions; presumably keyed by env name —
// TODO confirm against the code that populates it (populated during prepare).
clusterDecisions map[string]v1alpha1.ClusterDecision
}
// NewClusterGatewayEngine create multicluster engine to use cluster-gateway
func NewClusterGatewayEngine(cli client.Client, envBindingName string) ClusterManagerEngine {
	engine := &ClusterGatewayEngine{Client: cli}
	engine.envBindingName = envBindingName
	return engine
}
// prepare validates the placement of every env config and records a
// ClusterDecision (cluster + namespace) for it in engine.clusterDecisions.
// It rejects label selectors (unsupported here), rejects two envs targeting
// the same cluster/namespace location, and verifies non-local clusters exist.
// TODO only support single cluster name and namespace name now, should support label selector
func (engine *ClusterGatewayEngine) prepare(ctx context.Context, configs []v1alpha1.EnvConfig) error {
	engine.clusterDecisions = make(map[string]v1alpha1.ClusterDecision)
	locationToConfig := make(map[string]string)
	for _, config := range configs {
		var namespace, clusterName string
		// check if namespace selector is valid
		if config.Placement.NamespaceSelector != nil {
			if len(config.Placement.NamespaceSelector.Labels) != 0 {
				return errors.Errorf("invalid env %s: namespace selector in cluster-gateway does not support label selector for now", config.Name)
			}
			namespace = config.Placement.NamespaceSelector.Name
		}
		// check if cluster selector is valid
		if config.Placement.ClusterSelector != nil {
			if len(config.Placement.ClusterSelector.Labels) != 0 {
				return errors.Errorf("invalid env %s: cluster selector does not support label selector for now", config.Name)
			}
			clusterName = config.Placement.ClusterSelector.Name
		}
		// set fallback cluster
		if clusterName == "" {
			clusterName = multicluster.ClusterLocalName
		}
		// check if current environment uses the same cluster and namespace as resource destination with other environment, if yes, a conflict occurs
		location := clusterName + "/" + namespace
		if dupConfigName, ok := locationToConfig[location]; ok {
			return errors.Errorf("invalid env %s: location %s conflict with env %s", config.Name, location, dupConfigName)
		}
		// FIX: key the map by the full location ("cluster/namespace"). The
		// previous code inserted under `clusterName` while the duplicate check
		// above looks up `location`, so conflicts were never detected.
		locationToConfig[location] = config.Name
		// check if target cluster exists
		if clusterName != multicluster.ClusterLocalName {
			if err := engine.Get(ctx, types.NamespacedName{Namespace: multicluster.ClusterGatewaySecretNamespace, Name: clusterName}, &v1.Secret{}); err != nil {
				return errors.Wrapf(err, "failed to get cluster %s for env %s", clusterName, config.Name)
			}
		}
		engine.clusterDecisions[config.Name] = v1alpha1.ClusterDecision{Env: config.Name, Cluster: clusterName, Namespace: namespace}
	}
	return nil
}
// initEnvBindApps builds the per-env patched applications from the base app.
// ctx and appParser are unused here: cluster-gateway mode dispatches whole
// Applications (see schedule) rather than rendering component manifests.
func (engine *ClusterGatewayEngine) initEnvBindApps(ctx context.Context, envBinding *v1alpha1.EnvBinding, baseApp *v1beta1.Application, appParser *appfile.Parser) ([]*EnvBindApp, error) {
	return CreateEnvBindApps(envBinding, baseApp)
}
// schedule converts each env's patched Application to an unstructured object,
// labels it with the target cluster and (optional) override namespace decided
// in prepare, and returns one ClusterDecision per env.
func (engine *ClusterGatewayEngine) schedule(ctx context.Context, apps []*EnvBindApp) ([]v1alpha1.ClusterDecision, error) {
	for _, app := range apps {
		app.ScheduledManifests = make(map[string]*unstructured.Unstructured)
		// decisions were recorded by prepare, keyed by env config name
		clusterName := engine.clusterDecisions[app.envConfig.Name].Cluster
		namespace := engine.clusterDecisions[app.envConfig.Name].Namespace
		raw, err := runtime.DefaultUnstructuredConverter.ToUnstructured(app.PatchedApp)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to convert app [Env: %s](%s/%s) into unstructured", app.envConfig.Name, app.PatchedApp.Namespace, app.PatchedApp.Name)
		}
		patchedApp := &unstructured.Unstructured{Object: raw}
		multicluster.SetClusterName(patchedApp, clusterName)
		SetOverrideNamespace(patchedApp, namespace)
		app.ScheduledManifests[patchedApp.GetName()] = patchedApp
	}
	var decisions []v1alpha1.ClusterDecision
	for _, decision := range engine.clusterDecisions {
		decisions = append(decisions, decision)
	}
	return decisions, nil
}
// SetOverrideNamespace set the override namespace for object in its label.
// An empty overrideNamespace leaves the object untouched.
func SetOverrideNamespace(obj *unstructured.Unstructured, overrideNamespace string) {
	if overrideNamespace == "" {
		return
	}
	labels := obj.GetLabels()
	if labels == nil {
		labels = make(map[string]string)
	}
	labels[OverrideNamespaceLabelKey] = overrideNamespace
	obj.SetLabels(labels)
}

View File

@@ -1,377 +0,0 @@
/*
Copyright 2021. The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package envbinding
import (
"context"
"fmt"
"reflect"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog/v2"
ocmclusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1"
ocmworkv1 "open-cluster-management.io/api/work/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/appfile"
"github.com/oam-dev/kubevela/pkg/oam/util"
)
// ClusterManagerEngine defines Cluster Manage interface
type ClusterManagerEngine interface {
	// prepare validates env configs and records cluster scheduling decisions.
	prepare(ctx context.Context, configs []v1alpha1.EnvConfig) error
	// initEnvBindApps builds the per-env patched applications from the base app.
	initEnvBindApps(ctx context.Context, envBinding *v1alpha1.EnvBinding, baseApp *v1beta1.Application, appParser *appfile.Parser) ([]*EnvBindApp, error)
	// schedule fills each app's ScheduledManifests and returns the decisions.
	schedule(ctx context.Context, apps []*EnvBindApp) ([]v1alpha1.ClusterDecision, error)
}
// OCMEngine represents Open-Cluster-Management multi-cluster management solution
type OCMEngine struct {
	cli              client.Client
	clusterDecisions map[string]string // env config name -> selected cluster; set by prepare
	appNs            string            // namespace of the base Application
	envBindingName   string            // name of the owning EnvBinding
	appName          string            // name of the base Application
}
// NewOCMEngine create Open-Cluster-Management ClusterManagerEngine
func NewOCMEngine(cli client.Client, appName, appNs, envBindingName string) ClusterManagerEngine {
	engine := &OCMEngine{cli: cli}
	engine.appName = appName
	engine.appNs = appNs
	engine.envBindingName = envBindingName
	return engine
}
// prepare complete the pre-work of cluster scheduling and select the target cluster
// 1) if user directly specify the cluster name, Prepare will do nothing
// 2) if user use Labels to select the target cluster, Prepare will create the Placement to select cluster
// NOTE(review): assumes config.Placement.ClusterSelector is non-nil;
// validatePlacement enforces this for the OCM engine — confirm all callers validate first.
func (o *OCMEngine) prepare(ctx context.Context, configs []v1alpha1.EnvConfig) error {
	var err error
	// first pass: dispatch a Placement for every env that selects by labels
	for _, config := range configs {
		if len(config.Placement.ClusterSelector.Name) != 0 {
			continue
		}
		err = o.dispatchPlacement(ctx, config)
		if err != nil {
			return err
		}
	}
	// second pass: resolve each env to a concrete cluster name
	clusterDecisions := make(map[string]string)
	for _, config := range configs {
		if len(config.Placement.ClusterSelector.Name) != 0 {
			clusterDecisions[config.Name] = config.Placement.ClusterSelector.Name
			continue
		}
		// read the PlacementDecision produced for the Placement dispatched above
		placementName := generatePlacementName(o.appName, config.Name)
		clusterDecisions[config.Name], err = o.getSelectedCluster(ctx, placementName, o.appNs)
		if err != nil {
			return err
		}
	}
	o.clusterDecisions = clusterDecisions
	return nil
}
// initEnvBindApps patches the base app per env, renders the component
// manifests, and assembles them so schedule can pack them into ManifestWorks.
func (o *OCMEngine) initEnvBindApps(ctx context.Context, envBinding *v1alpha1.EnvBinding, baseApp *v1beta1.Application, appParser *appfile.Parser) ([]*EnvBindApp, error) {
	apps, err := CreateEnvBindApps(envBinding, baseApp)
	if err != nil {
		return nil, err
	}
	if renderErr := RenderEnvBindApps(ctx, apps, appParser); renderErr != nil {
		return nil, renderErr
	}
	if assembleErr := AssembleEnvBindApps(apps); assembleErr != nil {
		return nil, assembleErr
	}
	return apps, nil
}
// Schedule decides which cluster the apps is scheduled to
func (o *OCMEngine) schedule(ctx context.Context, apps []*EnvBindApp) ([]v1alpha1.ClusterDecision, error) {
	var clusterDecisions []v1alpha1.ClusterDecision
	for i := range apps {
		app := apps[i]
		app.ScheduledManifests = make(map[string]*unstructured.Unstructured, 1)
		clusterName := o.clusterDecisions[app.envConfig.Name]
		// pack all assembled component manifests of this env into one ManifestWork
		manifestWork := new(ocmworkv1.ManifestWork)
		workloads := make([]ocmworkv1.Manifest, 0, len(app.assembledManifests))
		for _, component := range app.PatchedApp.Spec.Components {
			manifest := app.assembledManifests[component.Name]
			for j := range manifest {
				workloads = append(workloads, ocmworkv1.Manifest{
					RawExtension: *util.Object2RawExtension(manifest[j]),
				})
			}
		}
		manifestWork.Spec.Workload.Manifests = workloads
		obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(manifestWork)
		if err != nil {
			return nil, err
		}
		unstructuredManifestWork := &unstructured.Unstructured{
			Object: obj,
		}
		unstructuredManifestWork.SetGroupVersionKind(ocmworkv1.GroupVersion.WithKind(reflect.TypeOf(ocmworkv1.ManifestWork{}).Name()))
		// the ManifestWork is placed in the hub namespace named after the cluster.
		// NOTE(review): the map key is the envBindApp name while the object's
		// name is the app name — confirm this asymmetry is intended.
		envBindAppName := constructEnvBindAppName(o.envBindingName, app.envConfig.Name, o.appName)
		unstructuredManifestWork.SetName(o.appName)
		unstructuredManifestWork.SetNamespace(clusterName)
		app.ScheduledManifests[envBindAppName] = unstructuredManifestWork
	}
	for env, cluster := range o.clusterDecisions {
		clusterDecisions = append(clusterDecisions, v1alpha1.ClusterDecision{
			Env:     env,
			Cluster: cluster,
		})
	}
	return clusterDecisions, nil
}
// dispatchPlacement dispatch Placement Object of OCM for cluster selected
func (o *OCMEngine) dispatchPlacement(ctx context.Context, config v1alpha1.EnvConfig) error {
	placement := new(ocmclusterv1alpha1.Placement)
	placementName := generatePlacementName(o.appName, config.Name)
	placement.SetName(placementName)
	placement.SetNamespace(o.appNs)
	// exactly one cluster is selected per env
	clusterNum := int32(1)
	placement.Spec.NumberOfClusters = &clusterNum
	placement.Spec.Predicates = []ocmclusterv1alpha1.ClusterPredicate{{
		RequiredClusterSelector: ocmclusterv1alpha1.ClusterSelector{
			LabelSelector: metav1.LabelSelector{
				MatchLabels: config.Placement.ClusterSelector.Labels,
			},
		},
	}}
	// create when absent, otherwise merge-patch the existing Placement
	oldPd := new(ocmclusterv1alpha1.Placement)
	if err := o.cli.Get(ctx, client.ObjectKey{Namespace: placement.Namespace, Name: placement.Name}, oldPd); err != nil {
		if kerrors.IsNotFound(err) {
			return o.cli.Create(ctx, placement)
		}
		return err
	}
	return o.cli.Patch(ctx, placement, client.Merge)
}
// getSelectedCluster get selected cluster from PlacementDecision.
// It lists PlacementDecisions labeled with the given placement name and
// returns the first decided cluster.
func (o *OCMEngine) getSelectedCluster(ctx context.Context, name, namespace string) (string, error) {
	pdList := new(ocmclusterv1alpha1.PlacementDecisionList)
	opts := []client.ListOption{
		client.MatchingLabels{
			"cluster.open-cluster-management.io/placement": name,
		},
		client.InNamespace(namespace),
	}
	if err := o.cli.List(ctx, pdList, opts...); err != nil {
		return "", err
	}
	if len(pdList.Items) < 1 {
		return "", errors.New("fail to get PlacementDecision")
	}
	decisions := pdList.Items[0].Status.Decisions
	if len(decisions) < 1 {
		return "", errors.New("no matched cluster")
	}
	return decisions[0].ClusterName, nil
}
// generatePlacementName generate placementName from app Name and env Name
func generatePlacementName(appName, envName string) (name string) {
	name = fmt.Sprintf("%s-%s", appName, envName)
	return name
}
// SingleClusterEngine represents deploy resources to the local cluster
type SingleClusterEngine struct {
	cli                client.Client
	appNs              string            // namespace of the base Application
	appName            string            // name of the base Application
	envBindingName     string            // name of the owning EnvBinding
	clusterDecisions   map[string]string // env name -> cluster marker; set by prepare
	namespaceDecisions map[string]string // env name -> selected namespace; set by schedule
}
// NewSingleClusterEngine create a single cluster ClusterManagerEngine
func NewSingleClusterEngine(cli client.Client, appName, appNs, envBindingName string) ClusterManagerEngine {
	engine := &SingleClusterEngine{cli: cli}
	engine.appName = appName
	engine.appNs = appNs
	engine.envBindingName = envBindingName
	return engine
}
// prepare marks every env as targeting the local (single) cluster.
func (s *SingleClusterEngine) prepare(ctx context.Context, configs []v1alpha1.EnvConfig) error {
	decisions := make(map[string]string, len(configs))
	for _, cfg := range configs {
		decisions[cfg.Name] = string(v1alpha1.SingleClusterEngine)
	}
	s.clusterDecisions = decisions
	return nil
}
// initEnvBindApps only patches the base app per env; no render/assemble step
// is performed because schedule applies the whole Application object locally.
func (s *SingleClusterEngine) initEnvBindApps(ctx context.Context, envBinding *v1alpha1.EnvBinding, baseApp *v1beta1.Application, appParser *appfile.Parser) ([]*EnvBindApp, error) {
	return CreateEnvBindApps(envBinding, baseApp)
}
// schedule resolves a target namespace for each env's patched Application,
// records it as a scheduled manifest, and returns one decision per env.
func (s *SingleClusterEngine) schedule(ctx context.Context, apps []*EnvBindApp) ([]v1alpha1.ClusterDecision, error) {
	var clusterDecisions []v1alpha1.ClusterDecision
	namespaceDecisions := make(map[string]string)
	for i := range apps {
		app := apps[i]
		selectedNamespace, err := s.getSelectedNamespace(ctx, app)
		// check the error before recording the decision: the previous code
		// stored the (possibly empty) namespace before inspecting err
		if err != nil {
			return nil, err
		}
		namespaceDecisions[app.envConfig.Name] = selectedNamespace
		app.ScheduledManifests = make(map[string]*unstructured.Unstructured, 1)
		unstructuredApp, err := util.Object2Unstructured(app.PatchedApp)
		if err != nil {
			return nil, err
		}
		envBindAppName := constructEnvBindAppName(s.envBindingName, app.envConfig.Name, s.appName)
		unstructuredApp.SetNamespace(selectedNamespace)
		app.ScheduledManifests[envBindAppName] = unstructuredApp
	}
	s.namespaceDecisions = namespaceDecisions
	for env, cluster := range s.clusterDecisions {
		clusterDecisions = append(clusterDecisions, v1alpha1.ClusterDecision{
			Env:       env,
			Cluster:   cluster,
			Namespace: s.namespaceDecisions[env],
		})
	}
	return clusterDecisions, nil
}
// getSelectedNamespace resolves the target namespace for one env:
// an explicit selector name wins, then the first namespace matching the
// selector labels, and otherwise the patched app's own namespace.
func (s *SingleClusterEngine) getSelectedNamespace(ctx context.Context, envBindApp *EnvBindApp) (string, error) {
	selector := envBindApp.envConfig.Placement.NamespaceSelector
	if selector != nil {
		if len(selector.Name) != 0 {
			return selector.Name, nil
		}
		if len(selector.Labels) != 0 {
			namespaceList := new(corev1.NamespaceList)
			listOpts := []client.ListOption{
				client.MatchingLabels(selector.Labels),
			}
			if err := s.cli.List(ctx, namespaceList, listOpts...); err != nil {
				return "", errors.Wrapf(err, "fail to list selected namespace for env %s", envBindApp.envConfig.Name)
			}
			// FIX: the original combined `err != nil || len(items) == 0` and
			// called errors.Wrapf(nil, ...), which returns nil — so an empty
			// match list was silently reported as success with an empty
			// namespace. Report it as an explicit error instead.
			if len(namespaceList.Items) == 0 {
				return "", errors.Errorf("no namespace matched the selector labels for env %s", envBindApp.envConfig.Name)
			}
			return namespaceList.Items[0].Name, nil
		}
	}
	return envBindApp.PatchedApp.Namespace, nil
}
// validatePlacement ensures every env config has a cluster selector when the
// OCM engine is used (the default when no engine is specified).
func validatePlacement(envBinding *v1alpha1.EnvBinding) error {
	engine := envBinding.Spec.Engine
	if engine != v1alpha1.OCMEngine && len(engine) != 0 {
		return nil
	}
	for _, config := range envBinding.Spec.Envs {
		if config.Placement.ClusterSelector == nil {
			return errors.New("the cluster selector of placement shouldn't be empty")
		}
	}
	return nil
}
// constructEnvBindAppName builds the per-env application name as
// "<envBindingName>-<envName>-<appName>".
func constructEnvBindAppName(envBindingName, envName, appName string) (name string) {
	name = fmt.Sprintf("%s-%s-%s", envBindingName, envName, appName)
	return name
}
// constructResourceTrackerName builds the cluster-scoped tracker name as
// "envbinding-<envBindingName>-<namespace>".
func constructResourceTrackerName(envBindingName, namespace string) string {
	return fmt.Sprintf("envbinding-%s-%s", envBindingName, namespace)
}
// garbageCollect removes previously tracked resources that are no longer part
// of the newly scheduled manifests. When manifests are only exported to a
// ConfigMap (OutputResourcesTo set), the whole resource tracker is deleted.
func garbageCollect(ctx context.Context, k8sClient client.Client, envBinding *v1alpha1.EnvBinding, apps []*EnvBindApp) error {
	rtRef := envBinding.Status.ResourceTracker
	if rtRef == nil {
		// nothing was tracked before, nothing to clean up
		return nil
	}
	rt := new(v1beta1.ResourceTracker)
	if envBinding.Spec.OutputResourcesTo != nil && len(envBinding.Spec.OutputResourcesTo.Name) != 0 {
		// resources are no longer applied directly: drop the tracker entirely
		rt.SetName(rtRef.Name)
		err := k8sClient.Delete(ctx, rt)
		return client.IgnoreNotFound(err)
	}
	rtKey := client.ObjectKey{Namespace: rtRef.Namespace, Name: rtRef.Name}
	if err := k8sClient.Get(ctx, rtKey, rt); err != nil {
		return err
	}
	// collect every manifest scheduled in this round
	var manifests []*unstructured.Unstructured
	for _, app := range apps {
		for _, obj := range app.ScheduledManifests {
			manifests = append(manifests, obj)
		}
	}
	// delete each tracked resource that is absent from the new manifests
	for _, oldRsc := range rt.Status.TrackedResources {
		isRemoved := true
		for _, newRsc := range manifests {
			if equalMateData(oldRsc, newRsc) {
				isRemoved = false
				break
			}
		}
		if isRemoved {
			if err := deleteOldResource(ctx, k8sClient, oldRsc); err != nil {
				return err
			}
			klog.InfoS("Successfully GC a resource", "name", oldRsc.Name, "apiVersion", oldRsc.APIVersion, "kind", oldRsc.Kind)
		}
	}
	return nil
}
// equalMateData reports whether the tracked resource reference and the new
// manifest identify the same object (same apiVersion, kind, namespace, name).
func equalMateData(rscRef corev1.ObjectReference, newRsc *unstructured.Unstructured) bool {
	return rscRef.APIVersion == newRsc.GetAPIVersion() &&
		rscRef.Kind == newRsc.GetKind() &&
		rscRef.Namespace == newRsc.GetNamespace() &&
		rscRef.Name == newRsc.GetName()
}
// deleteOldResource deletes the object identified by ref; a NotFound error is
// treated as success.
func deleteOldResource(ctx context.Context, k8sClient client.Client, ref corev1.ObjectReference) error {
	obj := new(unstructured.Unstructured)
	obj.SetAPIVersion(ref.APIVersion)
	obj.SetKind(ref.Kind)
	obj.SetNamespace(ref.Namespace)
	obj.SetName(ref.Name)
	err := k8sClient.Delete(ctx, obj)
	if err == nil || kerrors.IsNotFound(err) {
		return nil
	}
	return errors.Wrapf(err, "cannot delete resource %v", ref)
}

View File

@@ -1,321 +0,0 @@
/*
Copyright 2021. The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package envbinding
import (
"context"
"github.com/crossplane/crossplane-runtime/pkg/event"
"github.com/crossplane/crossplane-runtime/pkg/meta"
"github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/util/retry"
"k8s.io/klog/v2"
"k8s.io/utils/pointer"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"github.com/oam-dev/kubevela/apis/core.oam.dev/condition"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/appfile"
common2 "github.com/oam-dev/kubevela/pkg/controller/common"
oamctrl "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev"
"github.com/oam-dev/kubevela/pkg/cue/packages"
"github.com/oam-dev/kubevela/pkg/oam/discoverymapper"
"github.com/oam-dev/kubevela/pkg/oam/util"
"github.com/oam-dev/kubevela/pkg/utils/apply"
)
const (
resourceTrackerFinalizer = "envbinding.oam.dev/resource-tracker-finalizer"
)
// Reconciler reconciles a EnvBinding object
type Reconciler struct {
	client.Client
	dm     discoverymapper.DiscoveryMapper // discovery mapper used by the appfile parser
	pd     *packages.PackageDiscover       // CUE package discovery used by the appfile parser
	Scheme *runtime.Scheme
	record event.Recorder // emits Kubernetes events for reconcile outcomes
	// concurrentReconciles bounds MaxConcurrentReconciles in SetupWithManager
	concurrentReconciles int
}
// Reconcile is the main logic for EnvBinding controller. The phases are:
// finalizer handling -> placement validation -> app-template decode ->
// engine selection -> prepare -> render (initEnvBindApps) -> schedule ->
// garbage collect -> apply/record -> status update.
func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	ctx, cancel := common2.NewReconcileContext(ctx)
	defer cancel()
	klog.InfoS("Reconcile EnvBinding", "envbinding", klog.KRef(req.Namespace, req.Name))
	envBinding := new(v1alpha1.EnvBinding)
	if err := r.Client.Get(ctx, client.ObjectKey{Namespace: req.Namespace, Name: req.Name}, envBinding); err != nil {
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}
	// endReconcile is true when the object was updated for finalizers (or is
	// being deleted) and this round should stop here
	endReconcile, err := r.handleFinalizers(ctx, envBinding)
	if err != nil {
		return r.endWithNegativeCondition(ctx, envBinding, condition.ReconcileError(err))
	}
	if endReconcile {
		return ctrl.Result{}, nil
	}
	if err := validatePlacement(envBinding); err != nil {
		klog.ErrorS(err, "The placement is not compliant")
		r.record.Event(envBinding, event.Warning("The placement is not compliant", err))
		return r.endWithNegativeCondition(ctx, envBinding, condition.ReconcileError(err))
	}
	// decode the embedded application template into an Application object
	baseApp, err := util.RawExtension2Application(envBinding.Spec.AppTemplate.RawExtension)
	if err != nil {
		klog.ErrorS(err, "Failed to parse AppTemplate of EnvBinding")
		r.record.Event(envBinding, event.Warning("Failed to parse AppTemplate of EnvBinding", err))
		return r.endWithNegativeCondition(ctx, envBinding, condition.ReconcileError(err))
	}
	// pick the multi-cluster engine; an unset/unknown engine falls back to cluster-gateway
	var engine ClusterManagerEngine
	switch envBinding.Spec.Engine {
	case v1alpha1.OCMEngine:
		engine = NewOCMEngine(r.Client, baseApp.Name, baseApp.Namespace, envBinding.Name)
	case v1alpha1.SingleClusterEngine:
		engine = NewSingleClusterEngine(r.Client, baseApp.Name, baseApp.Namespace, envBinding.Name)
	case v1alpha1.ClusterGatewayEngine:
		engine = NewClusterGatewayEngine(r.Client, envBinding.Name)
	default:
		engine = NewClusterGatewayEngine(r.Client, envBinding.Name)
	}
	// prepare the pre-work for cluster scheduling
	envBinding.Status.Phase = v1alpha1.EnvBindingPrepare
	if err = engine.prepare(ctx, envBinding.Spec.Envs); err != nil {
		klog.ErrorS(err, "Failed to prepare the pre-work for cluster scheduling")
		r.record.Event(envBinding, event.Warning("Failed to prepare the pre-work for cluster scheduling", err))
		return r.endWithNegativeCondition(ctx, envBinding, condition.ReconcileError(err))
	}
	// patch the component parameters for application in different envs
	envBinding.Status.Phase = v1alpha1.EnvBindingRendering
	appParser := appfile.NewApplicationParser(r.Client, r.dm, r.pd)
	envBindApps, err := engine.initEnvBindApps(ctx, envBinding, baseApp, appParser)
	if err != nil {
		klog.ErrorS(err, "Failed to patch the parameters for application in different envs")
		r.record.Event(envBinding, event.Warning("Failed to patch the parameters for application in different envs", err))
		return r.endWithNegativeCondition(ctx, envBinding, condition.ReconcileError(err))
	}
	// schedule resource of applications in different envs
	envBinding.Status.Phase = v1alpha1.EnvBindingScheduling
	clusterDecisions, err := engine.schedule(ctx, envBindApps)
	if err != nil {
		klog.ErrorS(err, "Failed to schedule resource of applications in different envs")
		r.record.Event(envBinding, event.Warning("Failed to schedule resource of applications in different envs", err))
		return r.endWithNegativeCondition(ctx, envBinding, condition.ReconcileError(err))
	}
	// GC must run before applying so stale tracked resources are removed first
	if err = garbageCollect(ctx, r.Client, envBinding, envBindApps); err != nil {
		klog.ErrorS(err, "Failed to garbage collect old resource for envBinding")
		r.record.Event(envBinding, event.Warning("Failed to garbage collect old resource for envBinding", err))
		return r.endWithNegativeCondition(ctx, envBinding, condition.ReconcileError(err))
	}
	if err = r.applyOrRecordManifests(ctx, envBinding, envBindApps); err != nil {
		klog.ErrorS(err, "Failed to apply or record manifests")
		r.record.Event(envBinding, event.Warning("Failed to apply or record manifests", err))
		return r.endWithNegativeCondition(ctx, envBinding, condition.ReconcileError(err))
	}
	envBinding.Status.Phase = v1alpha1.EnvBindingFinished
	envBinding.Status.ClusterDecisions = clusterDecisions
	if err = r.Client.Status().Patch(ctx, envBinding, client.Merge); err != nil {
		klog.ErrorS(err, "Failed to update status")
		r.record.Event(envBinding, event.Warning("Failed to update status", err))
		return ctrl.Result{}, util.EndReconcileWithNegativeCondition(ctx, r, envBinding, condition.ReconcileError(err))
	}
	return ctrl.Result{}, nil
}
// applyOrRecordManifests either stores the scheduled manifests into the
// configured ConfigMap (when OutputResourcesTo is set) or applies them to the
// cluster and tracks them through a ResourceTracker.
func (r *Reconciler) applyOrRecordManifests(ctx context.Context, envBinding *v1alpha1.EnvBinding, envBindApps []*EnvBindApp) error {
	if envBinding.Spec.OutputResourcesTo != nil && len(envBinding.Spec.OutputResourcesTo.Name) != 0 {
		// best-effort: a store failure is logged and recorded as an event, not returned
		if err := StoreManifest2ConfigMap(ctx, r.Client, envBinding, envBindApps); err != nil {
			klog.ErrorS(err, "Failed to store manifest of different envs to configmap")
			r.record.Event(envBinding, event.Warning("Failed to store manifest of different envs to configmap", err))
		}
		envBinding.Status.ResourceTracker = nil
		return nil
	}
	rt, err := r.createOrGetResourceTracker(ctx, envBinding)
	if err != nil {
		return err
	}
	if err = r.dispatchManifests(ctx, rt, envBindApps); err != nil {
		klog.ErrorS(err, "Failed to dispatch resources of different envs to cluster")
		r.record.Event(envBinding, event.Warning("Failed to dispatch resources of different envs to cluster", err))
		return err
	}
	if err = r.updateResourceTrackerStatus(ctx, rt.Name, envBindApps); err != nil {
		return err
	}
	// record the tracker reference so garbageCollect can find it next round
	envBinding.Status.ResourceTracker = &v1.ObjectReference{
		Kind:       rt.Kind,
		APIVersion: rt.APIVersion,
		Name:       rt.Name,
	}
	return nil
}
// dispatchManifests applies every scheduled manifest, first marking the
// resource tracker as its controlling owner so deletion cascades.
func (r *Reconciler) dispatchManifests(ctx context.Context, resourceTracker *v1beta1.ResourceTracker, envBindApps []*EnvBindApp) error {
	owner := metav1.OwnerReference{
		APIVersion:         resourceTracker.APIVersion,
		Kind:               resourceTracker.Kind,
		Name:               resourceTracker.Name,
		UID:                resourceTracker.GetUID(),
		Controller:         pointer.BoolPtr(true),
		BlockOwnerDeletion: pointer.BoolPtr(true),
	}
	applicator := apply.NewAPIApplicator(r.Client)
	for _, app := range envBindApps {
		for _, manifest := range app.ScheduledManifests {
			manifest.SetOwnerReferences([]metav1.OwnerReference{owner})
			if err := applicator.Apply(ctx, manifest); err != nil {
				return err
			}
		}
	}
	return nil
}
// createOrGetResourceTracker fetches the cluster-scoped resource tracker for
// this EnvBinding, creating it when it does not exist yet.
func (r *Reconciler) createOrGetResourceTracker(ctx context.Context, envBinding *v1alpha1.EnvBinding) (*v1beta1.ResourceTracker, error) {
	rtName := constructResourceTrackerName(envBinding.Name, envBinding.Namespace)
	rt := new(v1beta1.ResourceTracker)
	err := r.Client.Get(ctx, client.ObjectKey{Name: rtName}, rt)
	switch {
	case err == nil:
		return rt, nil
	case !kerrors.IsNotFound(err):
		return nil, errors.Wrap(err, "cannot get resource tracker")
	}
	klog.InfoS("Going to create a resource tracker", "resourceTracker", rtName)
	rt.SetName(rtName)
	if createErr := r.Client.Create(ctx, rt); createErr != nil {
		return nil, createErr
	}
	return rt, nil
}
// updateResourceTrackerStatus records every scheduled manifest as a tracked
// resource on the tracker's status, retrying on update conflicts.
func (r *Reconciler) updateResourceTrackerStatus(ctx context.Context, rtName string, envBindApps []*EnvBindApp) error {
	var refs []v1.ObjectReference
	for _, app := range envBindApps {
		for _, obj := range app.ScheduledManifests {
			refs = append(refs, v1.ObjectReference{
				APIVersion: obj.GetAPIVersion(),
				Kind:       obj.GetKind(),
				Name:       obj.GetName(),
				Namespace:  obj.GetNamespace(),
			})
		}
	}
	resourceTracker := new(v1beta1.ResourceTracker)
	// re-read the tracker on each attempt so the update applies to the latest version
	if err := retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
		if err = r.Client.Get(ctx, client.ObjectKey{Name: rtName}, resourceTracker); err != nil {
			return
		}
		resourceTracker.Status.TrackedResources = refs
		return r.Client.Status().Update(ctx, resourceTracker)
	}); err != nil {
		return err
	}
	klog.InfoS("Successfully update resource tracker status", "resourceTracker", rtName)
	return nil
}
// handleFinalizers registers the resource-tracker finalizer on live objects
// and, on deletion, removes the tracker (local and sub-cluster) before
// dropping the finalizer. The bool result is true when reconciliation should
// end after this call (object updated or being deleted).
func (r *Reconciler) handleFinalizers(ctx context.Context, envBinding *v1alpha1.EnvBinding) (bool, error) {
	if envBinding.ObjectMeta.DeletionTimestamp.IsZero() {
		if !meta.FinalizerExists(envBinding, resourceTrackerFinalizer) {
			meta.AddFinalizer(envBinding, resourceTrackerFinalizer)
			klog.InfoS("Register new finalizer for envBinding", "envBinding", klog.KObj(envBinding), "finalizer", resourceTrackerFinalizer)
			return true, errors.Wrap(r.Client.Update(ctx, envBinding), "cannot update envBinding finalizer")
		}
	} else {
		if meta.FinalizerExists(envBinding, resourceTrackerFinalizer) {
			// delete the cluster-scoped resource tracker of this envBinding
			rt := new(v1beta1.ResourceTracker)
			rt.SetName(constructResourceTrackerName(envBinding.Name, envBinding.Namespace))
			if err := r.Client.Get(ctx, client.ObjectKey{Name: rt.Name}, rt); err != nil && !kerrors.IsNotFound(err) {
				klog.ErrorS(err, "Failed to get resource tracker of envBinding", "envBinding", klog.KObj(envBinding))
				return true, errors.WithMessage(err, "cannot remove finalizer")
			}
			if err := r.Client.Delete(ctx, rt); err != nil && !kerrors.IsNotFound(err) {
				klog.ErrorS(err, "Failed to delete resource tracker of envBinding", "envBinding", klog.KObj(envBinding))
				return true, errors.WithMessage(err, "cannot remove finalizer")
			}
			// also clean up resource trackers created in managed clusters
			if err := GarbageCollectionForAllResourceTrackersInSubCluster(ctx, r.Client, envBinding); err != nil {
				return true, err
			}
			meta.RemoveFinalizer(envBinding, resourceTrackerFinalizer)
			return true, errors.Wrap(r.Client.Update(ctx, envBinding), "cannot update envBinding finalizer")
		}
	}
	return false, nil
}
// endWithNegativeCondition records the failure condition on the EnvBinding
// status and decides how to trigger a requeue (via status change or error).
func (r *Reconciler) endWithNegativeCondition(ctx context.Context, envBinding *v1alpha1.EnvBinding, cond condition.Condition) (ctrl.Result, error) {
	envBinding.SetConditions(cond)
	if err := r.Client.Status().Patch(ctx, envBinding, client.Merge); err != nil {
		return ctrl.Result{}, errors.WithMessage(err, "cannot update envbinding status")
	}
	// if any condition is changed, patching status can trigger requeue the resource and we should return nil to
	// avoid requeue it again
	if util.IsConditionChanged([]condition.Condition{cond}, envBinding) {
		return ctrl.Result{}, nil
	}
	// if no condition is changed, patching status can not trigger requeue, so we must return an error to
	// requeue the resource
	return ctrl.Result{}, errors.Errorf("object level reconcile error, type: %q, msg: %q", string(cond.Type), cond.Message)
}
// SetupWithManager will setup with event recorder
func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
	recorder := event.NewAPIRecorder(mgr.GetEventRecorderFor("EnvBinding"))
	r.record = recorder.WithAnnotations("controller", "EnvBinding")
	builder := ctrl.NewControllerManagedBy(mgr).
		For(&v1alpha1.EnvBinding{}).
		WithOptions(controller.Options{
			MaxConcurrentReconciles: r.concurrentReconciles,
		})
	return builder.Complete(r)
}
// Setup adds a controller that reconciles EnvBinding.
func Setup(mgr ctrl.Manager, args oamctrl.Args) error {
	reconciler := Reconciler{
		Client:               mgr.GetClient(),
		Scheme:               mgr.GetScheme(),
		dm:                   args.DiscoveryMapper,
		pd:                   args.PackageDiscover,
		concurrentReconciles: args.ConcurrentReconciles,
	}
	return reconciler.SetupWithManager(mgr)
}

View File

@@ -1,115 +0,0 @@
/*
Copyright 2021. The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package envbinding
import (
"context"
"fmt"
"math/rand"
"path/filepath"
"strconv"
"testing"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/client-go/rest"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
"github.com/oam-dev/kubevela/pkg/cue/packages"
"github.com/oam-dev/kubevela/pkg/oam/discoverymapper"
"github.com/oam-dev/kubevela/pkg/utils/common"
)
var cfg *rest.Config
var k8sClient client.Client
var testEnv *envtest.Environment
var controllerDone context.CancelFunc
var r Reconciler
// TestEnvBinding hooks the Ginkgo suite into the standard `go test` runner.
func TestEnvBinding(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "EnvBinding Suite")
}
// BeforeSuite boots a local envtest control plane with the project CRDs,
// builds the suite-wide k8s client, and starts the EnvBinding controller in a
// background goroutine (stopped by controllerDone in AfterSuite).
var _ = BeforeSuite(func(done Done) {
	By("Bootstrapping test environment")
	useExistCluster := false
	testEnv = &envtest.Environment{
		ControlPlaneStartTimeout: time.Minute,
		ControlPlaneStopTimeout:  time.Minute,
		CRDDirectoryPaths: []string{
			filepath.Join("../../../../..", "charts/vela-core/crds"), // this has all the required CRDs,
			"./testdata/crds",
		},
		UseExistingCluster: &useExistCluster,
	}
	var err error
	cfg, err = testEnv.Start()
	Expect(err).ToNot(HaveOccurred())
	Expect(cfg).ToNot(BeNil())
	By("Create the k8s client")
	k8sClient, err = client.New(cfg, client.Options{Scheme: common.Scheme})
	Expect(err).ToNot(HaveOccurred())
	Expect(k8sClient).ToNot(BeNil())
	By("Starting the controller in the background")
	mgr, err := ctrl.NewManager(cfg, ctrl.Options{
		Scheme:             common.Scheme,
		MetricsBindAddress: "0", // disable the metrics endpoint in tests
		Port:               48081,
	})
	Expect(err).ToNot(HaveOccurred())
	dm, err := discoverymapper.New(mgr.GetConfig())
	Expect(err).ToNot(HaveOccurred())
	// refresh the mapper so the freshly installed CRDs are discoverable
	_, err = dm.Refresh()
	Expect(err).ToNot(HaveOccurred())
	pd, err := packages.NewPackageDiscover(cfg)
	Expect(err).ToNot(HaveOccurred())
	r = Reconciler{
		Client: mgr.GetClient(),
		Scheme: mgr.GetScheme(),
		dm:     dm,
		pd:     pd,
	}
	Expect(r.SetupWithManager(mgr)).ToNot(HaveOccurred())
	var ctx context.Context
	ctx, controllerDone = context.WithCancel(context.Background())
	go func() {
		defer GinkgoRecover()
		Expect(mgr.Start(ctx)).ToNot(HaveOccurred())
	}()
	close(done)
}, 120)
// AfterSuite stops the controller goroutine and shuts the envtest control
// plane down.
var _ = AfterSuite(func() {
	By("Stop the controller")
	controllerDone()
	By("Tearing down the test environment")
	Expect(testEnv.Stop()).ToNot(HaveOccurred())
})
// randomNamespaceName returns basic suffixed with a random hex value so each
// test run gets a unique namespace.
func randomNamespaceName(basic string) string {
	suffix := strconv.FormatInt(rand.Int63(), 16)
	return fmt.Sprintf("%s-%s", basic, suffix)
}

View File

@@ -1,341 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: manifestworks.work.open-cluster-management.io
spec:
conversion:
strategy: None
group: work.open-cluster-management.io
names:
kind: ManifestWork
listKind: ManifestWorkList
plural: manifestworks
singular: manifestwork
scope: Namespaced
versions:
- name: v1
schema:
openAPIV3Schema:
description: ManifestWork represents a manifests workload that hub wants to
deploy on the managed cluster. A manifest workload is defined as a set of
Kubernetes resources. ManifestWork must be created in the cluster namespace
on the hub, so that agent on the corresponding managed cluster can access
this resource and deploy on the managed cluster.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: Spec represents a desired configuration of work to be deployed
on the managed cluster.
properties:
deleteOption:
description: DeleteOption represents deletion strategy when the manifestwork
is deleted. Foreground deletion strategy is applied to all the resource
in this manifestwork if it is not set.
properties:
propagationPolicy:
default: ForeGround
description: propagationPolicy can be Foreground, Orphan or SelectivelyOrphan
SelectivelyOrphan should be rarely used. It is provided for
            cases where particular resources are transferring ownership from
one ManifestWork to another or another management unit. Setting
this value will allow a flow like 1. create manifestwork/2 to
manage foo 2. update manifestwork/1 to selectively orphan foo
3. remove foo from manifestwork/1 without impacting continuity
because manifestwork/2 adopts it.
type: string
selectivelyOrphans:
description: selectivelyOrphan represents a list of resources
                  following the orphan deletion strategy
properties:
orphaningRules:
description: orphaningRules defines a slice of orphaningrule.
Each orphaningrule identifies a single resource included
in this manifestwork
items:
description: OrphaningRule identifies a single resource
included in this manifestwork
properties:
Name:
description: Name is the names of the resources in the
workload that the strategy is applied
type: string
Namespace:
description: Namespace is the namespaces of the resources
in the workload that the strategy is applied
type: string
group:
description: Group is the api group of the resources
in the workload that the strategy is applied
type: string
resource:
description: Resource is the resources in the workload
that the strategy is applied
type: string
type: object
type: array
type: object
type: object
workload:
description: Workload represents the manifest workload to be deployed
on a managed cluster.
properties:
manifests:
                description: Manifests represents a list of kubernetes resources
to be deployed on a managed cluster.
items:
description: Manifest represents a resource to be deployed on
managed cluster.
type: object
x-kubernetes-embedded-resource: true
x-kubernetes-preserve-unknown-fields: true
type: array
type: object
type: object
status:
description: Status represents the current status of work.
properties:
conditions:
description: 'Conditions contains the different condition statuses
for this work. Valid condition types are: 1. Applied represents
workload in ManifestWork is applied successfully on managed cluster.
2. Progressing represents workload in ManifestWork is being applied
on managed cluster. 3. Available represents workload in ManifestWork
exists on the managed cluster. 4. Degraded represents the current
state of workload does not match the desired state for a certain
period.'
items:
description: "Condition contains details for one aspect of the current
state of this API Resource. --- This struct is intended for direct
use as an array at the field path .status.conditions. For example,
type FooStatus struct{ // Represents the observations of a
foo's current state. // Known .status.conditions.type are:
\"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type
\ // +patchStrategy=merge // +listType=map // +listMapKey=type
\ Conditions []metav1.Condition `json:\"conditions,omitempty\"
patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`
\n // other fields }"
properties:
lastTransitionTime:
description: lastTransitionTime is the last time the condition
transitioned from one status to another. This should be when
the underlying condition changed. If that is not known, then
using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: message is a human readable message indicating
details about the transition. This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: observedGeneration represents the .metadata.generation
that the condition was set based upon. For instance, if .metadata.generation
is currently 12, but the .status.conditions[x].observedGeneration
is 9, the condition is out of date with respect to the current
state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: reason contains a programmatic identifier indicating
the reason for the condition's last transition. Producers
of specific condition types may define expected values and
meanings for this field, and whether the values are considered
a guaranteed API. The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True, False, Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
--- Many .condition.type values are consistent across resources
like Available, but because arbitrary conditions can be useful
(see .node.status.conditions), the ability to deconflict is
important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
required:
- lastTransitionTime
- message
- reason
- status
- type
type: object
type: array
resourceStatus:
description: ResourceStatus represents the status of each resource
in manifestwork deployed on a managed cluster. The Klusterlet agent
on managed cluster syncs the condition from the managed cluster
to the hub.
properties:
manifests:
description: 'Manifests represents the condition of manifests
deployed on managed cluster. Valid condition types are: 1. Progressing
represents the resource is being applied on managed cluster.
2. Applied represents the resource is applied successfully on
managed cluster. 3. Available represents the resource exists
on the managed cluster. 4. Degraded represents the current state
of resource does not match the desired state for a certain period.'
items:
description: ManifestCondition represents the conditions of
the resources deployed on a managed cluster.
properties:
conditions:
description: Conditions represents the conditions of this
resource on a managed cluster.
items:
description: "Condition contains details for one aspect
of the current state of this API Resource. --- This
struct is intended for direct use as an array at the
field path .status.conditions. For example, type FooStatus
struct{ // Represents the observations of a foo's
current state. // Known .status.conditions.type
are: \"Available\", \"Progressing\", and \"Degraded\"
\ // +patchMergeKey=type // +patchStrategy=merge
\ // +listType=map // +listMapKey=type Conditions
[]metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\"
patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`
\n // other fields }"
properties:
lastTransitionTime:
description: lastTransitionTime is the last time the
condition transitioned from one status to another.
This should be when the underlying condition changed. If
that is not known, then using the time when the
API field changed is acceptable.
format: date-time
type: string
message:
description: message is a human readable message indicating
details about the transition. This may be an empty
string.
maxLength: 32768
type: string
observedGeneration:
description: observedGeneration represents the .metadata.generation
that the condition was set based upon. For instance,
if .metadata.generation is currently 12, but the
.status.conditions[x].observedGeneration is 9, the
condition is out of date with respect to the current
state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: reason contains a programmatic identifier
indicating the reason for the condition's last transition.
Producers of specific condition types may define
expected values and meanings for this field, and
whether the values are considered a guaranteed API.
The value should be a CamelCase string. This field
may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True,
False, Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: type of condition in CamelCase or in
foo.example.com/CamelCase. --- Many .condition.type
values are consistent across resources like Available,
but because arbitrary conditions can be useful (see
.node.status.conditions), the ability to deconflict
is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
required:
- lastTransitionTime
- message
- reason
- status
- type
type: object
type: array
resourceMeta:
description: ResourceMeta represents the group, version,
                      kind, name and namespace of a resource.
properties:
group:
description: Group is the API Group of the Kubernetes
resource.
type: string
kind:
description: Kind is the kind of the Kubernetes resource.
type: string
name:
description: Name is the name of the Kubernetes resource.
type: string
namespace:
                          description: Namespace is the namespace of the Kubernetes
resource.
type: string
ordinal:
description: Ordinal represents the index of the manifest
on spec.
format: int32
type: integer
resource:
description: Resource is the resource name of the Kubernetes
resource.
type: string
version:
description: Version is the version of the Kubernetes
resource.
type: string
type: object
type: object
type: array
type: object
type: object
type: object
served: true
storage: true
subresources:
status: {}
status:
acceptedNames:
kind: ManifestWork
listKind: ManifestWorkList
plural: manifestworks
singular: manifestwork
conditions:
- lastTransitionTime: "2021-08-12T08:23:40Z"
message: no conflicts found
reason: NoConflicts
status: "True"
type: NamesAccepted
- lastTransitionTime: "2021-08-12T08:23:40Z"
message: the initial names have been accepted
reason: InitialNamesAccepted
status: "True"
type: Established
storedVersions:
- v1

View File

@@ -1,288 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: placements.cluster.open-cluster-management.io
spec:
conversion:
strategy: None
group: cluster.open-cluster-management.io
names:
kind: Placement
listKind: PlacementList
plural: placements
singular: placement
scope: Namespaced
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
description: "Placement defines a rule to select a set of ManagedClusters
from the ManagedClusterSets bound to the placement namespace. \n Here is
how the placement policy combines with other selection methods to determine
a matching list of ManagedClusters: 1) Kubernetes clusters are registered
with hub as cluster-scoped ManagedClusters; 2) ManagedClusters are organized
into cluster-scoped ManagedClusterSets; 3) ManagedClusterSets are bound
to workload namespaces; 4) Namespace-scoped Placements specify a slice of
ManagedClusterSets which select a working set of potential ManagedClusters;
5) Then Placements subselect from that working set using label/claim selection.
\n No ManagedCluster will be selected if no ManagedClusterSet is bound to
the placement namespace. User is able to bind a ManagedClusterSet to a namespace
by creating a ManagedClusterSetBinding in that namespace if they have a
RBAC rule to CREATE on the virtual subresource of `managedclustersets/bind`.
\n A slice of PlacementDecisions with label cluster.open-cluster-management.io/placement={placement
name} will be created to represent the ManagedClusters selected by this
placement. \n If a ManagedCluster is selected and added into the PlacementDecisions,
other components may apply workload on it; once it is removed from the PlacementDecisions,
the workload applied on this ManagedCluster should be evicted accordingly."
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: Spec defines the attributes of Placement.
properties:
clusterSets:
description: ClusterSets represent the ManagedClusterSets from which
the ManagedClusters are selected. If the slice is empty, ManagedClusters
will be selected from the ManagedClusterSets bound to the placement
namespace, otherwise ManagedClusters will be selected from the intersection
of this slice and the ManagedClusterSets bound to the placement
namespace.
items:
type: string
type: array
numberOfClusters:
description: NumberOfClusters represents the desired number of ManagedClusters
to be selected which meet the placement requirements. 1) If not
specified, all ManagedClusters which meet the placement requirements
(including ClusterSets, and Predicates) will be selected; 2)
            Otherwise if the number of ManagedClusters meet the placement requirements
            is larger than NumberOfClusters, a random subset with desired
            number of ManagedClusters will be selected; 3) If the number of
            ManagedClusters meet the placement requirements is equal to NumberOfClusters, all
            of them will be selected; 4) If the number of ManagedClusters meet
the placement requirements is less than NumberOfClusters, all
of them will be selected, and the status of condition `PlacementConditionSatisfied`
will be set to false;
format: int32
type: integer
predicates:
description: Predicates represent a slice of predicates to select
ManagedClusters. The predicates are ORed.
items:
description: ClusterPredicate represents a predicate to select ManagedClusters.
properties:
requiredClusterSelector:
description: RequiredClusterSelector represents a selector of
ManagedClusters by label and claim. If specified, 1) Any ManagedCluster,
which does not match the selector, should not be selected
by this ClusterPredicate; 2) If a selected ManagedCluster
(of this ClusterPredicate) ceases to match the selector (e.g.
due to an update) of any ClusterPredicate, it will be eventually
removed from the placement decisions; 3) If a ManagedCluster
(not selected previously) starts to match the selector, it
will either be selected or at least has a chance to be
selected (when NumberOfClusters is specified);
properties:
claimSelector:
description: ClaimSelector represents a selector of ManagedClusters
by clusterClaims in status
properties:
matchExpressions:
description: matchExpressions is a list of cluster claim
selector requirements. The requirements are ANDed.
items:
description: A label selector requirement is a selector
that contains values, a key, and an operator that
relates the key and values.
properties:
key:
description: key is the label key that the selector
applies to.
type: string
operator:
description: operator represents a key's relationship
to a set of values. Valid operators are In,
NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string values.
If the operator is In or NotIn, the values array
must be non-empty. If the operator is Exists
or DoesNotExist, the values array must be empty.
This array is replaced during a strategic merge
patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
type: object
labelSelector:
description: LabelSelector represents a selector of ManagedClusters
by label
properties:
matchExpressions:
description: matchExpressions is a list of label selector
requirements. The requirements are ANDed.
items:
description: A label selector requirement is a selector
that contains values, a key, and an operator that
relates the key and values.
properties:
key:
description: key is the label key that the selector
applies to.
type: string
operator:
description: operator represents a key's relationship
to a set of values. Valid operators are In,
NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string values.
If the operator is In or NotIn, the values array
must be non-empty. If the operator is Exists
or DoesNotExist, the values array must be empty.
This array is replaced during a strategic merge
patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs.
A single {key,value} in the matchLabels map is equivalent
to an element of matchExpressions, whose key field
is "key", the operator is "In", and the values array
contains only "value". The requirements are ANDed.
type: object
type: object
type: object
type: object
type: array
type: object
status:
description: Status represents the current status of the Placement
properties:
conditions:
description: Conditions contains the different condition statuses
for this Placement.
items:
description: "Condition contains details for one aspect of the current
state of this API Resource. --- This struct is intended for direct
use as an array at the field path .status.conditions. For example,
type FooStatus struct{ // Represents the observations of a
foo's current state. // Known .status.conditions.type are:
\"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type
\ // +patchStrategy=merge // +listType=map // +listMapKey=type
\ Conditions []metav1.Condition `json:\"conditions,omitempty\"
patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`
\n // other fields }"
properties:
lastTransitionTime:
description: lastTransitionTime is the last time the condition
transitioned from one status to another. This should be when
the underlying condition changed. If that is not known, then
using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: message is a human readable message indicating
details about the transition. This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: observedGeneration represents the .metadata.generation
that the condition was set based upon. For instance, if .metadata.generation
is currently 12, but the .status.conditions[x].observedGeneration
is 9, the condition is out of date with respect to the current
state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: reason contains a programmatic identifier indicating
the reason for the condition's last transition. Producers
of specific condition types may define expected values and
meanings for this field, and whether the values are considered
a guaranteed API. The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True, False, Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
--- Many .condition.type values are consistent across resources
like Available, but because arbitrary conditions can be useful
(see .node.status.conditions), the ability to deconflict is
important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
required:
- lastTransitionTime
- message
- reason
- status
- type
type: object
type: array
numberOfSelectedClusters:
description: NumberOfSelectedClusters represents the number of selected
ManagedClusters
format: int32
type: integer
type: object
required:
- spec
type: object
served: true
storage: true
subresources:
status: {}
status:
acceptedNames:
kind: Placement
listKind: PlacementList
plural: placements
singular: placement
conditions:
- lastTransitionTime: "2021-08-04T08:37:09Z"
message: no conflicts found
reason: NoConflicts
status: "True"
type: NamesAccepted
- lastTransitionTime: "2021-08-04T08:37:09Z"
message: the initial names have been accepted
reason: InitialNamesAccepted
status: "True"
type: Established
storedVersions:
- v1alpha1

View File

@@ -1,90 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: placementdecisions.cluster.open-cluster-management.io
spec:
conversion:
strategy: None
group: cluster.open-cluster-management.io
names:
kind: PlacementDecision
listKind: PlacementDecisionList
plural: placementdecisions
singular: placementdecision
scope: Namespaced
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
description: "PlacementDecision indicates a decision from a placement PlacementDecision
          should have a label cluster.open-cluster-management.io/placement={placement
name} to reference a certain placement. \n If a placement has spec.numberOfClusters
specified, the total number of decisions contained in status.decisions of
PlacementDecisions should always be NumberOfClusters; otherwise, the total
number of decisions should be the number of ManagedClusters which match
the placement requirements. \n Some of the decisions might be empty when
          there are not enough ManagedClusters to meet the placement requirements."
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
status:
description: Status represents the current status of the PlacementDecision
properties:
decisions:
description: Decisions is a slice of decisions according to a placement
The number of decisions should not be larger than 100
items:
description: ClusterDecision represents a decision from a placement
An empty ClusterDecision indicates it is not scheduled yet.
properties:
clusterName:
description: ClusterName is the name of the ManagedCluster.
If it is not empty, its value should be unique cross all placement
decisions for the Placement.
type: string
reason:
description: Reason represents the reason why the ManagedCluster
is selected.
type: string
required:
- clusterName
- reason
type: object
type: array
required:
- decisions
type: object
type: object
served: true
storage: true
subresources:
status: {}
status:
acceptedNames:
kind: PlacementDecision
listKind: PlacementDecisionList
plural: placementdecisions
singular: placementdecision
conditions:
- lastTransitionTime: "2021-08-04T08:37:09Z"
message: no conflicts found
reason: NoConflicts
status: "True"
type: NamesAccepted
- lastTransitionTime: "2021-08-04T08:37:09Z"
message: the initial names have been accepted
reason: InitialNamesAccepted
status: "True"
type: Established
storedVersions:
- v1alpha1

View File

@@ -22,20 +22,20 @@ import (
"time"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/util/retry"
"github.com/crossplane/crossplane-runtime/pkg/event"
"github.com/crossplane/crossplane-runtime/pkg/meta"
"github.com/pkg/errors"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/util/retry"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/crossplane/crossplane-runtime/pkg/event"
"github.com/crossplane/crossplane-runtime/pkg/meta"
"github.com/pkg/errors"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/condition"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
@@ -43,9 +43,9 @@ import (
"github.com/oam-dev/kubevela/pkg/appfile"
common2 "github.com/oam-dev/kubevela/pkg/controller/common"
core "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha1/envbinding"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/application/assemble"
"github.com/oam-dev/kubevela/pkg/cue/packages"
"github.com/oam-dev/kubevela/pkg/multicluster"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/discoverymapper"
oamutil "github.com/oam-dev/kubevela/pkg/oam/util"
@@ -204,7 +204,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
if wfStatus != nil {
ref, err := handler.DispatchAndGC(ctx)
if err == nil {
err = envbinding.GarbageCollectionForOutdatedResourcesInSubClusters(ctx, r.Client, policies, func(c context.Context) error {
err = multicluster.GarbageCollectionForOutdatedResourcesInSubClusters(ctx, app, func(c context.Context) error {
_, e := handler.DispatchAndGC(c)
return e
})
@@ -333,6 +333,9 @@ func (r *Reconciler) handleFinalizers(ctx context.Context, app *v1beta1.Applicat
return true, errors.WithMessage(err, "cannot remove finalizer")
}
}
if err := multicluster.GarbageCollectionForAllResourceTrackersInSubCluster(ctx, r.Client, app); err != nil {
return true, err
}
meta.RemoveFinalizer(app, resourceTrackerFinalizer)
// legacyOnlyRevisionFinalizer will be deprecated in the future
// this is for backward compatibility

View File

@@ -35,6 +35,7 @@ import (
"github.com/oam-dev/kubevela/pkg/utils"
"github.com/oam-dev/kubevela/pkg/workflow/providers"
"github.com/oam-dev/kubevela/pkg/workflow/providers/kube"
multiclusterProvider "github.com/oam-dev/kubevela/pkg/workflow/providers/multicluster"
oamProvider "github.com/oam-dev/kubevela/pkg/workflow/providers/oam"
"github.com/oam-dev/kubevela/pkg/workflow/tasks"
wfTypes "github.com/oam-dev/kubevela/pkg/workflow/types"
@@ -52,6 +53,7 @@ func (h *AppHandler) GenerateApplicationSteps(ctx context.Context,
oamProvider.Install(handlerProviders, app, h.applyComponentFunc(
appParser, appRev, af), h.renderComponentFunc(appParser, appRev, af))
taskDiscover := tasks.NewTaskDiscover(handlerProviders, h.r.pd, h.r.Client, h.r.dm)
multiclusterProvider.Install(handlerProviders, h.r.Client, app)
var tasks []wfTypes.TaskRunner
for _, step := range af.WorkflowSteps {
options := &wfTypes.GeneratorOptions{

View File

@@ -24,6 +24,7 @@ import (
"strings"
"github.com/oam-dev/kubevela/pkg/cue/model"
"github.com/oam-dev/kubevela/pkg/multicluster"
"github.com/pkg/errors"
appsv1 "k8s.io/api/apps/v1"
@@ -859,7 +860,8 @@ func cleanUpWorkflowComponentRevision(ctx context.Context, h *AppHandler) error
ns := resource.Namespace
r := &unstructured.Unstructured{}
r.GetObjectKind().SetGroupVersionKind(resource.GroupVersionKind())
err := h.r.Get(ctx, ktypes.NamespacedName{Name: compName, Namespace: ns}, r)
_ctx := multicluster.ContextWithClusterName(ctx, resource.Cluster)
err := h.r.Get(_ctx, ktypes.NamespacedName{Name: compName, Namespace: ns}, r)
if err != nil {
return err
}
@@ -877,7 +879,8 @@ func cleanUpWorkflowComponentRevision(ctx context.Context, h *AppHandler) error
listOpts := []client.ListOption{client.MatchingLabels{
oam.LabelControllerRevisionComponent: curComp.Name,
}, client.InNamespace(h.app.Namespace)}
if err := h.r.List(ctx, crList, listOpts...); err != nil {
_ctx := multicluster.ContextWithClusterName(ctx, curComp.Cluster)
if err := h.r.List(_ctx, crList, listOpts...); err != nil {
return err
}
needKill := len(crList.Items) - h.r.appRevisionLimit - len(compRevisionInUse[curComp.Name])
@@ -893,7 +896,7 @@ func cleanUpWorkflowComponentRevision(ctx context.Context, h *AppHandler) error
if _, inUse := compRevisionInUse[curComp.Name][rev.Name]; inUse {
continue
}
if err := h.r.Delete(ctx, rev.DeepCopy()); err != nil && !apierrors.IsNotFound(err) {
if err := h.r.Delete(_ctx, rev.DeepCopy()); err != nil && !apierrors.IsNotFound(err) {
return err
}
needKill--

View File

@@ -39,17 +39,16 @@ import (
commonapis "github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/condition"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
af "github.com/oam-dev/kubevela/pkg/appfile"
"github.com/oam-dev/kubevela/pkg/controller/common"
controller "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha1/envbinding"
"github.com/oam-dev/kubevela/pkg/cue/packages"
"github.com/oam-dev/kubevela/pkg/multicluster"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/discoverymapper"
"github.com/oam-dev/kubevela/pkg/policy/envbinding"
)
const (
@@ -462,60 +461,18 @@ func (r *Reconciler) patchHealthStatusToApplications(ctx context.Context, appHea
return nil
}
func (r *Reconciler) getEnvBinding(ctx context.Context, appName string, ns string) (*v1alpha1.EnvBinding, *v1beta1.Application, error) {
app := new(v1beta1.Application)
appKey := client.ObjectKey{Name: appName, Namespace: ns}
if err := r.client.Get(ctx, appKey, app); err != nil {
return nil, nil, err
}
var envBindingName string
for _, policy := range app.Spec.Policies {
if policy.Type == "env-binding" {
envBindingName = policy.Name
break
}
}
if len(envBindingName) == 0 {
return nil, app, nil
}
envBinding := new(v1alpha1.EnvBinding)
envBindingKey := client.ObjectKey{Name: envBindingName, Namespace: ns}
if err := r.client.Get(ctx, envBindingKey, envBinding); err != nil {
return nil, nil, err
}
if envBinding.Status.Phase != v1alpha1.EnvBindingFinished {
return nil, nil, errors.Errorf("policy env-binding was not ready")
}
return envBinding, app, nil
}
func (r *Reconciler) createAppfile(ctx context.Context, appName, ns, envName string) (*af.Appfile, error) {
appParser := af.NewApplicationParser(r.client, r.dm, r.pd)
if len(envName) != 0 {
envBinding, baseApp, err := r.getEnvBinding(ctx, appName, ns)
app := &v1beta1.Application{}
if err := r.client.Get(ctx, types.NamespacedName{Namespace: ns, Name: appName}, app); err != nil {
return nil, err
}
patchedApp, err := envbinding.PatchApplicationByEnvBindingEnv(app, "", envName)
if err != nil {
return nil, err
}
var targetEnvConfig *v1alpha1.EnvConfig
for i := range envBinding.Spec.Envs {
envConfig := envBinding.Spec.Envs[i]
if envConfig.Name == envName {
targetEnvConfig = &envConfig
break
}
}
if targetEnvConfig == nil {
return nil, errors.Errorf("policy env-binding doesn't contains env %s", envName)
}
envBindApp := envbinding.NewEnvBindApp(baseApp, targetEnvConfig)
if err = envBindApp.GenerateConfiguredApplication(); err != nil {
return nil, err
}
return appParser.GenerateAppFile(ctx, envBindApp.PatchedApp)
return appParser.GenerateAppFile(ctx, patchedApp)
}
app := &v1beta1.Application{}
@@ -571,20 +528,31 @@ func constructAppCompStatus(appC *AppHealthCondition, hsRef corev1.ObjectReferen
func (r *Reconciler) createWorkloadRefs(ctx context.Context, appRef v1alpha2.AppReference, ns string) []WorkloadReference {
wlRefs := make([]WorkloadReference, 0)
envBinding, application, err := r.getEnvBinding(ctx, appRef.AppName, ns)
if err != nil {
klog.ErrorS(err, "Failed to get envBinding")
application := &v1beta1.Application{}
if err := r.client.Get(ctx, types.NamespacedName{Namespace: ns, Name: appRef.AppName}, application); err != nil {
klog.ErrorS(err, "Failed to get application")
return wlRefs
}
var decisions []v1alpha1.ClusterDecision
decisionsMap := make(map[string]string)
if envBinding == nil {
decisions = make([]v1alpha1.ClusterDecision, 1)
} else {
decisions = envBinding.Status.ClusterDecisions
for _, decision := range decisions {
decisionsMap[decision.Cluster] = decision.Env
// ugly implementation, should be reworked in future
decisionsMap := map[string]string{}
var decisions []struct {
Cluster string
Env string
}
policyStatus, err := envbinding.GetEnvBindingPolicyStatus(application, "")
if err == nil && policyStatus != nil {
for _, env := range policyStatus.Envs {
for _, placement := range env.Placements {
decisionsMap[placement.Cluster] = env.Env
decisions = append(decisions, struct {
Cluster string
Env string
}{
Cluster: placement.Cluster,
Env: env.Env,
})
}
}
}

View File

@@ -21,7 +21,6 @@ import (
"github.com/oam-dev/kubevela/pkg/controller/common"
controller "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha1/envbinding"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/core/scopes/healthscope"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/core/traits/manualscalertrait"
"github.com/oam-dev/kubevela/pkg/controller/standard.oam.dev/v1alpha1/rollout"
@@ -37,7 +36,6 @@ func Setup(mgr ctrl.Manager, disableCaps string, args controller.Args) error {
manualscalertrait.Setup,
healthscope.Setup,
rollout.Setup,
envbinding.Setup,
}
case common.DisableAllCaps:
default:
@@ -52,9 +50,6 @@ func Setup(mgr ctrl.Manager, disableCaps string, args controller.Args) error {
if !disableCapsSet.Contains(common.RolloutControllerName) {
functions = append(functions, rollout.Setup)
}
if !disableCapsSet.Contains(common.EnvBindingControllerName) {
functions = append(functions, envbinding.Setup)
}
}
for _, setup := range functions {

View File

@@ -14,52 +14,38 @@
limitations under the License.
*/
package envbinding
package multicluster
import (
"context"
"github.com/pkg/errors"
kerrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/multicluster"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/util"
errors2 "github.com/oam-dev/kubevela/pkg/utils/errors"
)
func isEnvBindingPolicy(policy *unstructured.Unstructured) bool {
policyKindAPIVersion := policy.GetKind() + "." + policy.GetAPIVersion()
return policyKindAPIVersion == v1alpha1.EnvBindingKindAPIVersion
// getAppliedClusters returns the deduplicated list of clusters that the
// application has already applied resources to, derived from
// app.Status.AppliedResources.
func getAppliedClusters(app *v1beta1.Application) []string {
	seen := make(map[string]struct{})
	var clusters []string
	for _, res := range app.Status.AppliedResources {
		if _, ok := seen[res.Cluster]; ok {
			continue
		}
		seen[res.Cluster] = struct{}{}
		clusters = append(clusters, res.Cluster)
	}
	return clusters
}
// GarbageCollectionForOutdatedResourcesInSubClusters run garbage collection in sub clusters and remove outdated ResourceTrackers with their associated resources
func GarbageCollectionForOutdatedResourcesInSubClusters(ctx context.Context, c client.Client, policies []*unstructured.Unstructured, gcHandler func(context.Context) error) error {
subClusters := make(map[string]bool)
for _, raw := range policies {
if !isEnvBindingPolicy(raw) {
continue
}
policy := &v1alpha1.EnvBinding{}
if err := c.Get(ctx, types.NamespacedName{Namespace: raw.GetNamespace(), Name: raw.GetName()}, policy); err != nil {
klog.Infof("failed to run gc for envBinding subClusters: %v", err)
}
if policy.Status.ClusterDecisions == nil {
continue
}
for _, decision := range policy.Status.ClusterDecisions {
subClusters[decision.Cluster] = true
}
}
func GarbageCollectionForOutdatedResourcesInSubClusters(ctx context.Context, app *v1beta1.Application, gcHandler func(context.Context) error) error {
var errs errors2.ErrorList
for clusterName := range subClusters {
if err := gcHandler(multicluster.ContextWithClusterName(ctx, clusterName)); err != nil {
for _, clusterName := range getAppliedClusters(app) {
if err := gcHandler(ContextWithClusterName(ctx, clusterName)); err != nil {
if !errors.As(err, &errors2.ResourceTrackerNotExistError{}) {
errs.Append(errors.Wrapf(err, "failed to run gc in subCluster %s", clusterName))
}
@@ -72,23 +58,18 @@ func GarbageCollectionForOutdatedResourcesInSubClusters(ctx context.Context, c c
}
// GarbageCollectionForAllResourceTrackersInSubCluster run garbage collection in sub clusters and remove all ResourceTrackers for the EnvBinding
func GarbageCollectionForAllResourceTrackersInSubCluster(ctx context.Context, c client.Client, envBinding *v1alpha1.EnvBinding) error {
baseApp, err := util.RawExtension2Application(envBinding.Spec.AppTemplate.RawExtension)
if err != nil {
klog.ErrorS(err, "failed to parse AppTemplate of EnvBinding")
return errors.WithMessage(err, "cannot remove finalizer")
}
func GarbageCollectionForAllResourceTrackersInSubCluster(ctx context.Context, c client.Client, app *v1beta1.Application) error {
// delete subCluster resourceTracker
for _, decision := range envBinding.Status.ClusterDecisions {
subCtx := multicluster.ContextWithClusterName(ctx, decision.Cluster)
for _, cluster := range getAppliedClusters(app) {
subCtx := ContextWithClusterName(ctx, cluster)
listOpts := []client.ListOption{
client.MatchingLabels{
oam.LabelAppName: baseApp.Name,
oam.LabelAppNamespace: baseApp.Namespace,
oam.LabelAppName: app.Name,
oam.LabelAppNamespace: app.Namespace,
}}
rtList := &v1beta1.ResourceTrackerList{}
if err := c.List(subCtx, rtList, listOpts...); err != nil {
klog.ErrorS(err, "failed to list resource tracker of app", "name", baseApp.Name, "env", decision.Env)
klog.ErrorS(err, "failed to list resource tracker of app", "name", app.Name, "cluster", cluster)
return errors.WithMessage(err, "cannot remove finalizer")
}
for _, rt := range rtList.Items {

View File

@@ -0,0 +1,178 @@
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package envbinding
import (
"encoding/json"
"fmt"
"github.com/imdario/mergo"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/runtime"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/oam/util"
errors2 "github.com/oam-dev/kubevela/pkg/utils/errors"
)
// MergeRawExtension merges two raw extensions: both are decoded to maps,
// the patch map is deep-merged over the base map (patch values win), and the
// result is re-encoded as JSON into a fresh RawExtension.
func MergeRawExtension(base *runtime.RawExtension, patch *runtime.RawExtension) (*runtime.RawExtension, error) {
	patchMap, err := util.RawExtension2Map(patch)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to convert patch parameters to map")
	}
	baseMap, err := util.RawExtension2Map(base)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to convert base parameters to map")
	}
	// a nil base decodes to a nil map; mergo needs a non-nil destination
	if baseMap == nil {
		baseMap = map[string]interface{}{}
	}
	if err = mergo.Merge(&baseMap, patchMap, mergo.WithOverride); err != nil {
		return nil, errors.Wrapf(err, "failed to do merge with override")
	}
	raw, err := json.Marshal(baseMap)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to marshal merged properties")
	}
	return &runtime.RawExtension{Raw: raw}, nil
}
// MergeComponent merge two component, it will first merge their properties and then merge their traits.
//
// Traits present in both components have their properties deep-merged (patch
// wins); traits only present in the patch are appended. The base traits'
// relative order is preserved, followed by newly introduced traits.
// Returns a new component; neither input is modified.
func MergeComponent(base *common.ApplicationComponent, patch *common.ApplicationComponent) (*common.ApplicationComponent, error) {
	newComponent := base.DeepCopy()
	var err error
	// merge component properties
	newComponent.Properties, err = MergeRawExtension(base.Properties, patch.Properties)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to merge component properties")
	}
	// prepare traits: index base traits by type and remember their order
	traitMaps := map[string]*common.ApplicationTrait{}
	var traitOrders []string
	for _, trait := range base.Traits {
		traitMaps[trait.Type] = trait.DeepCopy()
		traitOrders = append(traitOrders, trait.Type)
	}
	// patch traits
	var errs errors2.ErrorList
	for _, trait := range patch.Traits {
		if baseTrait, exists := traitMaps[trait.Type]; exists {
			baseTrait.Properties, err = MergeRawExtension(baseTrait.Properties, trait.Properties)
			if err != nil {
				errs.Append(errors.Wrapf(err, "failed to merge trait %s", trait.Type))
			}
		} else {
			traitMaps[trait.Type] = trait.DeepCopy()
			traitOrders = append(traitOrders, trait.Type)
		}
	}
	if errs.HasError() {
		// BUG FIX: wrap the collected error list, not `err` — `err` may be nil
		// here (a later trait merge can succeed after an earlier one failed),
		// and errors.Wrapf(nil, ...) returns nil, which would have silently
		// returned (nil, nil) and reported success with a nil component.
		return nil, errors.Wrapf(errs, "failed to merge component traits")
	}
	// fill in traits in the recorded order
	newComponent.Traits = []common.ApplicationTrait{}
	for _, traitType := range traitOrders {
		newComponent.Traits = append(newComponent.Traits, *traitMaps[traitType])
	}
	return newComponent, nil
}
// filterComponents narrows a component-name list to the names allowed by the
// selector. A nil selector, or a selector with no components, selects
// everything (the input is returned unchanged).
func filterComponents(components []string, selector *v1alpha1.EnvSelector) []string {
	if selector == nil || len(selector.Components) == 0 {
		return components
	}
	allowed := make(map[string]bool, len(selector.Components))
	for _, name := range selector.Components {
		allowed[name] = true
	}
	var filtered []string
	for _, name := range components {
		if allowed[name] {
			filtered = append(filtered, name)
		}
	}
	return filtered
}
// PatchApplication patch base application with patch and selector.
//
// Components present in both base and patch are merged via MergeComponent
// (unless their types differ, in which case the patch component replaces the
// base one entirely); components only in the patch are appended. If a
// selector is given, the resulting component list is filtered down to the
// selected names. Returns a new application; the base is not modified.
func PatchApplication(base *v1beta1.Application, patch *v1alpha1.EnvPatch, selector *v1alpha1.EnvSelector) (*v1beta1.Application, error) {
	newApp := base.DeepCopy()
	// init components: index base components by name and remember their order
	compMaps := map[string]*common.ApplicationComponent{}
	var compOrders []string
	for _, comp := range base.Spec.Components {
		compMaps[comp.Name] = comp.DeepCopy()
		compOrders = append(compOrders, comp.Name)
	}
	// patch components
	var errs errors2.ErrorList
	var err error
	for _, comp := range patch.Components {
		if baseComp, exists := compMaps[comp.Name]; exists {
			if baseComp.Type != comp.Type {
				// a type change means the patch component replaces the base one
				compMaps[comp.Name] = comp.DeepCopy()
			} else {
				compMaps[comp.Name], err = MergeComponent(baseComp, comp.DeepCopy())
				if err != nil {
					errs.Append(errors.Wrapf(err, "failed to merge component %s", comp.Name))
				}
			}
		} else {
			compMaps[comp.Name] = comp.DeepCopy()
			compOrders = append(compOrders, comp.Name)
		}
	}
	if errs.HasError() {
		// BUG FIX: wrap the collected error list, not `err` — `err` may be nil
		// here (a later component merge can succeed after an earlier one
		// failed), and errors.Wrapf(nil, ...) returns nil, which would have
		// silently returned (nil, nil) and reported success with a nil app.
		return nil, errors.Wrapf(errs, "failed to merge application components")
	}
	newApp.Spec.Components = []common.ApplicationComponent{}
	// if selector is enabled, filter
	compOrders = filterComponents(compOrders, selector)
	// fill in new application
	for _, compName := range compOrders {
		newApp.Spec.Components = append(newApp.Spec.Components, *compMaps[compName])
	}
	return newApp, nil
}
// PatchApplicationByEnvBindingEnv get patched application directly through
// policyName and envName. An empty policyName selects the first env-binding
// policy found on the application.
func PatchApplicationByEnvBindingEnv(app *v1beta1.Application, policyName string, envName string) (*v1beta1.Application, error) {
	spec, err := GetEnvBindingPolicy(app, policyName)
	if err != nil {
		return nil, err
	}
	if spec != nil {
		for idx := range spec.Envs {
			env := spec.Envs[idx]
			if env.Name != envName {
				continue
			}
			return PatchApplication(app, &env.Patch, env.Selector)
		}
	}
	return nil, fmt.Errorf("target env %s in policy %s not found", envName, policyName)
}

View File

@@ -31,27 +31,26 @@ import (
func Test_EnvBindApp_GenerateConfiguredApplication(t *testing.T) {
testcases := []struct {
baseApp *v1beta1.Application
envConfig *v1alpha1.EnvConfig
envName string
envPatch v1alpha1.EnvPatch
expectedApp *v1beta1.Application
}{{
baseApp: baseApp,
envConfig: &v1alpha1.EnvConfig{
Name: "prod",
Patch: v1alpha1.EnvPatch{
Components: []common.ApplicationComponent{{
Name: "express-server",
Type: "webservice",
envName: "prod",
envPatch: v1alpha1.EnvPatch{
Components: []common.ApplicationComponent{{
Name: "express-server",
Type: "webservice",
Properties: util.Object2RawExtension(map[string]interface{}{
"image": "busybox",
}),
Traits: []common.ApplicationTrait{{
Type: "ingress-1-20",
Properties: util.Object2RawExtension(map[string]interface{}{
"image": "busybox",
"domain": "newTestsvc.example.com",
}),
Traits: []common.ApplicationTrait{{
Type: "ingress-1-20",
Properties: util.Object2RawExtension(map[string]interface{}{
"domain": "newTestsvc.example.com",
}),
}},
}},
},
}},
},
expectedApp: &v1beta1.Application{
TypeMeta: metav1.TypeMeta{
@@ -83,33 +82,31 @@ func Test_EnvBindApp_GenerateConfiguredApplication(t *testing.T) {
},
}, {
baseApp: baseApp,
envConfig: &v1alpha1.EnvConfig{
Name: "prod",
Patch: v1alpha1.EnvPatch{
Components: []common.ApplicationComponent{{
Name: "express-server",
Type: "webservice",
Traits: []common.ApplicationTrait{{
Type: "labels",
Properties: util.Object2RawExtension(map[string]interface{}{
"test": "label",
}),
}},
}, {
Name: "new-server",
Type: "worker",
envName: "prod",
envPatch: v1alpha1.EnvPatch{
Components: []common.ApplicationComponent{{
Name: "express-server",
Type: "webservice",
Traits: []common.ApplicationTrait{{
Type: "labels",
Properties: util.Object2RawExtension(map[string]interface{}{
"image": "busybox",
"cmd": []string{"sleep", "1000"},
"test": "label",
}),
Traits: []common.ApplicationTrait{{
Type: "labels",
Properties: util.Object2RawExtension(map[string]interface{}{
"test": "label",
}),
}},
}},
},
}, {
Name: "new-server",
Type: "worker",
Properties: util.Object2RawExtension(map[string]interface{}{
"image": "busybox",
"cmd": []string{"sleep", "1000"},
}),
Traits: []common.ApplicationTrait{{
Type: "labels",
Properties: util.Object2RawExtension(map[string]interface{}{
"test": "label",
}),
}},
}},
},
expectedApp: &v1beta1.Application{
TypeMeta: metav1.TypeMeta{
@@ -160,10 +157,9 @@ func Test_EnvBindApp_GenerateConfiguredApplication(t *testing.T) {
}}
for _, testcase := range testcases {
envBindApp := NewEnvBindApp(testcase.baseApp, testcase.envConfig)
err := envBindApp.GenerateConfiguredApplication()
app, err := PatchApplication(testcase.baseApp, &testcase.envPatch, nil)
assert.NoError(t, err)
assert.Equal(t, envBindApp.PatchedApp, testcase.expectedApp)
assert.Equal(t, app, testcase.expectedApp)
}
}

View File

@@ -0,0 +1,94 @@
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package envbinding
import (
"encoding/json"
"k8s.io/apimachinery/pkg/runtime"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
)
// ReadPlacementDecisions read placement decisions from application status,
// return (decisions, if decision is made, error). An empty policyName selects
// the first env-binding policy status found.
func ReadPlacementDecisions(app *v1beta1.Application, policyName string, envName string) ([]v1alpha1.PlacementDecision, bool, error) {
	status, err := GetEnvBindingPolicyStatus(app, policyName)
	if err != nil {
		return nil, false, err
	}
	if status == nil {
		// no status recorded for this policy yet
		return nil, false, nil
	}
	for idx := range status.Envs {
		if status.Envs[idx].Env != envName {
			continue
		}
		return status.Envs[idx].Placements, true, nil
	}
	return nil, false, nil
}
// WritePlacementDecisions write placement decisions into application status.
//
// The decisions for (policyName, envName) are stored inside the env-binding
// entry of app.Status.PolicyStatus: if an entry for the policy already exists,
// the env's placements are overwritten (or the env appended); otherwise a
// fresh PolicyStatus entry is created. The EnvBindingStatus payload is stored
// as raw JSON inside a runtime.RawExtension.
func WritePlacementDecisions(app *v1beta1.Application, policyName string, envName string, decisions []v1alpha1.PlacementDecision) error {
	statusExists := false
	for idx, policyStatus := range app.Status.PolicyStatus {
		if policyStatus.Name == policyName && policyStatus.Type == v1alpha1.EnvBindingPolicyType {
			envBindingStatus := &v1alpha1.EnvBindingStatus{}
			// BUG FIX: guard against a nil Status payload — the sibling
			// GetEnvBindingPolicyStatus treats nil Status as "no status", but
			// dereferencing policyStatus.Status.Raw here would previously panic.
			if policyStatus.Status != nil {
				if err := json.Unmarshal(policyStatus.Status.Raw, envBindingStatus); err != nil {
					return err
				}
			}
			insert := true
			for _idx, envStatus := range envBindingStatus.Envs {
				if envStatus.Env == envName {
					// TODO gc
					envBindingStatus.Envs[_idx].Placements = decisions
					insert = false
					break
				}
			}
			if insert {
				envBindingStatus.Envs = append(envBindingStatus.Envs, v1alpha1.EnvStatus{
					Env:        envName,
					Placements: decisions,
				})
			}
			bs, err := json.Marshal(envBindingStatus)
			if err != nil {
				return err
			}
			app.Status.PolicyStatus[idx].Status = &runtime.RawExtension{Raw: bs}
			statusExists = true
			break
		}
	}
	if !statusExists {
		// no status recorded for this policy yet: create a new entry
		bs, err := json.Marshal(&v1alpha1.EnvBindingStatus{
			Envs: []v1alpha1.EnvStatus{{
				Env:        envName,
				Placements: decisions,
			}},
		})
		if err != nil {
			return err
		}
		app.Status.PolicyStatus = append(app.Status.PolicyStatus, common.PolicyStatus{
			Name:   policyName,
			Type:   v1alpha1.EnvBindingPolicyType,
			Status: &runtime.RawExtension{Raw: bs},
		})
	}
	return nil
}

View File

@@ -0,0 +1,95 @@
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package envbinding
import (
"encoding/json"
"testing"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/runtime"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
)
// TestReadPlacementDecisions verifies ReadPlacementDecisions against three
// policy-status payloads: malformed JSON (error expected), a matching env
// (decisions returned), and a non-matching env (no decisions, no error).
func TestReadPlacementDecisions(t *testing.T) {
	pld := []v1alpha1.PlacementDecision{{
		Cluster:   "example-cluster",
		Namespace: "example-namespace",
	}}
	testCases := []struct {
		Status           *v1alpha1.EnvBindingStatus // payload to marshal into the policy status; nil means use StatusRaw
		StatusRaw        []byte                     // raw (possibly invalid) payload used when Status is nil
		ExpectedExists   bool
		ExpectedHasError bool
	}{{
		// invalid JSON payload must surface an unmarshal error
		Status:           nil,
		StatusRaw:        []byte(`bad value`),
		ExpectedExists:   false,
		ExpectedHasError: true,
	}, {
		// env name matches: decisions must be found
		Status: &v1alpha1.EnvBindingStatus{
			Envs: []v1alpha1.EnvStatus{{
				Env:        "example-env",
				Placements: pld,
			}},
		},
		ExpectedExists:   true,
		ExpectedHasError: false,
	}, {
		// env name does not match: no decisions, but no error either
		Status: &v1alpha1.EnvBindingStatus{
			Envs: []v1alpha1.EnvStatus{{
				Env:        "bad-env",
				Placements: pld,
			}},
		},
		ExpectedExists:   false,
		ExpectedHasError: false,
	}}
	r := require.New(t)
	for _, testCase := range testCases {
		app := &v1beta1.Application{}
		_status := common.PolicyStatus{
			Name: "example-policy",
			Type: v1alpha1.EnvBindingPolicyType,
		}
		if testCase.Status == nil {
			_status.Status = &runtime.RawExtension{Raw: testCase.StatusRaw}
		} else {
			bs, err := json.Marshal(testCase.Status)
			r.NoError(err)
			_status.Status = &runtime.RawExtension{Raw: bs}
		}
		app.Status.PolicyStatus = []common.PolicyStatus{_status}
		// empty policy name selects the first env-binding policy status
		pds, exists, err := ReadPlacementDecisions(app, "", "example-env")
		r.Equal(testCase.ExpectedExists, exists)
		if testCase.ExpectedHasError {
			r.Error(err)
			continue
		}
		r.NoError(err)
		if exists {
			r.Equal(len(pld), len(pds))
			for idx := range pld {
				r.Equal(pld[idx].Cluster, pds[idx].Cluster)
				r.Equal(pld[idx].Namespace, pds[idx].Namespace)
			}
		}
	}
}

View File

@@ -0,0 +1,51 @@
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package envbinding
import (
"encoding/json"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
)
// GetEnvBindingPolicy extract env-binding policy with given policy name, if
// policy name is empty, the first env-binding policy will be used. Returns
// (nil, nil) when no matching policy exists.
func GetEnvBindingPolicy(app *v1beta1.Application, policyName string) (*v1alpha1.EnvBindingSpec, error) {
	for _, policy := range app.Spec.Policies {
		if policy.Type != v1alpha1.EnvBindingPolicyType {
			continue
		}
		if policyName != "" && policy.Name != policyName {
			continue
		}
		spec := &v1alpha1.EnvBindingSpec{}
		err := json.Unmarshal(policy.Properties.Raw, spec)
		return spec, err
	}
	return nil, nil
}
// GetEnvBindingPolicyStatus extract env-binding policy status with given
// policy name, if policy name is empty, the first env-binding policy will be
// used. Returns (nil, nil) when no matching status (or no payload) exists.
func GetEnvBindingPolicyStatus(app *v1beta1.Application, policyName string) (*v1alpha1.EnvBindingStatus, error) {
	for idx := range app.Status.PolicyStatus {
		ps := app.Status.PolicyStatus[idx]
		if ps.Type != v1alpha1.EnvBindingPolicyType {
			continue
		}
		if policyName != "" && ps.Name != policyName {
			continue
		}
		// a matching entry without a payload counts as no status
		if ps.Status == nil {
			return nil, nil
		}
		status := &v1alpha1.EnvBindingStatus{}
		err := json.Unmarshal(ps.Status.Raw, status)
		return status, err
	}
	return nil, nil
}

View File

@@ -1,5 +1,4 @@
import (
"encoding/yaml"
"encoding/json"
"encoding/base64"
"strings"
@@ -106,57 +105,7 @@ import (
}
}
#ApplyEnvBindApp: #Steps & {
env: string
policy: string
app: string
namespace: string
_namespace: namespace
envBinding: kube.#Read & {
value: {
apiVersion: "core.oam.dev/v1alpha1"
kind: "EnvBinding"
metadata: {
name: policy
namespace: _namespace
}
}
} @step(1)
// wait until envBinding.value.status equal "finished"
wait: #ConditionalWait & {
continue: envBinding.value.status.phase == "finished"
} @step(2)
configMap: kube.#Read & {
value: {
apiVersion: "v1"
kind: "ConfigMap"
metadata: {
name: policy
namespace: _namespace
}
data?: _
}
} @step(3)
patchedApp: yaml.Unmarshal(configMap.value.data["\(env)"])[context.name]
components: patchedApp.spec.components
apply: #Steps & {
for key, comp in components {
"\(key)": #ApplyComponent & {
value: comp
if patchedApp.metadata.labels != _|_ && patchedApp.metadata.labels["cluster.oam.dev/clusterName"] != _|_ {
cluster: patchedApp.metadata.labels["cluster.oam.dev/clusterName"]
}
if patchedApp.metadata.labels != _|_ && patchedApp.metadata.labels["envbinding.oam.dev/override-namespace"] != _|_ {
namespace: patchedApp.metadata.labels["envbinding.oam.dev/override-namespace"]
}
} @step(4)
}
}
}
#ApplyEnvBindApp: multicluster.#ApplyEnvBindApp
#HTTPGet: http.#Do & {method: "GET"}

View File

@@ -0,0 +1,124 @@
// #Placement describes where an env should be scheduled: a cluster and/or a
// namespace, each selected either by labels or by explicit name.
#Placement: {
	clusterSelector?: {
		labels?: [string]: string
		name?: string
	}
	namespaceSelector?: {
		labels?: [string]: string
		name?: string
	}
}

// #PlacementDecision is one resolved (cluster, namespace) scheduling target.
#PlacementDecision: {
	namespace?: string
	cluster?:   string
}

// #Component mirrors an OAM application component: type, properties and traits.
#Component: {
	name: string
	type: string
	properties?: {...}
	traits?: [...{
		type: string
		properties: {...}
	}]
}
// #ReadPlacementDecisions reads previously recorded placement decisions for a
// policy/env pair from the application status; outputs.decisions is absent
// when no decision has been recorded yet.
//
// NOTE(review): the Go provider reads "inputs.policyName", but this schema
// declares the field as "policy" — confirm which name is intended
// (#MakePlacementDecisions and the provider tests both use "policyName").
#ReadPlacementDecisions: {
	#provider: "multicluster"
	#do:       "read-placement-decisions"
	inputs: {
		policy:  string
		envName: string
	}
	outputs: {
		decisions?: [...#PlacementDecision]
	}
}
// #MakePlacementDecisions resolves the placement for a policy/env pair,
// persists the decision into the application status, and returns it.
#MakePlacementDecisions: {
	#provider: "multicluster"
	#do:       "make-placement-decisions"
	inputs: {
		policyName: string
		envName:    string
		placement:  #Placement
	}
	outputs: {
		decisions: [...#PlacementDecision]
	}
}
// #PatchApplication applies an env-binding patch/selector to the current
// application; outputs carries the patched application object.
#PatchApplication: {
	#provider: "multicluster"
	#do:       "patch-application"
	inputs: {
		envName: string
		// both patch and selector are optional; absent means "no change"
		patch?: components: [...#Component]
		selector?: components: [...string]
	}
	outputs: {...}
	...
}
// #ApplyEnvBindApp deploys an application according to its env-binding
// policy: it loads the policy, makes (and persists) the placement decision
// for the requested env, patches the application with the env's
// patch/selector, and applies every component to every decided
// (cluster, namespace) pair.
#ApplyEnvBindApp: {
	#do: "steps"

	env:       string // name of the env inside the env-binding policy
	policy:    string // name of the env-binding policy
	app:       string
	namespace: string

	// load all policies of the application and pick the requested one
	loadPolicies: oam.#LoadPolicies @step(1)
	loadPolicy:   loadPolicies.value["\(policy)"]
	// index the policy's envs by name so the requested env can be looked up
	envMap: {
		for ev in loadPolicy.properties.envs {
			"\(ev.name)": ev
		}
		...
	}
	envConfig: envMap["\(env)"]

	// resolve (and persist) the placement decision for this env
	placementDecisions: multicluster.#MakePlacementDecisions & {
		inputs: {
			policyName: policy
			envName:    env
			placement:  envConfig.placement
		}
	} @step(2)

	// patch the application with the env's component patch/selector (optional)
	patchedApp: multicluster.#PatchApplication & {
		inputs: {
			envName: env
			if envConfig.selector != _|_ {
				selector: envConfig.selector
			}
			if envConfig.patch != _|_ {
				patch: envConfig.patch
			}
		}
	} @step(3)

	components: patchedApp.outputs.spec.components
	// apply every component to every decided cluster/namespace target
	apply: #Steps & {
		for decision in placementDecisions.outputs.decisions {
			for key, comp in components {
				"\(decision.cluster)-\(decision.namespace)-\(key)": #ApplyComponent & {
					value: comp
					if decision.cluster != _|_ {
						cluster: decision.cluster
					}
					if decision.namespace != _|_ {
						namespace: decision.namespace
					}
				} @step(4)
			}
		}
	}
}

View File

@@ -23,3 +23,10 @@
#do: "load"
...
}
// #LoadPolicies loads all policies declared by the current application,
// keyed by policy name in `value`.
#LoadPolicies: {
	#provider: "oam"
	#do:       "load-policies"
	value?: {...}
	...
}

View File

@@ -0,0 +1,160 @@
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package multicluster
import (
"context"
"github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/cue/model/value"
"github.com/oam-dev/kubevela/pkg/multicluster"
"github.com/oam-dev/kubevela/pkg/policy/envbinding"
wfContext "github.com/oam-dev/kubevela/pkg/workflow/context"
"github.com/oam-dev/kubevela/pkg/workflow/providers"
wfTypes "github.com/oam-dev/kubevela/pkg/workflow/types"
)
const (
// ProviderName is provider name for install.
ProviderName = "multicluster"
)
// provider implements the "multicluster" workflow provider handlers; it
// carries the control-plane client and the application being reconciled.
type provider struct {
	client.Client
	app *v1beta1.Application
}
// ReadPlacementDecisions reads the placement decisions previously recorded
// for (policyName, envName) from the application status and fills them into
// outputs.decisions; when no decision has been recorded yet, outputs is left
// empty so the caller can detect the absence.
//
// NOTE(review): this reads "inputs.policyName", while the CUE schema for
// #ReadPlacementDecisions declares the field as "policy" — confirm which side
// is authoritative (#MakePlacementDecisions and the tests use "policyName").
func (p *provider) ReadPlacementDecisions(ctx wfContext.Context, v *value.Value, act wfTypes.Action) error {
	policy, err := v.GetString("inputs", "policyName")
	if err != nil {
		return err
	}
	env, err := v.GetString("inputs", "envName")
	if err != nil {
		return err
	}
	decisions, exists, err := envbinding.ReadPlacementDecisions(p.app, policy, env)
	if err != nil {
		return err
	}
	if exists {
		return v.FillObject(map[string]interface{}{"decisions": decisions}, "outputs")
	}
	// no decision recorded yet: fill an empty outputs object
	return v.FillObject(map[string]interface{}{}, "outputs")
}
// MakePlacementDecisions resolves the placement for (policyName, envName)
// from inputs.placement, records the decision into the application status via
// envbinding.WritePlacementDecisions, and fills it into outputs.decisions.
//
// Current restrictions: only name-based namespace/cluster selection is
// supported; label selectors are rejected. A missing cluster name falls back
// to the local cluster.
func (p *provider) MakePlacementDecisions(ctx wfContext.Context, v *value.Value, act wfTypes.Action) error {
	policy, err := v.GetString("inputs", "policyName")
	if err != nil {
		return err
	}
	env, err := v.GetString("inputs", "envName")
	if err != nil {
		return err
	}
	val, err := v.LookupValue("inputs", "placement")
	if err != nil {
		return err
	}
	// TODO detect env change
	placement := &v1alpha1.EnvPlacement{}
	if err = val.UnmarshalTo(placement); err != nil {
		return errors.Wrapf(err, "failed to parse placement while making placement decision")
	}
	var namespace, clusterName string
	// check if namespace selector is valid (only name selection is supported)
	if placement.NamespaceSelector != nil {
		if len(placement.NamespaceSelector.Labels) != 0 {
			return errors.Errorf("invalid env %s: namespace selector in cluster-gateway does not support label selector for now", env)
		}
		namespace = placement.NamespaceSelector.Name
	}
	// check if cluster selector is valid (only name selection is supported)
	if placement.ClusterSelector != nil {
		if len(placement.ClusterSelector.Labels) != 0 {
			return errors.Errorf("invalid env %s: cluster selector does not support label selector for now", env)
		}
		clusterName = placement.ClusterSelector.Name
	}
	// set fallback cluster
	if clusterName == "" {
		clusterName = multicluster.ClusterLocalName
	}
	// check if target cluster exists; the cluster-gateway secret is the
	// registration record (the local cluster always exists)
	if clusterName != multicluster.ClusterLocalName {
		if err = p.Get(context.Background(), types.NamespacedName{Namespace: multicluster.ClusterGatewaySecretNamespace, Name: clusterName}, &v1.Secret{}); err != nil {
			return errors.Wrapf(err, "failed to get cluster %s for env %s", clusterName, env)
		}
	}
	// write result back into the application status, then into outputs
	decisions := []v1alpha1.PlacementDecision{{
		Cluster:   clusterName,
		Namespace: namespace,
	}}
	if err = envbinding.WritePlacementDecisions(p.app, policy, env, decisions); err != nil {
		return err
	}
	return v.FillObject(map[string]interface{}{"decisions": decisions}, "outputs")
}
// PatchApplication patches the workflow's application with the env-binding
// patch/selector read from inputs, and fills the patched application object
// into outputs.
//
// Both inputs.patch and inputs.selector are optional: a LookupValue failure
// is deliberately treated as "not provided" rather than surfaced as an error.
func (p *provider) PatchApplication(ctx wfContext.Context, v *value.Value, act wfTypes.Action) error {
	env, err := v.GetString("inputs", "envName")
	if err != nil {
		return err
	}
	patch := v1alpha1.EnvPatch{}
	selector := &v1alpha1.EnvSelector{}
	// optional patch: absent means an empty patch
	obj, err := v.LookupValue("inputs", "patch")
	if err == nil {
		if err = obj.UnmarshalTo(&patch); err != nil {
			return errors.Wrapf(err, "failed to unmarshal patch for env %s", env)
		}
	}
	// optional selector: absent means "keep all components" (nil selector)
	obj, err = v.LookupValue("inputs", "selector")
	if err == nil {
		if err = obj.UnmarshalTo(selector); err != nil {
			return errors.Wrapf(err, "failed to unmarshal selector for env %s", env)
		}
	} else {
		selector = nil
	}
	newApp, err := envbinding.PatchApplication(p.app, &patch, selector)
	if err != nil {
		return errors.Wrapf(err, "failed to patch app for env %s", env)
	}
	return v.FillObject(newApp, "outputs")
}
// Install register handlers to provider discover.
func Install(p providers.Providers, c client.Client, app *v1beta1.Application) {
	prd := &provider{Client: c, app: app}
	handlers := map[string]providers.Handler{
		"read-placement-decisions": prd.ReadPlacementDecisions,
		"make-placement-decisions": prd.MakePlacementDecisions,
		"patch-application":        prd.PatchApplication,
	}
	p.Register(ProviderName, handlers)
}

View File

@@ -0,0 +1,479 @@
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package multicluster
import (
"context"
"encoding/json"
"testing"
"github.com/stretchr/testify/require"
v1 "k8s.io/api/core/v1"
v12 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
common2 "github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/cue/model/value"
"github.com/oam-dev/kubevela/pkg/multicluster"
"github.com/oam-dev/kubevela/pkg/utils/common"
"github.com/oam-dev/kubevela/pkg/workflow/providers/mock"
)
// TestReadPlacementDecisions verifies that ReadPlacementDecisions reports
// missing inputs as errors and reads any placement decision stored in the
// application's env-binding policy status into "outputs".
func TestReadPlacementDecisions(t *testing.T) {
	testCases := []struct {
		InputVal             map[string]interface{} // workflow inputs filled into "inputs"
		OldCluster           string                 // cluster of a pre-existing placement decision
		OldNamespace         string                 // namespace of a pre-existing placement decision
		ExpectError          string                 // substring of the expected error; empty for success
		ExpectDecisionExists bool                   // whether a decision should appear in outputs
		ExpectCluster        string
		ExpectNamespace      string
	}{{
		InputVal:    map[string]interface{}{},
		ExpectError: "var(path=inputs.policyName) not exist",
	}, {
		InputVal: map[string]interface{}{
			"policyName": "example-policy",
		},
		ExpectError: "var(path=inputs.envName) not exist",
	}, {
		InputVal: map[string]interface{}{
			"policyName": "example-policy",
			"envName":    "example-env",
		},
		ExpectError:          "",
		ExpectDecisionExists: false,
	}, {
		InputVal: map[string]interface{}{
			"policyName": "example-policy",
			"envName":    "example-env",
		},
		OldCluster:           "example-cluster",
		OldNamespace:         "example-namespace",
		ExpectError:          "",
		ExpectDecisionExists: true,
		ExpectCluster:        "example-cluster",
		ExpectNamespace:      "example-namespace",
	}}
	r := require.New(t)
	for _, testCase := range testCases {
		cli := fake.NewClientBuilder().WithScheme(common.Scheme).Build()
		app := &v1beta1.Application{}
		p := &provider{
			Client: cli,
			app:    app,
		}
		act := &mock.Action{}
		v, err := value.NewValue("", nil, "")
		r.NoError(err)
		r.NoError(v.FillObject(testCase.InputVal, "inputs"))
		// Seed a pre-existing placement decision into the policy status.
		// Fix: gate on the Old* fields (the data actually written) instead of
		// the Expect* fields, which only worked because the values coincided.
		if testCase.OldCluster != "" || testCase.OldNamespace != "" {
			pd := v1alpha1.PlacementDecision{
				Cluster:   testCase.OldCluster,
				Namespace: testCase.OldNamespace,
			}
			bs, err := json.Marshal(&v1alpha1.EnvBindingStatus{
				Envs: []v1alpha1.EnvStatus{{
					Env:        "example-env",
					Placements: []v1alpha1.PlacementDecision{pd},
				}},
			})
			r.NoError(err)
			app.Status.PolicyStatus = []common2.PolicyStatus{{
				Name:   "example-policy",
				Type:   v1alpha1.EnvBindingPolicyType,
				Status: &runtime.RawExtension{Raw: bs},
			}}
		}
		err = p.ReadPlacementDecisions(nil, v, act)
		if testCase.ExpectError == "" {
			r.NoError(err)
		} else {
			r.Contains(err.Error(), testCase.ExpectError)
			continue
		}
		outputs, err := v.LookupValue("outputs")
		r.NoError(err)
		md := map[string][]v1alpha1.PlacementDecision{}
		r.NoError(outputs.UnmarshalTo(&md))
		if !testCase.ExpectDecisionExists {
			r.Equal(0, len(md))
		} else {
			r.Equal(1, len(md["decisions"]))
			r.Equal(testCase.ExpectCluster, md["decisions"][0].Cluster)
			r.Equal(testCase.ExpectNamespace, md["decisions"][0].Namespace)
		}
	}
}
// TestMakePlacementDecisions verifies that MakePlacementDecisions validates
// its inputs, resolves the target cluster/namespace from the placement spec,
// exposes the decision in "outputs", and records it in the application's
// env-binding policy status (overwriting any prior decision).
func TestMakePlacementDecisions(t *testing.T) {
	multicluster.ClusterGatewaySecretNamespace = types.DefaultKubeVelaNS
	testCases := []struct {
		InputVal        map[string]interface{} // workflow inputs filled into "inputs"
		OldCluster      string                 // cluster of a pre-existing placement decision
		OldNamespace    string                 // namespace of a pre-existing placement decision
		ExpectError     string                 // substring of the expected error; empty for success
		ExpectCluster   string
		ExpectNamespace string
		PreAddCluster   string // cluster secret to create before the call
	}{{
		InputVal:    map[string]interface{}{},
		ExpectError: "var(path=inputs.policyName) not exist",
	}, {
		InputVal: map[string]interface{}{
			"policyName": "example-policy",
		},
		ExpectError: "var(path=inputs.envName) not exist",
	}, {
		InputVal: map[string]interface{}{
			"policyName": "example-policy",
			"envName":    "example-env",
		},
		ExpectError: "var(path=inputs.placement) not exist",
	}, {
		InputVal: map[string]interface{}{
			"policyName": "example-policy",
			"envName":    "example-env",
			"placement":  "example-placement",
		},
		ExpectError: "failed to parse placement while making placement decision",
	}, {
		InputVal: map[string]interface{}{
			"policyName": "example-policy",
			"envName":    "example-env",
			"placement": map[string]interface{}{
				"namespaceSelector": map[string]interface{}{
					"labels": map[string]string{"key": "value"},
				},
			},
		},
		ExpectError: "namespace selector in cluster-gateway does not support label selector for now",
	}, {
		InputVal: map[string]interface{}{
			"policyName": "example-policy",
			"envName":    "example-env",
			"placement": map[string]interface{}{
				"clusterSelector": map[string]interface{}{
					"labels": map[string]string{"key": "value"},
				},
			},
		},
		ExpectError: "cluster selector does not support label selector for now",
	}, {
		// An empty placement falls back to the local cluster.
		InputVal: map[string]interface{}{
			"policyName": "example-policy",
			"envName":    "example-env",
			"placement":  map[string]interface{}{},
		},
		ExpectError:     "",
		ExpectCluster:   "local",
		ExpectNamespace: "",
	}, {
		// Named cluster that has not been joined: lookup must fail.
		InputVal: map[string]interface{}{
			"policyName": "example-policy",
			"envName":    "example-env",
			"placement": map[string]interface{}{
				"clusterSelector": map[string]interface{}{
					"name": "example-cluster",
				},
				"namespaceSelector": map[string]interface{}{
					"name": "example-namespace",
				},
			},
		},
		ExpectError: "failed to get cluster",
	}, {
		InputVal: map[string]interface{}{
			"policyName": "example-policy",
			"envName":    "example-env",
			"placement": map[string]interface{}{
				"clusterSelector": map[string]interface{}{
					"name": "example-cluster",
				},
				"namespaceSelector": map[string]interface{}{
					"name": "example-namespace",
				},
			},
		},
		ExpectError:     "",
		ExpectCluster:   "example-cluster",
		ExpectNamespace: "example-namespace",
		PreAddCluster:   "example-cluster",
	}, {
		// A pre-existing decision must be overwritten by the new one.
		InputVal: map[string]interface{}{
			"policyName": "example-policy",
			"envName":    "example-env",
			"placement": map[string]interface{}{
				"clusterSelector": map[string]interface{}{
					"name": "example-cluster",
				},
				"namespaceSelector": map[string]interface{}{
					"name": "example-namespace",
				},
			},
		},
		OldCluster:      "old-cluster",
		OldNamespace:    "old-namespace",
		ExpectError:     "",
		ExpectCluster:   "example-cluster",
		ExpectNamespace: "example-namespace",
		PreAddCluster:   "example-cluster",
	}, {
		InputVal: map[string]interface{}{
			"policyName": "example-policy",
			"envName":    "example-env",
			"placement": map[string]interface{}{
				"clusterSelector": map[string]interface{}{
					"name": "example-cluster",
				},
				"namespaceSelector": map[string]interface{}{
					"name": "example-namespace",
				},
			},
		},
		ExpectError:     "",
		ExpectCluster:   "example-cluster",
		ExpectNamespace: "example-namespace",
		PreAddCluster:   "example-cluster",
	}}
	r := require.New(t)
	for _, testCase := range testCases {
		cli := fake.NewClientBuilder().WithScheme(common.Scheme).Build()
		app := &v1beta1.Application{}
		p := &provider{
			Client: cli,
			app:    app,
		}
		act := &mock.Action{}
		v, err := value.NewValue("", nil, "")
		r.NoError(err)
		r.NoError(v.FillObject(testCase.InputVal, "inputs"))
		// Create the cluster secret so the provider can resolve the cluster.
		if testCase.PreAddCluster != "" {
			r.NoError(cli.Create(context.Background(), &v1.Secret{
				ObjectMeta: v12.ObjectMeta{
					Namespace: multicluster.ClusterGatewaySecretNamespace,
					Name:      testCase.PreAddCluster,
				},
			}))
		}
		// Seed a pre-existing placement decision into the policy status.
		// Fix: the old code assigned the fields swapped (Cluster got
		// OldNamespace and Namespace got OldCluster).
		if testCase.OldNamespace != "" || testCase.OldCluster != "" {
			pd := v1alpha1.PlacementDecision{
				Cluster:   testCase.OldCluster,
				Namespace: testCase.OldNamespace,
			}
			bs, err := json.Marshal(&v1alpha1.EnvBindingStatus{
				Envs: []v1alpha1.EnvStatus{{
					Env:        "example-env",
					Placements: []v1alpha1.PlacementDecision{pd},
				}},
			})
			r.NoError(err)
			app.Status.PolicyStatus = []common2.PolicyStatus{{
				Name:   "example-policy",
				Type:   v1alpha1.EnvBindingPolicyType,
				Status: &runtime.RawExtension{Raw: bs},
			}}
		}
		err = p.MakePlacementDecisions(nil, v, act)
		if testCase.ExpectError == "" {
			r.NoError(err)
		} else {
			r.Contains(err.Error(), testCase.ExpectError)
			continue
		}
		// The decision must be exposed in "outputs" ...
		outputs, err := v.LookupValue("outputs")
		r.NoError(err)
		md := map[string][]v1alpha1.PlacementDecision{}
		r.NoError(outputs.UnmarshalTo(&md))
		r.Equal(1, len(md["decisions"]))
		r.Equal(testCase.ExpectCluster, md["decisions"][0].Cluster)
		r.Equal(testCase.ExpectNamespace, md["decisions"][0].Namespace)
		// ... and recorded as the single decision in the policy status.
		r.Equal(1, len(app.Status.PolicyStatus))
		r.Equal(testCase.InputVal["policyName"], app.Status.PolicyStatus[0].Name)
		r.Equal(v1alpha1.EnvBindingPolicyType, app.Status.PolicyStatus[0].Type)
		status := &v1alpha1.EnvBindingStatus{}
		r.NoError(json.Unmarshal(app.Status.PolicyStatus[0].Status.Raw, status))
		r.Equal(1, len(status.Envs))
		r.Equal(testCase.InputVal["envName"], status.Envs[0].Env)
		r.Equal(1, len(status.Envs[0].Placements))
		r.Equal(testCase.ExpectNamespace, status.Envs[0].Placements[0].Namespace)
		r.Equal(testCase.ExpectCluster, status.Envs[0].Placements[0].Cluster)
	}
}
// TestPatchApplication verifies that PatchApplication merges the env patch
// into the base application's components and applies the component selector,
// and that malformed patch/selector inputs are reported as errors.
func TestPatchApplication(t *testing.T) {
	// Base application shared by all cases: two components, the second
	// carrying scaler/env/labels traits to exercise trait merging.
	baseApp := &v1beta1.Application{Spec: v1beta1.ApplicationSpec{
		Components: []common2.ApplicationComponent{{
			Name:       "comp-1",
			Type:       "webservice",
			Properties: &runtime.RawExtension{Raw: []byte(`{"image":"base"}`)},
		}, {
			Name:       "comp-3",
			Type:       "webservice",
			Properties: &runtime.RawExtension{Raw: []byte(`{"image":"ext"}`)},
			Traits: []common2.ApplicationTrait{{
				Type:       "scaler",
				Properties: &runtime.RawExtension{Raw: []byte(`{"replicas":3}`)},
			}, {
				Type:       "env",
				Properties: &runtime.RawExtension{Raw: []byte(`{"env":{"key":"value"}}`)},
			}, {
				Type:       "labels",
				Properties: &runtime.RawExtension{Raw: []byte(`{"lKey":"lVal"}`)},
			}},
		}},
	}}
	testCases := []struct {
		InputVal         map[string]interface{}            // workflow inputs filled into "inputs"
		ExpectError      string                            // substring of the expected error; empty for success
		ExpectComponents []common2.ApplicationComponent    // expected components after patching
	}{{
		InputVal:    map[string]interface{}{},
		ExpectError: "var(path=inputs.envName) not exist",
	}, {
		// No patch and no selector: components pass through unchanged.
		InputVal: map[string]interface{}{
			"envName": "example-env",
		},
		ExpectComponents: baseApp.Spec.Components,
	}, {
		InputVal: map[string]interface{}{
			"envName": "example-env",
			"patch":   "bad patch",
		},
		ExpectError: "failed to unmarshal patch for env",
	}, {
		InputVal: map[string]interface{}{
			"envName":  "example-env",
			"selector": "bad selector",
		},
		ExpectError: "failed to unmarshal selector for env",
	}, {
		// Full patch + selector: patches existing components, adds new ones,
		// merges traits, and the selector filters/orders the result
		// (comp-2 does not exist, comp-4 is not selected).
		InputVal: map[string]interface{}{
			"envName": "example-env",
			"patch": map[string]interface{}{
				"components": []map[string]interface{}{{
					"name": "comp-0",
					"type": "webservice",
				}, {
					"name": "comp-1",
					"type": "worker",
					"properties": map[string]interface{}{
						"image": "patch",
						"port":  8080,
					},
				}, {
					"name": "comp-3",
					"type": "webservice",
					"properties": map[string]interface{}{
						"image": "patch",
						"port":  8090,
					},
					"traits": []map[string]interface{}{{
						"type":       "scaler",
						"properties": map[string]interface{}{"replicas": 5},
					}, {
						"type":       "env",
						"properties": map[string]interface{}{"env": map[string]string{"Key": "Value"}},
					}, {
						"type":       "annotations",
						"properties": map[string]interface{}{"aKey": "aVal"}},
					},
				}, {
					"name": "comp-4",
					"type": "webservice",
				}},
			},
			"selector": map[string]interface{}{
				"components": []string{"comp-2", "comp-1", "comp-3", "comp-0"},
			},
		},
		ExpectComponents: []common2.ApplicationComponent{{
			Name:       "comp-1",
			Type:       "worker",
			Properties: &runtime.RawExtension{Raw: []byte(`{"image":"patch","port":8080}`)},
		}, {
			Name:       "comp-3",
			Type:       "webservice",
			Properties: &runtime.RawExtension{Raw: []byte(`{"image":"patch","port":8090}`)},
			Traits: []common2.ApplicationTrait{{
				Type:       "scaler",
				Properties: &runtime.RawExtension{Raw: []byte(`{"replicas":5}`)},
			}, {
				Type:       "env",
				Properties: &runtime.RawExtension{Raw: []byte(`{"env":{"Key":"Value","key":"value"}}`)},
			}, {
				Type:       "labels",
				Properties: &runtime.RawExtension{Raw: []byte(`{"lKey":"lVal"}`)},
			}, {
				Type:       "annotations",
				Properties: &runtime.RawExtension{Raw: []byte(`{"aKey":"aVal"}`)},
			}},
		}, {
			Name: "comp-0",
			Type: "webservice",
		}},
	}}
	r := require.New(t)
	for _, testCase := range testCases {
		cli := fake.NewClientBuilder().WithScheme(common.Scheme).Build()
		p := &provider{
			Client: cli,
			app:    baseApp,
		}
		act := &mock.Action{}
		v, err := value.NewValue("", nil, "")
		r.NoError(err)
		r.NoError(v.FillObject(testCase.InputVal, "inputs"))
		err = p.PatchApplication(nil, v, act)
		if testCase.ExpectError == "" {
			r.NoError(err)
		} else {
			r.Contains(err.Error(), testCase.ExpectError)
			continue
		}
		outputs, err := v.LookupValue("outputs")
		r.NoError(err)
		patchApp := &v1beta1.Application{}
		r.NoError(outputs.UnmarshalTo(patchApp))
		// Compare the patched components field by field, including raw
		// trait/property JSON payloads.
		r.Equal(len(testCase.ExpectComponents), len(patchApp.Spec.Components))
		for idx, comp := range testCase.ExpectComponents {
			_comp := patchApp.Spec.Components[idx]
			r.Equal(comp.Name, _comp.Name)
			r.Equal(comp.Type, _comp.Type)
			if comp.Properties == nil {
				r.Equal(comp.Properties, _comp.Properties)
			} else {
				r.Equal(string(comp.Properties.Raw), string(_comp.Properties.Raw))
			}
			r.Equal(len(comp.Traits), len(_comp.Traits))
			for _idx, trait := range comp.Traits {
				_trait := _comp.Traits[_idx]
				r.Equal(trait.Type, _trait.Type)
				if trait.Properties == nil {
					r.Equal(trait.Properties, _trait.Properties)
				} else {
					r.Equal(string(trait.Properties.Raw), string(_trait.Properties.Raw))
				}
			}
		}
	}
}

View File

@@ -157,6 +157,16 @@ func (p *provider) LoadComponent(ctx wfContext.Context, v *value.Value, act wfTy
return nil
}
// LoadPolicies load policy describe info in application.
func (p *provider) LoadPolicies(ctx wfContext.Context, v *value.Value, act wfTypes.Action) error {
	policies := p.app.Spec.Policies
	for i := range policies {
		// Expose each policy under value.<policy-name>.
		policy := policies[i]
		if fillErr := v.FillObject(policy, "value", policy.Name); fillErr != nil {
			return fillErr
		}
	}
	return nil
}
// Install register handlers to provider discover.
func Install(p providers.Providers, app *v1beta1.Application, apply ComponentApply, render ComponentRender) {
prd := &provider{
@@ -168,5 +178,6 @@ func Install(p providers.Providers, app *v1beta1.Application, apply ComponentApp
"component-render": prd.RenderComponent,
"component-apply": prd.ApplyComponent,
"load": prd.LoadComponent,
"load-policies": prd.LoadPolicies,
})
}

View File

@@ -36,11 +36,10 @@ import (
ocmclusterv1 "open-cluster-management.io/api/cluster/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/multicluster"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/policy/envbinding"
"github.com/oam-dev/kubevela/pkg/utils/common"
errors3 "github.com/oam-dev/kubevela/pkg/utils/errors"
"github.com/oam-dev/kubevela/references/a/preimport"
@@ -385,15 +384,20 @@ func getMutableClusterSecret(c client.Client, clusterName string) (*v1.Secret, e
if labels == nil || labels[v1alpha12.LabelKeyClusterCredentialType] == "" {
return nil, fmt.Errorf("invalid cluster secret %s: cluster credential type label %s is not set", clusterName, v1alpha12.LabelKeyClusterCredentialType)
}
ebs := &v1alpha1.EnvBindingList{}
if err := c.List(context.Background(), ebs); err != nil {
return nil, errors.Wrap(err, "failed to find EnvBindings to check clusters")
apps := &v1beta1.ApplicationList{}
if err := c.List(context.Background(), apps); err != nil {
return nil, errors.Wrap(err, "failed to find applications to check clusters")
}
errs := errors3.ErrorList{}
for _, eb := range ebs.Items {
for _, decision := range eb.Status.ClusterDecisions {
if decision.Cluster == clusterName {
errs.Append(fmt.Errorf("application %s/%s (env: %s, envBinding: %s) is currently using cluster %s", eb.Namespace, eb.Labels[oam.LabelAppName], decision.Env, eb.Name, clusterName))
for _, app := range apps.Items {
status, err := envbinding.GetEnvBindingPolicyStatus(app.DeepCopy(), "")
if err == nil && status != nil {
for _, env := range status.Envs {
for _, placement := range env.Placements {
if placement.Cluster == clusterName {
errs.Append(fmt.Errorf("application %s/%s (env: %s) is currently using cluster %s", app.Namespace, app.Name, env.Env, clusterName))
}
}
}
}
}

View File

@@ -1,12 +1,9 @@
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -38,7 +35,7 @@ import (
"sigs.k8s.io/yaml"
)
var _ = Describe("Test MultiClustet Rollout", func() {
var _ = Describe("Test MultiCluster Rollout", func() {
Context("Test Runtime Cluster Rollout", func() {
var namespace string
var hubCtx context.Context
@@ -56,7 +53,7 @@ var _ = Describe("Test MultiClustet Rollout", func() {
AfterEach(func() {
cleanUpNamespace(hubCtx, workerCtx, namespace)
ns := v1.Namespace{}
Eventually(func() error { return k8sClient.Get(hubCtx, types.NamespacedName{Name: namespace}, &ns) }, 300*time.Second, 300*time.Millisecond).Should(util.NotFoundMatcher{})
Eventually(func() error { return k8sClient.Get(hubCtx, types.NamespacedName{Name: namespace}, &ns) }, 300*time.Second).Should(util.NotFoundMatcher{})
})
verifySucceed := func(componentRevision string) {
@@ -99,7 +96,7 @@ var _ = Describe("Test MultiClustet Rollout", func() {
return fmt.Errorf("source deploy still exist")
}
return nil
}, time.Second*360, 300*time.Millisecond).Should(BeNil())
}, time.Second*360).Should(BeNil())
}
It("Test Rollout whole feature in runtime cluster ", func() {
@@ -123,7 +120,7 @@ var _ = Describe("Test MultiClustet Rollout", func() {
return err
}
return nil
}, 500*time.Millisecond, 30*time.Second).Should(BeNil())
}, 30*time.Second).Should(BeNil())
verifySucceed(componentName + "-v2")
By("revert to v1, should guarantee compRev v1 still exist")
@@ -141,7 +138,7 @@ var _ = Describe("Test MultiClustet Rollout", func() {
return err
}
return nil
}, 500*time.Millisecond, 30*time.Second).Should(BeNil())
}, 30*time.Second).Should(BeNil())
verifySucceed(componentName + "-v1")
})
@@ -173,7 +170,7 @@ var _ = Describe("Test MultiClustet Rollout", func() {
return fmt.Errorf("comp status workload check don't work")
}
return nil
}, 300*time.Millisecond, 30*time.Second).Should(BeNil())
}, 30*time.Second).Should(BeNil())
By("update application to v2")
checkApp := &v1beta1.Application{}
Eventually(func() error {
@@ -185,7 +182,7 @@ var _ = Describe("Test MultiClustet Rollout", func() {
return err
}
return nil
}, 500*time.Millisecond, 30*time.Second).Should(BeNil())
}, 30*time.Second).Should(BeNil())
verifySucceed(componentName + "-v2")
Eventually(func() error {
// Note: KubeVela will only check the workload of the target revision
@@ -207,7 +204,7 @@ var _ = Describe("Test MultiClustet Rollout", func() {
return fmt.Errorf("comp status workload check don't work")
}
return nil
}, 300*time.Millisecond, 30*time.Second).Should(BeNil())
}, 60*time.Second).Should(BeNil())
})
})
})

View File

@@ -29,6 +29,7 @@ import (
v13 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
v14 "k8s.io/api/rbac/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
v12 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/clientcmd"
@@ -90,6 +91,35 @@ var _ = Describe("Test multicluster scenario", func() {
Expect(out).ShouldNot(ContainSubstring(newClusterName))
})
// Verify that a cluster cannot be detached while an application still places
// resources on it, and that detaching succeeds once the application is gone.
It("Test detach cluster with application use", func() {
	const testClusterName = "test-cluster"
	// Join the worker cluster under a dedicated name for this test.
	_, err := execCommand("cluster", "join", "/tmp/worker.kubeconfig", "--name", testClusterName)
	Expect(err).Should(Succeed())
	app := &v1beta1.Application{}
	bs, err := ioutil.ReadFile("./testdata/app/example-lite-envbinding-app.yaml")
	Expect(err).Should(Succeed())
	// Substitute the placeholder cluster name in the YAML fixture.
	appYaml := strings.ReplaceAll(string(bs), "TEST_CLUSTER_NAME", testClusterName)
	Expect(yaml.Unmarshal([]byte(appYaml), app)).Should(Succeed())
	ctx := context.Background()
	err = k8sClient.Create(ctx, app)
	Expect(err).Should(Succeed())
	namespacedName := client.ObjectKeyFromObject(app)
	// Wait until the env-binding policy status is recorded, which marks the
	// cluster as in use by this application.
	Eventually(func(g Gomega) {
		g.Expect(k8sClient.Get(ctx, namespacedName, app)).Should(Succeed())
		g.Expect(len(app.Status.PolicyStatus)).ShouldNot(Equal(0))
	}, 30*time.Second).Should(Succeed())
	// Detach must fail while the application uses the cluster.
	_, err = execCommand("cluster", "detach", testClusterName)
	Expect(err).ShouldNot(Succeed())
	err = k8sClient.Delete(ctx, app)
	Expect(err).Should(Succeed())
	// Wait for the application to be fully deleted.
	Eventually(func(g Gomega) {
		err := k8sClient.Get(ctx, namespacedName, app)
		g.Expect(kerrors.IsNotFound(err)).Should(BeTrue())
	}, 30*time.Second).Should(Succeed())
	// After the application is deleted, detach must succeed.
	_, err = execCommand("cluster", "detach", testClusterName)
	Expect(err).Should(Succeed())
})
It("Test generate service account kubeconfig", func() {
_, workerCtx := initializeContext()
// create service account kubeconfig in worker cluster
@@ -173,6 +203,7 @@ var _ = Describe("Test multicluster scenario", func() {
// 2. Namespace selector.
// 3. A special cluster: local cluster
// 4. Component selector.
By("apply application")
app := &v1beta1.Application{}
bs, err := ioutil.ReadFile("./testdata/app/example-envbinding-app.yaml")
Expect(err).Should(Succeed())
@@ -182,6 +213,7 @@ var _ = Describe("Test multicluster scenario", func() {
err = k8sClient.Create(hubCtx, app)
Expect(err).Should(Succeed())
var hubDeployName string
By("wait application resource ready")
Eventually(func(g Gomega) {
// check deployments in clusters
deploys := &v13.DeploymentList{}
@@ -194,10 +226,12 @@ var _ = Describe("Test multicluster scenario", func() {
deploys = &v13.DeploymentList{}
g.Expect(k8sClient.List(workerCtx, deploys, client.InNamespace(prodNamespace))).Should(Succeed())
g.Expect(len(deploys.Items)).Should(Equal(2))
}, 2*time.Minute).Should(Succeed())
}, time.Minute).Should(Succeed())
Expect(hubDeployName).Should(Equal("data-worker"))
// delete application
By("delete application")
Expect(k8sClient.Delete(hubCtx, app)).Should(Succeed())
By("wait application resource delete")
Eventually(func(g Gomega) {
// check deployments in clusters
deploys := &v13.DeploymentList{}
@@ -206,7 +240,7 @@ var _ = Describe("Test multicluster scenario", func() {
deploys = &v13.DeploymentList{}
g.Expect(k8sClient.List(workerCtx, deploys, client.InNamespace(namespace))).Should(Succeed())
g.Expect(len(deploys.Items)).Should(Equal(0))
}, 2*time.Minute).Should(Succeed())
}, time.Minute).Should(Succeed())
})
})

View File

@@ -0,0 +1,32 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: example-lite-app
namespace: default
spec:
components:
- name: data-worker
type: worker
properties:
image: busybox
cmd:
- sleep
- '1000000'
policies:
- name: example-multi-env-policy
type: env-binding
properties:
envs:
- name: test
placement:
clusterSelector:
name: TEST_CLUSTER_NAME
workflow:
steps:
# deploy to test env
- name: deploy-test
type: deploy2env
properties:
policy: example-multi-env-policy
env: test