Mirror of https://github.com/kubevela/kubevela.git

add webhooks to the rollout plan and use AC as App snapshot (#1031)

* add webhooks
* app controller change
* add component revision and appconfig revision and test
* solidify the component revision logic and fix component revision bugs
* fix command cli e2e failure
* fix the bug caused by rawExtension
* fix UT test
* retry on component not found
* lint
* revert component revision create order
@@ -21,6 +21,8 @@ import (

	runtimev1alpha1 "github.com/crossplane/crossplane-runtime/apis/core/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
)

// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
@@ -44,8 +46,7 @@ const (

type AppStatus struct {
	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
	// Important: Run "make" to regenerate code after modifying this file

	runtimev1alpha1.ConditionedStatus `json:",inline"`
	v1alpha1.RolloutStatus `json:",inline"`

	Phase ApplicationPhase `json:"status,omitempty"`

@@ -54,6 +55,10 @@ type AppStatus struct {

	// Services record the status of the application services
	Services []ApplicationComponentStatus `json:"services,omitempty"`

	// LatestRevision of the application configuration it generates
	// +optional
	LatestRevision *Revision `json:"latestRevision,omitempty"`
}

// ApplicationComponentStatus records the health status of an App component

@@ -99,6 +104,11 @@ type ApplicationSpec struct {
	Components []ApplicationComponent `json:"components"`

	// TODO(wonderflow): we should have application level scopes supported here

	// RolloutPlan is the details on how to rollout the resources
	// The controller simply replaces the old resources with the new ones if there is no rollout plan involved
	// +optional
	RolloutPlan *v1alpha1.RolloutPlan `json:"rolloutPlan,omitempty"`
}

// +kubebuilder:object:root=true
@@ -22,13 +22,14 @@ package v1alpha2

import (
	"github.com/crossplane/crossplane-runtime/apis/core/v1alpha1"
	standard_oam_devv1alpha1 "github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
	"k8s.io/apimachinery/pkg/runtime"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AppStatus) DeepCopyInto(out *AppStatus) {
	*out = *in
	in.ConditionedStatus.DeepCopyInto(&out.ConditionedStatus)
	in.RolloutStatus.DeepCopyInto(&out.RolloutStatus)
	if in.Components != nil {
		in, out := &in.Components, &out.Components
		*out = make([]v1alpha1.TypedReference, len(*in))

@@ -41,6 +42,11 @@ func (in *AppStatus) DeepCopyInto(out *AppStatus) {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.LatestRevision != nil {
		in, out := &in.LatestRevision, &out.LatestRevision
		*out = new(Revision)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppStatus.

@@ -429,6 +435,11 @@ func (in *ApplicationSpec) DeepCopyInto(out *ApplicationSpec) {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.RolloutPlan != nil {
		in, out := &in.RolloutPlan, &out.RolloutPlan
		*out = new(standard_oam_devv1alpha1.RolloutPlan)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationSpec.
@@ -82,12 +82,11 @@ type RolloutPlan struct {

	TargetSize *int32 `json:"targetSize,omitempty"`

	// The number of batches, default = 1
	// mutually exclusive to RolloutBatches
	// +optional
	NumBatches *int32 `json:"numBatches,omitempty"`

	// The exact distribution among batches.
	// mutually exclusive to NumBatches.
	// its size has to be exactly the same as NumBatches (if set)
	// The total number cannot exceed the targetSize or the size of the source resource
	// We will IGNORE the last batch's replica field if it's a percentage since rounding errors can lead to an inaccurate sum
	// We highly recommend leaving the last batch's replica field empty

@@ -161,9 +160,8 @@ type RolloutWebhook struct {

	// URL address of this webhook
	URL string `json:"url"`

	// Request timeout for this webhook
	Timeout string `json:"timeout,omitempty"`

	// ExpectedStatus contains the HTTP status codes accepted as a successful webhook response
	ExpectedStatus []int `json:"expectedStatus,omitempty"`
	// Metadata (key-value pairs) for this webhook
	// +optional
	Metadata *map[string]string `json:"metadata,omitempty"`

@@ -171,11 +169,14 @@ type RolloutWebhook struct {

// RolloutWebhookPayload holds the info and metadata sent to webhooks
type RolloutWebhookPayload struct {
	// ResourceRef refers to the resource we are operating on
	ResourceRef *runtimev1alpha1.TypedReference `json:"resourceRef"`
	// Name of the upgrading resource
	Name string `json:"name"`

	// RolloutRef refers to the rollout that is controlling the rollout
	RolloutRef *runtimev1alpha1.TypedReference `json:"rolloutRef"`
	// Namespace of the upgrading resource
	Namespace string `json:"namespace"`

	// Phase of the rollout
	Phase RollingState `json:"phase"`

	// Metadata (key-value pairs) are the extra data sent to this webhook
	Metadata map[string]string `json:"metadata,omitempty"`
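To make the webhook contract concrete, here is a self-contained sketch of how a caller could POST the payload above and gate on ExpectedStatus. The local types are simplified copies, and the default-to-200 behavior is an assumption; this is not the controller's actual code.

package webhook

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)

// rolloutWebhookPayload mirrors the json tags on RolloutWebhookPayload above.
type rolloutWebhookPayload struct {
	Name      string            `json:"name"`
	Namespace string            `json:"namespace"`
	Phase     string            `json:"phase"`
	Metadata  map[string]string `json:"metadata,omitempty"`
}

// callWebhook posts the payload and succeeds only if the response code is in
// the webhook's expectedStatus list (defaulting to 200 when the list is empty).
func callWebhook(url string, timeout time.Duration, expectedStatus []int, p rolloutWebhookPayload) error {
	body, err := json.Marshal(p)
	if err != nil {
		return err
	}
	client := &http.Client{Timeout: timeout}
	resp, err := client.Post(url, "application/json", bytes.NewReader(body))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if len(expectedStatus) == 0 {
		expectedStatus = []int{http.StatusOK} // assumption: 200 accepted by default
	}
	for _, code := range expectedStatus {
		if resp.StatusCode == code {
			return nil
		}
	}
	return fmt.Errorf("webhook %s returned unexpected status %d", url, resp.StatusCode)
}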
@@ -39,9 +39,6 @@ const (

	// FinishedOneBatchEvent indicates that we have successfully rolled out one batch
	FinishedOneBatchEvent RolloutEvent = "FinishedOneBatchEvent"

	// BatchRolloutContinueEvent indicates that we need to continue to upgrade the pods in the batch
	BatchRolloutContinueEvent RolloutEvent = "BatchRolloutContinueEvent"

	// BatchRolloutVerifyingEvent indicates that we are waiting for approval to resume one batch
	BatchRolloutVerifyingEvent RolloutEvent = "BatchRolloutVerifyingEvent"

@@ -183,6 +180,7 @@ func (r *RolloutStatus) StateTransition(event RolloutEvent) {

	case InitializingState:
		if event == RollingInitializedEvent {
			r.RollingState = RollingInBatchesState
			r.BatchRollingState = BatchInitializingState
			r.SetConditions(NewPositiveCondition(r.getRolloutConditionType()))
			return
		}

@@ -206,10 +204,6 @@ func (r *RolloutStatus) StateTransition(event RolloutEvent) {

			r.SetConditions(NewPositiveCondition(r.getRolloutConditionType()))
			return
		}
		if event == RollingFinalizedEvent {
			// no op
			return
		}
		panic(fmt.Errorf(invalidRollingStateTransition, rollingState, event))

	case RolloutFailedState:

@@ -247,10 +241,6 @@ func (r *RolloutStatus) batchStateTransition(event RolloutEvent) {

		panic(fmt.Errorf(invalidBatchRollingStateTransition, batchRollingState, event))

	case BatchInRollingState:
		if event == BatchRolloutContinueEvent {
			// no op
			return
		}
		if event == BatchRolloutVerifyingEvent {
			r.BatchRollingState = BatchVerifyingState
			r.SetConditions(NewPositiveCondition(r.getRolloutConditionType()))

@@ -264,10 +254,6 @@ func (r *RolloutStatus) batchStateTransition(event RolloutEvent) {

			r.SetConditions(NewPositiveCondition(r.getRolloutConditionType()))
			return
		}
		if event == BatchRolloutVerifyingEvent {
			// no op
			return
		}
		panic(fmt.Errorf(invalidBatchRollingStateTransition, batchRollingState, event))

	case BatchFinalizingState:

@@ -278,6 +264,7 @@ func (r *RolloutStatus) batchStateTransition(event RolloutEvent) {

		}
		if event == AllBatchFinishedEvent {
			// transition out of the batch loop
			r.BatchRollingState = BatchReadyState
			r.RollingState = FinalisingState
			r.SetConditions(NewPositiveCondition(r.getRolloutConditionType()))
			return

@@ -287,6 +274,7 @@ func (r *RolloutStatus) batchStateTransition(event RolloutEvent) {

	case BatchReadyState:
		if event == BatchRolloutApprovedEvent {
			r.BatchRollingState = BatchInitializingState
			r.CurrentBatch++
			r.SetConditions(NewPositiveCondition(r.getRolloutConditionType()))
			return
		}
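Pieced together from the transitions visible in these hunks, one batch travels through the state machine as follows. This assumes StateTransition forwards batch-level events to batchStateTransition while RollingState is RollingInBatchesState, which the shared signatures suggest; it is a reading aid, not code from the repo.

status := &RolloutStatus{RollingState: InitializingState}

status.StateTransition(RollingInitializedEvent)
// -> RollingState = RollingInBatchesState, BatchRollingState = BatchInitializingState

status.StateTransition(BatchRolloutVerifyingEvent)
// from BatchInRollingState -> BatchRollingState = BatchVerifyingState

status.StateTransition(AllBatchFinishedEvent)
// from BatchFinalizingState -> BatchRollingState = BatchReadyState,
//                              RollingState      = FinalisingState

status.StateTransition(BatchRolloutApprovedEvent)
// from BatchReadyState -> BatchRollingState = BatchInitializingState, CurrentBatch++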
@@ -623,6 +623,11 @@ func (in *RolloutTraitSpec) DeepCopy() *RolloutTraitSpec {

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RolloutWebhook) DeepCopyInto(out *RolloutWebhook) {
	*out = *in
	if in.ExpectedStatus != nil {
		in, out := &in.ExpectedStatus, &out.ExpectedStatus
		*out = make([]int, len(*in))
		copy(*out, *in)
	}
	if in.Metadata != nil {
		in, out := &in.Metadata, &out.Metadata
		*out = new(map[string]string)

@@ -649,16 +654,6 @@ func (in *RolloutWebhook) DeepCopy() *RolloutWebhook {

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RolloutWebhookPayload) DeepCopyInto(out *RolloutWebhookPayload) {
	*out = *in
	if in.ResourceRef != nil {
		in, out := &in.ResourceRef, &out.ResourceRef
		*out = new(corev1alpha1.TypedReference)
		**out = **in
	}
	if in.RolloutRef != nil {
		in, out := &in.RolloutRef, &out.RolloutRef
		*out = new(corev1alpha1.TypedReference)
		**out = **in
	}
	if in.Metadata != nil {
		in, out := &in.Metadata, &out.Metadata
		*out = make(map[string]string, len(*in))
@@ -101,14 +101,14 @@ spec:
            format: int32
            type: integer
          numBatches:
            description: The number of batches, default = 1 mutually exclusive to RolloutBatches
            description: The number of batches, default = 1
            format: int32
            type: integer
          paused:
            description: Paused the rollout, default is false
            type: boolean
          rolloutBatches:
            description: The exact distribution among batches. Mutually exclusive to NumBatches. The total number cannot exceed the targetSize or the size of the source resource. We will IGNORE the last batch's replica field if it's a percentage since rounding errors can lead to an inaccurate sum. We highly recommend leaving the last batch's replica field empty
            description: The exact distribution among batches. Its size has to be exactly the same as NumBatches (if set). The total number cannot exceed the targetSize or the size of the source resource. We will IGNORE the last batch's replica field if it's a percentage since rounding errors can lead to an inaccurate sum. We highly recommend leaving the last batch's replica field empty
            items:
              description: RolloutBatch describes how each batch of the rollout should be performed
              properties:

@@ -117,6 +117,11 @@ spec:
                items:
                  description: RolloutWebhook holds the reference to external checks used for canary analysis
                  properties:
                    expectedStatus:
                      description: ExpectedStatus contains the HTTP status codes accepted as a successful webhook response
                      items:
                        type: integer
                      type: array
                    metadata:
                      additionalProperties:
                        type: string

@@ -125,9 +130,6 @@ spec:
                    name:
                      description: Name of this webhook
                      type: string
                    timeout:
                      description: Request timeout for this webhook
                      type: string
                    type:
                      description: Type of this webhook
                      type: string

@@ -222,6 +224,11 @@ spec:
                items:
                  description: RolloutWebhook holds the reference to external checks used for canary analysis
                  properties:
                    expectedStatus:
                      description: ExpectedStatus contains the HTTP status codes accepted as a successful webhook response
                      items:
                        type: integer
                      type: array
                    metadata:
                      additionalProperties:
                        type: string

@@ -230,9 +237,6 @@ spec:
                    name:
                      description: Name of this webhook
                      type: string
                    timeout:
                      description: Request timeout for this webhook
                      type: string
                    type:
                      description: Type of this webhook
                      type: string
@@ -70,12 +70,227 @@ spec:
              - type
              type: object
            type: array
          rolloutPlan:
            description: RolloutPlan is the details on how to rollout the resources. The controller simply replaces the old resources with the new ones if there is no rollout plan involved
            properties:
              canaryMetric:
                description: CanaryMetric provides a way for the rollout process to automatically check certain metrics before completing the process
                items:
                  description: CanaryMetric holds the reference to metrics used for canary analysis
                  properties:
                    interval:
                      description: Interval represents the window size
                      type: string
                    metricsRange:
                      description: Range of values accepted for this metric
                      properties:
                        max:
                          anyOf:
                          - type: integer
                          - type: string
                          description: Maximum value
                          x-kubernetes-int-or-string: true
                        min:
                          anyOf:
                          - type: integer
                          - type: string
                          description: Minimum value
                          x-kubernetes-int-or-string: true
                      type: object
                    name:
                      description: Name of the metric
                      type: string
                    templateRef:
                      description: TemplateRef references a metric template object
                      properties:
                        apiVersion:
                          description: APIVersion of the referenced object.
                          type: string
                        kind:
                          description: Kind of the referenced object.
                          type: string
                        name:
                          description: Name of the referenced object.
                          type: string
                        uid:
                          description: UID of the referenced object.
                          type: string
                      required:
                      - apiVersion
                      - kind
                      - name
                      type: object
                  required:
                  - name
                  type: object
                type: array
              lastBatchToRollout:
                description: All pods in the batches up to the batchPartition (inclusive) will have the target resource specification while the rest still have the source resource. This is designed for operators to rollout manually. Default is the number of batches, which will rollout all the batches
                format: int32
                type: integer
              numBatches:
                description: The number of batches, default = 1
                format: int32
                type: integer
              paused:
                description: Paused the rollout, default is false
                type: boolean
              rolloutBatches:
                description: The exact distribution among batches. Its size has to be exactly the same as NumBatches (if set). The total number cannot exceed the targetSize or the size of the source resource. We will IGNORE the last batch's replica field if it's a percentage since rounding errors can lead to an inaccurate sum. We highly recommend leaving the last batch's replica field empty
                items:
                  description: RolloutBatch describes how each batch of the rollout should be performed
                  properties:
                    batchRolloutWebhooks:
                      description: RolloutWebhooks provides a way for the batch rollout to interact with an external process
                      items:
                        description: RolloutWebhook holds the reference to external checks used for canary analysis
                        properties:
                          expectedStatus:
                            description: ExpectedStatus contains the HTTP status codes accepted as a successful webhook response
                            items:
                              type: integer
                            type: array
                          metadata:
                            additionalProperties:
                              type: string
                            description: Metadata (key-value pairs) for this webhook
                            type: object
                          name:
                            description: Name of this webhook
                            type: string
                          type:
                            description: Type of this webhook
                            type: string
                          url:
                            description: URL address of this webhook
                            type: string
                        required:
                        - name
                        - type
                        - url
                        type: object
                      type: array
                    canaryMetric:
                      description: CanaryMetric provides a way for the batch rollout process to automatically check certain metrics before moving to the next batch
                      items:
                        description: CanaryMetric holds the reference to metrics used for canary analysis
                        properties:
                          interval:
                            description: Interval represents the window size
                            type: string
                          metricsRange:
                            description: Range of values accepted for this metric
                            properties:
                              max:
                                anyOf:
                                - type: integer
                                - type: string
                                description: Maximum value
                                x-kubernetes-int-or-string: true
                              min:
                                anyOf:
                                - type: integer
                                - type: string
                                description: Minimum value
                                x-kubernetes-int-or-string: true
                            type: object
                          name:
                            description: Name of the metric
                            type: string
                          templateRef:
                            description: TemplateRef references a metric template object
                            properties:
                              apiVersion:
                                description: APIVersion of the referenced object.
                                type: string
                              kind:
                                description: Kind of the referenced object.
                                type: string
                              name:
                                description: Name of the referenced object.
                                type: string
                              uid:
                                description: UID of the referenced object.
                                type: string
                            required:
                            - apiVersion
                            - kind
                            - name
                            type: object
                        required:
                        - name
                        type: object
                      type: array
                    instanceInterval:
                      description: The wait time, in seconds, between instance upgrades, default = 0
                      format: int32
                      type: integer
                    maxUnavailable:
                      anyOf:
                      - type: integer
                      - type: string
                      description: MaxUnavailable is the max allowed number of pods that are unavailable during the upgrade. We will mark the batch as ready as long as the number of unavailable pods is less than or equal to this number. default = 0
                      x-kubernetes-int-or-string: true
                    podList:
                      description: The list of Pods to get upgraded; it is mutually exclusive with the Replicas field
                      items:
                        type: string
                      type: array
                    replicas:
                      anyOf:
                      - type: integer
                      - type: string
                      description: 'Replicas is the number of pods to upgrade in this batch; it can be an absolute number (ex: 5) or a percentage of total pods. We will ignore the percentage of the last batch to just fill the gap; it is mutually exclusive with the PodList field'
                      x-kubernetes-int-or-string: true
                  type: object
                type: array
              rolloutStrategy:
                description: RolloutStrategy defines strategies for the rollout plan
                type: string
              rolloutWebhooks:
                description: RolloutWebhooks provide a way for the rollout to interact with an external process
                items:
                  description: RolloutWebhook holds the reference to external checks used for canary analysis
                  properties:
                    expectedStatus:
                      description: ExpectedStatus contains the HTTP status codes accepted as a successful webhook response
                      items:
                        type: integer
                      type: array
                    metadata:
                      additionalProperties:
                        type: string
                      description: Metadata (key-value pairs) for this webhook
                      type: object
                    name:
                      description: Name of this webhook
                      type: string
                    type:
                      description: Type of this webhook
                      type: string
                    url:
                      description: URL address of this webhook
                      type: string
                  required:
                  - name
                  - type
                  - url
                  type: object
                type: array
              targetSize:
                description: The size of the target resource. The default is the same as the size of the source resource.
                format: int32
                type: integer
            type: object
        required:
        - components
        type: object
      status:
        description: AppStatus defines the observed state of Application
        properties:
          batchRollingState:
            description: BatchRollingState is only meaningful when the Status is rolling
            type: string
          components:
            description: Components record the related Components created by the Application Controller
            items:

@@ -127,6 +342,28 @@ spec:
            - type
            type: object
            type: array
          currentBatch:
            description: The current batch the rollout is working on/blocked; it starts from 0
            format: int32
            type: integer
          lastAppliedPodTemplateIdentifier:
            description: lastAppliedPodTemplateIdentifier is a string that uniquely represents the last pod template. Each workload type could use different ways to identify that, so we cannot compare between resources. We update this field only after a successful rollout
            type: string
          latestRevision:
            description: LatestRevision of the application configuration it generates
            properties:
              name:
                type: string
              revision:
                format: int64
                type: integer
            required:
            - name
            - revision
            type: object
          rollingState:
            description: RollingState is the Rollout State
            type: string
          services:
            description: Services record the status of the application services
            items:

@@ -161,6 +398,22 @@ spec:
          status:
            description: ApplicationPhase is a label for the condition of an application at the current time
            type: string
          targetGeneration:
            description: NewPodTemplateIdentifier is a string that uniquely represents the new pod template. Each workload type could use different ways to identify that, so we cannot compare between resources
            type: string
          upgradedReadyReplicas:
            description: UpgradedReadyReplicas is the number of Pods upgraded by the rollout controller that have a Ready Condition.
            format: int32
            type: integer
          upgradedReplicas:
            description: UpgradedReplicas is the number of Pods upgraded by the rollout controller
            format: int32
            type: integer
        required:
        - currentBatch
        - rollingState
        - upgradedReadyReplicas
        - upgradedReplicas
        type: object
      type: object
  served: true
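Putting the schema together, a hypothetical Application that opts into a three-batch rollout with a single pre-rollout webhook could look like the following. The component settings and the webhook type string are made-up illustrations; only the rolloutPlan field names come from the CRD above.

apiVersion: core.oam.dev/v1alpha2
kind: Application
metadata:
  name: example-app
spec:
  components:
    - name: frontend                  # hypothetical component
      settings:
        image: nginx:1.19
  rolloutPlan:
    numBatches: 3                     # mutually exclusive with rolloutBatches
    targetSize: 9
    rolloutWebhooks:
      - name: notify-start            # made-up webhook
        type: initialize-rollout      # the hook type string is an assumption
        url: http://hooks.example.com/rollout
        expectedStatus:
          - 200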
@@ -91,14 +91,14 @@ spec:
            format: int32
            type: integer
          numBatches:
            description: The number of batches, default = 1 mutually exclusive to RolloutBatches
            description: The number of batches, default = 1
            format: int32
            type: integer
          paused:
            description: Paused the rollout, default is false
            type: boolean
          rolloutBatches:
            description: The exact distribution among batches. Mutually exclusive to NumBatches. The total number cannot exceed the targetSize or the size of the source resource. We will IGNORE the last batch's replica field if it's a percentage since rounding errors can lead to an inaccurate sum. We highly recommend leaving the last batch's replica field empty
            description: The exact distribution among batches. Its size has to be exactly the same as NumBatches (if set). The total number cannot exceed the targetSize or the size of the source resource. We will IGNORE the last batch's replica field if it's a percentage since rounding errors can lead to an inaccurate sum. We highly recommend leaving the last batch's replica field empty
            items:
              description: RolloutBatch describes how each batch of the rollout should be performed
              properties:

@@ -107,6 +107,11 @@ spec:
                items:
                  description: RolloutWebhook holds the reference to external checks used for canary analysis
                  properties:
                    expectedStatus:
                      description: ExpectedStatus contains the HTTP status codes accepted as a successful webhook response
                      items:
                        type: integer
                      type: array
                    metadata:
                      additionalProperties:
                        type: string

@@ -115,9 +120,6 @@ spec:
                    name:
                      description: Name of this webhook
                      type: string
                    timeout:
                      description: Request timeout for this webhook
                      type: string
                    type:
                      description: Type of this webhook
                      type: string

@@ -212,6 +214,11 @@ spec:
                items:
                  description: RolloutWebhook holds the reference to external checks used for canary analysis
                  properties:
                    expectedStatus:
                      description: ExpectedStatus contains the HTTP status codes accepted as a successful webhook response
                      items:
                        type: integer
                      type: array
                    metadata:
                      additionalProperties:
                        type: string

@@ -220,9 +227,6 @@ spec:
                    name:
                      description: Name of this webhook
                      type: string
                    timeout:
                      description: Request timeout for this webhook
                      type: string
                    type:
                      description: Type of this webhook
                      type: string
@@ -170,10 +170,10 @@ func main() {

	case "", "false", string(oamcontroller.ApplyOnceOnlyOff):
		controllerArgs.ApplyMode = oamcontroller.ApplyOnceOnlyOff
		setupLog.Info("ApplyOnceOnly is disabled")
	case "true", string(oamcontroller.ApplyOnceOnlyOn):
	case "true", oamcontroller.ApplyOnceOnlyOn:
		controllerArgs.ApplyMode = oamcontroller.ApplyOnceOnlyOn
		setupLog.Info("ApplyOnceOnly is enabled, that means workload or trait only apply once if no spec change even they are changed by others")
	case string(oamcontroller.ApplyOnceOnlyForce):
	case oamcontroller.ApplyOnceOnlyForce:
		controllerArgs.ApplyMode = oamcontroller.ApplyOnceOnlyForce
		setupLog.Info("ApplyOnceOnlyForce is enabled, that means workload or trait only apply once if no spec change even they are changed or deleted by others")
	default:

@@ -235,7 +235,7 @@ func registerHealthChecks(mgr ctrl.Manager) error {

	if err := mgr.AddReadyzCheck("ping", healthz.Ping); err != nil {
		return err
	}

	// TODO: change the health check to be different from readiness check
	if err := mgr.AddHealthzCheck("ping", healthz.Ping); err != nil {
		return err
	}
@@ -165,16 +165,39 @@ upon. For example, the applicationDeployment controller will get the component f

With that said, the two controllers operate differently to extract the real workload. Here are
high-level descriptions of how each works.

#### Application in-place upgrade workflow
The most natural way to upgrade an application is to upgrade it in place: the user just changes
the application, the system picks up the change, and then applies it to the runtime. The
implementation of this type of upgrade looks like this:
- The application controller computes a hash value of the applicationConfiguration. The
application controller **always** uses the component revision name in the AC it generates. This
guarantees that the AC also changes when the component changes. (A sketch of this hashing step
follows below.)
- The application controller creates the applicationConfiguration with a new name (with a
suffix) upon a change of its hash value, and with a pre-determined annotation
"app.oam.dev/appconfig-rollout" set to true.
- The AC controller has special handling logic in the apply part of the flow. The exact logic
depends on the workload type, and we will list each in the
[rollout with different workload](#Rollout plan work with different type of workloads) section.
This special AC logic is also the real magic that makes the other rollout scenarios work, since
the AC controller is the only entity directly responsible for emitting the workload to the k8s.
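The hashing step above is not spelled out in this diff; the sketch below shows one plausible way to derive a content-addressed AC name. The helper name, the SHA-256 choice, and the suffix length are assumptions for illustration, not the controller's actual code.

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"fmt"
)

// computeSpecHash returns a short deterministic hash of an AC spec;
// encoding/json sorts map keys, so equal specs yield equal hashes.
func computeSpecHash(spec interface{}) (string, error) {
	b, err := json.Marshal(spec)
	if err != nil {
		return "", err
	}
	sum := sha256.Sum256(b)
	return hex.EncodeToString(sum[:])[:8], nil
}

func main() {
	spec := map[string]interface{}{"components": []string{"frontend-v2"}}
	h, _ := computeSpecHash(spec)
	// The controller would create e.g. "myapp-<hash>" annotated with
	// "app.oam.dev/appconfig-rollout": "true", and only generate a new AC
	// when the hash differs from the previous one.
	fmt.Println("myapp-" + h)
}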
#### ApplicationDeployment workflow
When an appDeployment is used to do an application-level rollout, **the target application
is not reconciled by the application controller yet**. This is to make sure the
appDeployment controller has full control of the new application from the beginning.
We will use a pre-defined annotation "app.oam.dev/rollout" that equals "true" to facilitate
We will use a pre-defined annotation "app.oam.dev/rollout-template" that equals "true" to facilitate
that. We expect any system, such as the [kubevela apiserver](APIServer-Catalog.md), that
utilizes an appDeployment object to follow this rule.
- Upon creation, the appDeployment controller marks itself as the owner of the application. The
application controller has built-in logic to ignore any application that has the
"app.oam.dev/rollout-template" annotation set to true.
- The appDeployment controller will also add another annotation "app.oam.dev/creating" to the
application, to be passed down to the ApplicationConfiguration CR it generates, to mark
that the AC is being reconciled for the first time.
- The ApplicationConfiguration controller recognizes this annotation and checks whether there is
anything it needs to do before emitting the workload to the k8s. The AC controller removes this
annotation at the end of a successful reconcile.
- The appDeployment controller can change the target application fields. For example,
  - It might remove all the conflicting traits, such as HPA, during upgrade.
  - It might modify the label selector fields in the services to make sure there are ways to
@@ -12,6 +12,7 @@ import (

	"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
	"github.com/oam-dev/kubevela/e2e"
	"github.com/oam-dev/kubevela/pkg/controller/common"
)

var (

@@ -64,10 +65,19 @@ var ApplicationStatusDeeplyContext = func(context string, applicationName, workl

	k8sclient, err := e2e.NewK8sClient()
	gomega.Expect(err).NotTo(gomega.HaveOccurred())

	ginkgo.By("check Application reconciled ready")
	app := &v1alpha2.Application{}
	gomega.Eventually(func() bool {
		_ = k8sclient.Get(context2.Background(), client.ObjectKey{Name: applicationName, Namespace: "default"}, app)
		return app.Status.LatestRevision != nil
	}, 180*time.Second, 1*time.Second).Should(gomega.BeTrue())

	ginkgo.By("check AppConfig reconciled ready")
	gomega.Eventually(func() int {
		appConfig := &v1alpha2.ApplicationConfiguration{}
		_ = k8sclient.Get(context2.Background(), client.ObjectKey{Name: applicationName, Namespace: "default"}, appConfig)
		_ = k8sclient.Get(context2.Background(), client.ObjectKey{
			Name: common.ConstructRevisionName(applicationName, app.Status.LatestRevision.Revision),
			Namespace: "default"}, appConfig)
		return len(appConfig.Status.Workloads)
	}, 180*time.Second, 1*time.Second).ShouldNot(gomega.Equal(0))
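The test above finds the AppConfig snapshot by a revision-derived name. Below is a sketch of what a helper like common.ConstructRevisionName plausibly does; the exact "<name>-v<revision>" format is an assumption inferred from the call site, not confirmed by this diff.

package common

import "fmt"

// ConstructRevisionName builds the name of a revisioned object from its base
// name and a monotonically increasing revision number, e.g. "myapp-v3".
func ConstructRevisionName(name string, revision int64) string {
	return fmt.Sprintf("%s-v%d", name, revision)
}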
@@ -101,14 +101,14 @@ spec:
            format: int32
            type: integer
          numBatches:
            description: The number of batches, default = 1 mutually exclusive to RolloutBatches
            description: The number of batches, default = 1
            format: int32
            type: integer
          paused:
            description: Paused the rollout, default is false
            type: boolean
          rolloutBatches:
            description: The exact distribution among batches. Mutually exclusive to NumBatches. The total number cannot exceed the targetSize or the size of the source resource. We will IGNORE the last batch's replica field if it's a percentage since rounding errors can lead to an inaccurate sum. We highly recommend leaving the last batch's replica field empty
            description: The exact distribution among batches. Its size has to be exactly the same as NumBatches (if set). The total number cannot exceed the targetSize or the size of the source resource. We will IGNORE the last batch's replica field if it's a percentage since rounding errors can lead to an inaccurate sum. We highly recommend leaving the last batch's replica field empty
            items:
              description: RolloutBatch describes how each batch of the rollout should be performed
              properties:

@@ -117,6 +117,11 @@ spec:
                items:
                  description: RolloutWebhook holds the reference to external checks used for canary analysis
                  properties:
                    expectedStatus:
                      description: ExpectedStatus contains the HTTP status codes accepted as a successful webhook response
                      items:
                        type: integer
                      type: array
                    metadata:
                      additionalProperties:
                        type: string

@@ -125,9 +130,6 @@ spec:
                    name:
                      description: Name of this webhook
                      type: string
                    timeout:
                      description: Request timeout for this webhook
                      type: string
                    type:
                      description: Type of this webhook
                      type: string

@@ -222,6 +224,11 @@ spec:
                items:
                  description: RolloutWebhook holds the reference to external checks used for canary analysis
                  properties:
                    expectedStatus:
                      description: ExpectedStatus contains the HTTP status codes accepted as a successful webhook response
                      items:
                        type: integer
                      type: array
                    metadata:
                      additionalProperties:
                        type: string

@@ -230,9 +237,6 @@ spec:
                    name:
                      description: Name of this webhook
                      type: string
                    timeout:
                      description: Request timeout for this webhook
                      type: string
                    type:
                      description: Type of this webhook
                      type: string
@@ -70,12 +70,227 @@ spec:
              - type
              type: object
            type: array
          rolloutPlan:
            description: RolloutPlan is the details on how to rollout the resources. The controller simply replaces the old resources with the new ones if there is no rollout plan involved
            properties:
              canaryMetric:
                description: CanaryMetric provides a way for the rollout process to automatically check certain metrics before completing the process
                items:
                  description: CanaryMetric holds the reference to metrics used for canary analysis
                  properties:
                    interval:
                      description: Interval represents the window size
                      type: string
                    metricsRange:
                      description: Range of values accepted for this metric
                      properties:
                        max:
                          anyOf:
                          - type: integer
                          - type: string
                          description: Maximum value
                          x-kubernetes-int-or-string: true
                        min:
                          anyOf:
                          - type: integer
                          - type: string
                          description: Minimum value
                          x-kubernetes-int-or-string: true
                      type: object
                    name:
                      description: Name of the metric
                      type: string
                    templateRef:
                      description: TemplateRef references a metric template object
                      properties:
                        apiVersion:
                          description: APIVersion of the referenced object.
                          type: string
                        kind:
                          description: Kind of the referenced object.
                          type: string
                        name:
                          description: Name of the referenced object.
                          type: string
                        uid:
                          description: UID of the referenced object.
                          type: string
                      required:
                      - apiVersion
                      - kind
                      - name
                      type: object
                  required:
                  - name
                  type: object
                type: array
              lastBatchToRollout:
                description: All pods in the batches up to the batchPartition (inclusive) will have the target resource specification while the rest still have the source resource. This is designed for operators to rollout manually. Default is the number of batches, which will rollout all the batches
                format: int32
                type: integer
              numBatches:
                description: The number of batches, default = 1
                format: int32
                type: integer
              paused:
                description: Paused the rollout, default is false
                type: boolean
              rolloutBatches:
                description: The exact distribution among batches. Its size has to be exactly the same as NumBatches (if set). The total number cannot exceed the targetSize or the size of the source resource. We will IGNORE the last batch's replica field if it's a percentage since rounding errors can lead to an inaccurate sum. We highly recommend leaving the last batch's replica field empty
                items:
                  description: RolloutBatch describes how each batch of the rollout should be performed
                  properties:
                    batchRolloutWebhooks:
                      description: RolloutWebhooks provides a way for the batch rollout to interact with an external process
                      items:
                        description: RolloutWebhook holds the reference to external checks used for canary analysis
                        properties:
                          expectedStatus:
                            description: ExpectedStatus contains the HTTP status codes accepted as a successful webhook response
                            items:
                              type: integer
                            type: array
                          metadata:
                            additionalProperties:
                              type: string
                            description: Metadata (key-value pairs) for this webhook
                            type: object
                          name:
                            description: Name of this webhook
                            type: string
                          type:
                            description: Type of this webhook
                            type: string
                          url:
                            description: URL address of this webhook
                            type: string
                        required:
                        - name
                        - type
                        - url
                        type: object
                      type: array
                    canaryMetric:
                      description: CanaryMetric provides a way for the batch rollout process to automatically check certain metrics before moving to the next batch
                      items:
                        description: CanaryMetric holds the reference to metrics used for canary analysis
                        properties:
                          interval:
                            description: Interval represents the window size
                            type: string
                          metricsRange:
                            description: Range of values accepted for this metric
                            properties:
                              max:
                                anyOf:
                                - type: integer
                                - type: string
                                description: Maximum value
                                x-kubernetes-int-or-string: true
                              min:
                                anyOf:
                                - type: integer
                                - type: string
                                description: Minimum value
                                x-kubernetes-int-or-string: true
                            type: object
                          name:
                            description: Name of the metric
                            type: string
                          templateRef:
                            description: TemplateRef references a metric template object
                            properties:
                              apiVersion:
                                description: APIVersion of the referenced object.
                                type: string
                              kind:
                                description: Kind of the referenced object.
                                type: string
                              name:
                                description: Name of the referenced object.
                                type: string
                              uid:
                                description: UID of the referenced object.
                                type: string
                            required:
                            - apiVersion
                            - kind
                            - name
                            type: object
                        required:
                        - name
                        type: object
                      type: array
                    instanceInterval:
                      description: The wait time, in seconds, between instance upgrades, default = 0
                      format: int32
                      type: integer
                    maxUnavailable:
                      anyOf:
                      - type: integer
                      - type: string
                      description: MaxUnavailable is the max allowed number of pods that are unavailable during the upgrade. We will mark the batch as ready as long as the number of unavailable pods is less than or equal to this number. default = 0
                      x-kubernetes-int-or-string: true
                    podList:
                      description: The list of Pods to get upgraded; it is mutually exclusive with the Replicas field
                      items:
                        type: string
                      type: array
                    replicas:
                      anyOf:
                      - type: integer
                      - type: string
                      description: 'Replicas is the number of pods to upgrade in this batch; it can be an absolute number (ex: 5) or a percentage of total pods. We will ignore the percentage of the last batch to just fill the gap; it is mutually exclusive with the PodList field'
                      x-kubernetes-int-or-string: true
                  type: object
                type: array
              rolloutStrategy:
                description: RolloutStrategy defines strategies for the rollout plan
                type: string
              rolloutWebhooks:
                description: RolloutWebhooks provide a way for the rollout to interact with an external process
                items:
                  description: RolloutWebhook holds the reference to external checks used for canary analysis
                  properties:
                    expectedStatus:
                      description: ExpectedStatus contains the HTTP status codes accepted as a successful webhook response
                      items:
                        type: integer
                      type: array
                    metadata:
                      additionalProperties:
                        type: string
                      description: Metadata (key-value pairs) for this webhook
                      type: object
                    name:
                      description: Name of this webhook
                      type: string
                    type:
                      description: Type of this webhook
                      type: string
                    url:
                      description: URL address of this webhook
                      type: string
                  required:
                  - name
                  - type
                  - url
                  type: object
                type: array
              targetSize:
                description: The size of the target resource. The default is the same as the size of the source resource.
                format: int32
                type: integer
            type: object
        required:
        - components
        type: object
      status:
        description: AppStatus defines the observed state of Application
        properties:
          batchRollingState:
            description: BatchRollingState is only meaningful when the Status is rolling
            type: string
          components:
            description: Components record the related Components created by the Application Controller
            items:

@@ -127,6 +342,28 @@ spec:
            - type
            type: object
            type: array
          currentBatch:
            description: The current batch the rollout is working on/blocked; it starts from 0
            format: int32
            type: integer
          lastAppliedPodTemplateIdentifier:
            description: lastAppliedPodTemplateIdentifier is a string that uniquely represents the last pod template. Each workload type could use different ways to identify that, so we cannot compare between resources. We update this field only after a successful rollout
            type: string
          latestRevision:
            description: LatestRevision of the application configuration it generates
            properties:
              name:
                type: string
              revision:
                format: int64
                type: integer
            required:
            - name
            - revision
            type: object
          rollingState:
            description: RollingState is the Rollout State
            type: string
          services:
            description: Services record the status of the application services
            items:

@@ -161,6 +398,22 @@ spec:
          status:
            description: ApplicationPhase is a label for the condition of an application at the current time
            type: string
          targetGeneration:
            description: NewPodTemplateIdentifier is a string that uniquely represents the new pod template. Each workload type could use different ways to identify that, so we cannot compare between resources
            type: string
          upgradedReadyReplicas:
            description: UpgradedReadyReplicas is the number of Pods upgraded by the rollout controller that have a Ready Condition.
            format: int32
            type: integer
          upgradedReplicas:
            description: UpgradedReplicas is the number of Pods upgraded by the rollout controller
            format: int32
            type: integer
        required:
        - currentBatch
        - rollingState
        - upgradedReadyReplicas
        - upgradedReplicas
        type: object
      type: object
  version: v1alpha2
@@ -91,14 +91,14 @@ spec:
            format: int32
            type: integer
          numBatches:
            description: The number of batches, default = 1 mutually exclusive to RolloutBatches
            description: The number of batches, default = 1
            format: int32
            type: integer
          paused:
            description: Paused the rollout, default is false
            type: boolean
          rolloutBatches:
            description: The exact distribution among batches. Mutually exclusive to NumBatches. The total number cannot exceed the targetSize or the size of the source resource. We will IGNORE the last batch's replica field if it's a percentage since rounding errors can lead to an inaccurate sum. We highly recommend leaving the last batch's replica field empty
            description: The exact distribution among batches. Its size has to be exactly the same as NumBatches (if set). The total number cannot exceed the targetSize or the size of the source resource. We will IGNORE the last batch's replica field if it's a percentage since rounding errors can lead to an inaccurate sum. We highly recommend leaving the last batch's replica field empty
            items:
              description: RolloutBatch describes how each batch of the rollout should be performed
              properties:

@@ -107,6 +107,11 @@ spec:
                items:
                  description: RolloutWebhook holds the reference to external checks used for canary analysis
                  properties:
                    expectedStatus:
                      description: ExpectedStatus contains the HTTP status codes accepted as a successful webhook response
                      items:
                        type: integer
                      type: array
                    metadata:
                      additionalProperties:
                        type: string

@@ -115,9 +120,6 @@ spec:
                    name:
                      description: Name of this webhook
                      type: string
                    timeout:
                      description: Request timeout for this webhook
                      type: string
                    type:
                      description: Type of this webhook
                      type: string

@@ -212,6 +214,11 @@ spec:
                items:
                  description: RolloutWebhook holds the reference to external checks used for canary analysis
                  properties:
                    expectedStatus:
                      description: ExpectedStatus contains the HTTP status codes accepted as a successful webhook response
                      items:
                        type: integer
                      type: array
                    metadata:
                      additionalProperties:
                        type: string

@@ -220,9 +227,6 @@ spec:
                    name:
                      description: Name of this webhook
                      type: string
                    timeout:
                      description: Request timeout for this webhook
                      type: string
                    type:
                      description: Type of this webhook
                      type: string
@@ -4,7 +4,6 @@ import (

	"github.com/crossplane/crossplane-runtime/apis/core/v1alpha1"
	"github.com/pkg/errors"
	kerrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/client"

@@ -215,7 +214,6 @@ func (p *Parser) GenerateApplicationConfiguration(app *Appfile, ns string) (*v1a

	appconfig.SetGroupVersionKind(v1alpha2.ApplicationConfigurationGroupVersionKind)
	appconfig.Name = app.Name
	appconfig.Namespace = ns
	appconfig.Spec.Components = []v1alpha2.ApplicationConfigurationComponent{}

	if appconfig.Labels == nil {
		appconfig.Labels = map[string]string{}

@@ -277,15 +275,18 @@ func evalWorkloadWithContext(pCtx process.Context, wl *Workload, appName, compNa

	util.AddLabels(componentWorkload, labels)

	component := &v1alpha2.Component{}
	component.Spec.Workload.Object = componentWorkload
	component.Spec.Workload = util.Object2RawExtension(componentWorkload)

	acComponent := &v1alpha2.ApplicationConfigurationComponent{}
	acComponent.Traits = []v1alpha2.ComponentTrait{}
	for _, assist := range assists {
		tr, err := assist.Ins.Unstructured()
		if err != nil {
			return nil, nil, errors.Wrapf(err, "evaluate trait=%s template for component=%s app=%s", assist.Name, compName, appName)
		}
		if err != nil {
			return nil, nil, errors.Wrapf(err, "marshal trait=%s to byte array failed for component=%s app=%s",
				assist.Name, compName, appName)
		}
		labels := map[string]string{
			oam.TraitTypeLabel: assist.Type,
			oam.LabelAppName: appName,

@@ -296,9 +297,8 @@ func evalWorkloadWithContext(pCtx process.Context, wl *Workload, appName, compNa

		}
		util.AddLabels(tr, labels)
		acComponent.Traits = append(acComponent.Traits, v1alpha2.ComponentTrait{
			Trait: runtime.RawExtension{
				Object: tr,
			},
			// we need to marshal the trait to byte array before sending them to the k8s
			Trait: util.Object2RawExtension(tr),
		})
	}
	return component, acComponent, nil
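The fix above routes workloads and traits through util.Object2RawExtension so the object reaches the API server as marshaled bytes rather than a live runtime.Object, which is what the commit message calls "the bug caused by rawExtension". A plausible shape for such a helper (a sketch; the real implementation may handle errors differently):

package util

import (
	"encoding/json"

	"k8s.io/apimachinery/pkg/runtime"
)

// Object2RawExtension marshals any JSON-serializable object into a
// runtime.RawExtension with only Raw set, so later comparisons and
// server submissions all see identical byte content.
func Object2RawExtension(obj interface{}) runtime.RawExtension {
	b, _ := json.Marshal(obj) // error ignored in this sketch
	return runtime.RawExtension{Raw: b}
}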
@@ -369,6 +369,20 @@ var _ = Describe("Test appFile parser", func() {

	Expect(k8sClient.Create(context.Background(), cm.DeepCopy())).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
	ac, components, err := NewApplicationParser(k8sClient, nil).GenerateApplicationConfiguration(TestApp, "default")
	Expect(err).To(BeNil())
	manuscaler := util.Object2RawExtension(&unstructured.Unstructured{
		Object: map[string]interface{}{
			"apiVersion": "core.oam.dev/v1alpha2",
			"kind": "ManualScalerTrait",
			"metadata": map[string]interface{}{
				"labels": map[string]interface{}{
					"app.oam.dev/component": "myweb",
					"app.oam.dev/name": "test",
					"trait.oam.dev/type": "scaler",
				},
			},
			"spec": map[string]interface{}{"replicaCount": int64(10)},
		},
	})
	expectAppConfig := &v1alpha2.ApplicationConfiguration{
		TypeMeta: metav1.TypeMeta{
			Kind: "ApplicationConfiguration",

@@ -393,22 +407,8 @@ var _ = Describe("Test appFile parser", func() {

			},
			Traits: []v1alpha2.ComponentTrait{
				{
					Trait: runtime.RawExtension{
						Object: &unstructured.Unstructured{
							Object: map[string]interface{}{
								"apiVersion": "core.oam.dev/v1alpha2",
								"kind": "ManualScalerTrait",
								"metadata": map[string]interface{}{
									"labels": map[string]interface{}{
										"app.oam.dev/component": "myweb",
										"app.oam.dev/name": "test",
										"trait.oam.dev/type": "scaler",
									},
								},
								"spec": map[string]interface{}{"replicaCount": int64(10)},
							},
						},
					}},
					Trait: manuscaler,
				},
			},
		},
	},

@@ -481,9 +481,9 @@ var _ = Describe("Test appFile parser", func() {

	Expect(len(components)).To(BeEquivalentTo(1))
	Expect(components[0].ObjectMeta).To(BeEquivalentTo(expectComponent.ObjectMeta))
	Expect(components[0].TypeMeta).To(BeEquivalentTo(expectComponent.TypeMeta))
	Expect(components[0].Spec.Workload.Object).Should(SatisfyAny(
		BeEquivalentTo(expectWorkload),
		BeEquivalentTo(expectWorkloadOptional)))
	Expect(components[0].Spec.Workload).Should(SatisfyAny(
		BeEquivalentTo(util.Object2RawExtension(expectWorkload)),
		BeEquivalentTo(util.Object2RawExtension(expectWorkloadOptional))))
})

})
@@ -19,6 +19,7 @@ import (

	"github.com/oam-dev/kubevela/pkg/appfile/api"
	"github.com/oam-dev/kubevela/pkg/commands/util"
	"github.com/oam-dev/kubevela/pkg/oam"
	"github.com/oam-dev/kubevela/pkg/utils/common"
)

const (

@@ -168,7 +169,7 @@ func (o *VelaExecOptions) getComponentName() (string, error) {

		o.Cmd.Printf("The service name '%s' is not valid\n", svcName)
	}

	compName, err := util.AskToChooseOneService(appfile.GetComponents(o.App))
	compName, err := common.AskToChooseOneService(appfile.GetComponents(o.App))
	if err != nil {
		return "", err
	}
@@ -19,6 +19,7 @@ import (

	"github.com/oam-dev/kubevela/pkg/appfile"
	"github.com/oam-dev/kubevela/pkg/appfile/api"
	"github.com/oam-dev/kubevela/pkg/commands/util"
	"github.com/oam-dev/kubevela/pkg/utils/common"
)

// NewLogsCommand creates `logs` command to tail logs of application

@@ -78,7 +79,7 @@ func (l *Args) Run(ctx context.Context, ioStreams util.IOStreams) error {

	if err != nil {
		return err
	}
	compName, err := util.AskToChooseOneService(appfile.GetComponents(l.App))
	compName, err := common.AskToChooseOneService(appfile.GetComponents(l.App))
	if err != nil {
		return err
	}
@@ -52,7 +52,8 @@ func NewListCommand(c types.Args, ioStreams cmdutil.IOStreams) *cobra.Command {

	return cmd
}

func printComponentList(ctx context.Context, c client.Client, appName string, env *types.EnvMeta, ioStreams cmdutil.IOStreams) {
func printComponentList(ctx context.Context, c client.Reader, appName string, env *types.EnvMeta,
	ioStreams cmdutil.IOStreams) {
	deployedComponentList, err := serverlib.ListComponents(ctx, c, serverlib.Option{
		AppName: appName,
		Namespace: env.Namespace,
@@ -28,6 +28,7 @@ import (

	"github.com/oam-dev/kubevela/pkg/appfile/api"
	"github.com/oam-dev/kubevela/pkg/commands/util"
	"github.com/oam-dev/kubevela/pkg/oam"
	"github.com/oam-dev/kubevela/pkg/utils/common"
)

// VelaPortForwardOptions for vela port-forward

@@ -150,7 +151,7 @@ func getRouteServiceName(appconfig *v1alpha2.ApplicationConfiguration, svcName s

// Complete will complete the config of port-forward
func (o *VelaPortForwardOptions) Complete() error {
	svcName, err := util.AskToChooseOneService(appfile.GetComponents(o.App))
	svcName, err := common.AskToChooseOneService(appfile.GetComponents(o.App))
	if err != nil {
		return err
	}
@@ -1,38 +0,0 @@
package util

import (
	"context"
	"fmt"

	"github.com/AlecAivazis/survey/v2"
	"sigs.k8s.io/controller-runtime/pkg/client"

	corev1alpha2 "github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
)

// GetComponent gets an OAM component
func GetComponent(ctx context.Context, c client.Client, componentName string, namespace string) (corev1alpha2.Component, error) {
	var component corev1alpha2.Component
	err := c.Get(ctx, client.ObjectKey{Name: componentName, Namespace: namespace}, &component)
	return component, err
}

// AskToChooseOneService will ask users to select one service of the application if more than one exists
func AskToChooseOneService(svcNames []string) (string, error) {
	if len(svcNames) == 0 {
		return "", fmt.Errorf("no service exist in the application")
	}
	if len(svcNames) == 1 {
		return svcNames[0], nil
	}
	prompt := &survey.Select{
		Message: "You have multiple services in your app. Please choose one service: ",
		Options: svcNames,
	}
	var svcName string
	err := survey.AskOne(prompt, &svcName)
	if err != nil {
		return "", fmt.Errorf("choosing service err %w", err)
	}
	return svcName, nil
}
@@ -29,7 +29,7 @@ type Controller struct {
    parentController oam.Object

    rolloutSpec   *v1alpha1.RolloutPlan
    rolloutStatus v1alpha1.RolloutStatus
    rolloutStatus *v1alpha1.RolloutStatus

    targetWorkload *unstructured.Unstructured
    sourceWorkload *unstructured.Unstructured
@@ -38,21 +38,21 @@ type Controller struct {
// NewRolloutPlanController creates a RolloutPlanController
func NewRolloutPlanController(client client.Client, parentController oam.Object, recorder event.Recorder,
    rolloutSpec *v1alpha1.RolloutPlan,
    rolloutStatus v1alpha1.RolloutStatus, targetWorkload,
    rolloutStatus *v1alpha1.RolloutStatus, targetWorkload,
    sourceWorkload *unstructured.Unstructured) *Controller {
    return &Controller{
        client:           client,
        parentController: parentController,
        recorder:         recorder,
        rolloutSpec:      rolloutSpec,
        rolloutStatus:    rolloutStatus,
        rolloutSpec:      rolloutSpec.DeepCopy(),
        rolloutStatus:    rolloutStatus.DeepCopy(),
        targetWorkload:   targetWorkload,
        sourceWorkload:   sourceWorkload,
    }
}

// Reconcile reconciles a rollout plan
func (r *Controller) Reconcile(ctx context.Context) (res reconcile.Result, status v1alpha1.RolloutStatus) {
func (r *Controller) Reconcile(ctx context.Context) (res reconcile.Result, status *v1alpha1.RolloutStatus) {
    klog.InfoS("Reconcile the rollout plan", "rollout Spec", r.rolloutSpec,
        "target workload", klog.KObj(r.targetWorkload))
    if r.sourceWorkload != nil {
@@ -89,18 +89,22 @@ func (r *Controller) Reconcile(ctx context.Context) (res reconcile.Result, statu

    switch r.rolloutStatus.RollingState {
    case v1alpha1.VerifyingState:
        status = *workloadController.Verify(ctx)
        r.rolloutStatus = workloadController.Verify(ctx)

    case v1alpha1.InitializingState:
        // TODO: call the pre-rollout webhooks
        status = *workloadController.Initialize(ctx)
        if err := r.initializeRollout(ctx); err == nil {
            r.rolloutStatus = workloadController.Initialize(ctx)
        }

    case v1alpha1.RollingInBatchesState:
        status = r.reconcileBatchInRolling(ctx, workloadController)
        r.reconcileBatchInRolling(ctx, workloadController)

    case v1alpha1.FinalisingState:
        // TODO: call the post-rollout webhooks
        status = *workloadController.Finalize(ctx)
        r.rolloutStatus = workloadController.Finalize(ctx)
        // if we are still going to finalize it
        if r.rolloutStatus.RollingState == v1alpha1.FinalisingState {
            r.finalizeRollout(ctx)
        }

    case v1alpha1.RolloutSucceedState:
        // Nothing to do
@@ -112,44 +116,43 @@ func (r *Controller) Reconcile(ctx context.Context) (res reconcile.Result, statu
        panic(fmt.Sprintf("illegal rollout status %+v", r.rolloutStatus))
    }

    return res, status
    return res, r.rolloutStatus
}

// reconcile logic when we are in the middle of a rollout
func (r *Controller) reconcileBatchInRolling(ctx context.Context, workloadController workloads.WorkloadController) (
    status v1alpha1.RolloutStatus) {
func (r *Controller) reconcileBatchInRolling(ctx context.Context, workloadController workloads.WorkloadController) {

    if r.rolloutSpec.Paused {
        r.recorder.Event(r.parentController, event.Normal("Rollout paused", "Rollout paused"))
        r.rolloutStatus.SetConditions(v1alpha1.NewPositiveCondition("Paused"))
        return r.rolloutStatus
        return
    }

    // makes sure that the current batch and replica count in the status are valid
    replicas, err := workloadController.Size(ctx)
    if err != nil {
        r.rolloutStatus.RolloutRetry(err.Error())
        return r.rolloutStatus
        return
    }
    r.validateRollingBatchStatus(int(replicas))

    switch r.rolloutStatus.BatchRollingState {
    case v1alpha1.BatchInitializingState:
        // TODO: call the pre-batch webhook
        r.initializeOneBatch(ctx)

    case v1alpha1.BatchInRollingState:
        // still rolling the batch, the batch rolling is not completed yet
        status = *workloadController.RolloutOneBatchPods(ctx)
        r.rolloutStatus = workloadController.RolloutOneBatchPods(ctx)

    case v1alpha1.BatchVerifyingState:
        // verifying if the application is ready to roll
        // need to check if they meet the availability requirements in the rollout spec.
        // TODO: evaluate any metrics/analysis
        status = *workloadController.CheckOneBatchPods(ctx)
        r.rolloutStatus = workloadController.CheckOneBatchPods(ctx)

    case v1alpha1.BatchFinalizingState:
        // all the pods in the batch are available
        r.finalizeOneBatch()
        r.finalizeOneBatch(ctx)

    case v1alpha1.BatchReadyState:
        // all the pods in the batch are upgraded and their state is ready
@@ -159,15 +162,62 @@ func (r *Controller) reconcileBatchInRolling(ctx context.Context, workloadContro
    default:
        panic(fmt.Sprintf("illegal status %+v", r.rolloutStatus))
    }
}

    return status
// all the common initialize work before we rollout
func (r *Controller) initializeRollout(ctx context.Context) error {
    // call the pre-rollout webhooks
    for _, rw := range r.rolloutSpec.RolloutWebhooks {
        if rw.Type == v1alpha1.InitializeRolloutHook {
            err := callWebhook(ctx, r.parentController, v1alpha1.InitializingState, rw)
            if err != nil {
                klog.ErrorS(err, "failed to invoke a webhook",
                    "webhook name", rw.Name, "webhook end point", rw.URL)
                r.rolloutStatus.RolloutFailed("failed to invoke a webhook")
                return err
            }
            klog.InfoS("successfully invoked a pre rollout webhook", "webhook name", rw.Name, "webhook end point",
                rw.URL)
        }
    }

    return nil
}

// all the common initialize work before we rollout one batch of resources
func (r *Controller) initializeOneBatch(ctx context.Context) {
    rolloutHooks := r.gatherAllWebhooks()
    // call all the pre-batch rollout webhooks
    for _, rh := range rolloutHooks {
        if rh.Type == v1alpha1.PreBatchRolloutHook {
            err := callWebhook(ctx, r.parentController, v1alpha1.InitializingState, rh)
            if err != nil {
                klog.ErrorS(err, "failed to invoke a webhook",
                    "webhook name", rh.Name, "webhook end point", rh.URL)
                r.rolloutStatus.RolloutFailed("failed to invoke a webhook")
                return
            }
            klog.InfoS("successfully invoked a pre batch webhook", "webhook name", rh.Name, "webhook end point",
                rh.URL)
        }
    }
    r.rolloutStatus.StateTransition(v1alpha1.InitializedOneBatchEvent)
}

func (r *Controller) gatherAllWebhooks() []v1alpha1.RolloutWebhook {
    // we go through the rollout level webhooks first
    rolloutHooks := r.rolloutSpec.RolloutWebhooks
    // we then append the batch specific rollout webhooks to the overall webhooks
    // order matters here
    currentBatch := int(r.rolloutStatus.CurrentBatch)
    rolloutHooks = append(rolloutHooks, r.rolloutSpec.RolloutBatches[currentBatch].BatchRolloutWebhooks...)
    return rolloutHooks
}

// check if we can move to the next batch
func (r *Controller) tryMovingToNextBatch() {
    if r.rolloutSpec.BatchPartition == nil || *r.rolloutSpec.BatchPartition > r.rolloutStatus.CurrentBatch {
        klog.InfoS("ready to rollout the next batch", "current batch", r.rolloutStatus.CurrentBatch)
        r.rolloutStatus.CurrentBatch++
        r.rolloutStatus.StateTransition(v1alpha1.BatchRolloutApprovedEvent)
    } else {
        klog.V(common.LogDebug).InfoS("the current batch is waiting to move on", "current batch",
@@ -175,8 +225,23 @@ func (r *Controller) tryMovingToNextBatch() {
    }
}

func (r *Controller) finalizeOneBatch() {
    // TODO: call the post-batch webhooks if there are any
func (r *Controller) finalizeOneBatch(ctx context.Context) {
    rolloutHooks := r.gatherAllWebhooks()
    // call all the post-batch rollout webhooks
    for _, rh := range rolloutHooks {
        if rh.Type == v1alpha1.PostBatchRolloutHook {
            err := callWebhook(ctx, r.parentController, v1alpha1.FinalisingState, rh)
            if err != nil {
                klog.ErrorS(err, "failed to invoke a webhook",
                    "webhook name", rh.Name, "webhook end point", rh.URL)
                r.rolloutStatus.RolloutFailed("failed to invoke a webhook")
                return
            }
            klog.InfoS("successfully invoked a post batch webhook", "webhook name", rh.Name, "webhook end point",
                rh.URL)
        }
    }
    // calculate the next phase
    currentBatch := int(r.rolloutStatus.CurrentBatch)
    if currentBatch == len(r.rolloutSpec.RolloutBatches)-1 {
        // this is the last batch, mark the rollout finalized
@@ -193,6 +258,24 @@ func (r *Controller) finalizeOneBatch() {
    }
}

// all the common finalize work after we rollout
func (r *Controller) finalizeRollout(ctx context.Context) {
    // call the post-rollout webhooks
    for _, rw := range r.rolloutSpec.RolloutWebhooks {
        if rw.Type == v1alpha1.FinalizeRolloutHook {
            err := callWebhook(ctx, r.parentController, v1alpha1.FinalisingState, rw)
            if err != nil {
                klog.ErrorS(err, "failed to invoke a webhook",
                    "webhook name", rw.Name, "webhook end point", rw.URL)
                r.rolloutStatus.RolloutFailed("failed to invoke a post rollout webhook")
            }
            klog.InfoS("successfully invoked a post rollout webhook", "webhook name", rw.Name, "webhook end point",
                rw.URL)
        }
    }
    r.rolloutStatus.StateTransition(v1alpha1.RollingFinalizedEvent)
}

// verify that the upgradedReplicas and current batch in the status are valid according to the spec
func (r *Controller) validateRollingBatchStatus(totalSize int) bool {
    status := r.rolloutStatus
@@ -247,7 +330,7 @@ func (r *Controller) GetWorkloadController() (workloads.WorkloadController, erro
    switch kind {
    case "CloneSet":
        return workloads.NewCloneSetController(r.client, r.recorder, r.parentController,
            r.rolloutSpec, &r.rolloutStatus, target), nil
            r.rolloutSpec, r.rolloutStatus, target), nil

    default:
        return nil, fmt.Errorf("the workload kind `%s` is not supported", kind)
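For reference, a hypothetical RolloutPlan wiring together the webhook types consumed above. The field and constant names follow the v1alpha1 references in this diff; the RolloutBatch struct layout and the URLs are assumptions for illustration only:

// a sketch of a RolloutPlan with rollout-level and per-batch webhooks;
// gatherAllWebhooks would yield the rollout-level hooks followed by the
// hooks of the current batch, in that order
plan := v1alpha1.RolloutPlan{
    RolloutWebhooks: []v1alpha1.RolloutWebhook{
        // invoked by initializeRollout before the first batch
        {Type: v1alpha1.InitializeRolloutHook, Name: "notify-start", URL: "http://hooks.example/start"},
        // invoked by finalizeRollout after the last batch
        {Type: v1alpha1.FinalizeRolloutHook, Name: "notify-done", URL: "http://hooks.example/done"},
    },
    RolloutBatches: []v1alpha1.RolloutBatch{{
        BatchRolloutWebhooks: []v1alpha1.RolloutWebhook{
            // invoked by finalizeOneBatch after this batch is available
            {Type: v1alpha1.PostBatchRolloutHook, Name: "verify-batch", URL: "http://hooks.example/batch"},
        },
    }},
}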
@@ -4,94 +4,107 @@ import (
    "bytes"
    "context"
    "encoding/json"
    "errors"
    "fmt"
    "io/ioutil"
    "net/http"
    "net/url"
    "strconv"
    "time"

    "k8s.io/client-go/util/retry"
    "k8s.io/klog/v2"

    "github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
    "github.com/oam-dev/kubevela/pkg/controller/common"
)

func callWebhook(webhook string, payload interface{}, timeout string) error {
// issue an HTTP call to an endpoint
func makeHTTPRequest(ctx context.Context, webhookEndPoint string, payload interface{}) ([]byte, int, error) {
    payloadBin, err := json.Marshal(payload)
    if err != nil {
        return err
        return nil, http.StatusInternalServerError, err
    }

    hook, err := url.Parse(webhook)
    hook, err := url.Parse(webhookEndPoint)
    if err != nil {
        return err
        return nil, http.StatusInternalServerError, err
    }

    req, err := http.NewRequestWithContext(context.Background(), "POST", hook.String(), bytes.NewBuffer(payloadBin))
    if err != nil {
        return err
        return nil, http.StatusInternalServerError, err
    }

    req.Header.Set("Content-Type", "application/json")

    if timeout == "" {
        timeout = "10s"
    }
    // issue the request with retry
    var r *http.Response
    var body []byte
    err = retry.OnError(retry.DefaultBackoff,
        func(error) bool {
            // not sure what not to retry on
            return true
        }, func() error {
            var requestErr error
            r, requestErr = http.DefaultClient.Do(req.WithContext(ctx))
            defer func() {
                _ = r.Body.Close()
            }()
            if requestErr != nil {
                return requestErr
            }
            body, requestErr = ioutil.ReadAll(r.Body)
            if requestErr != nil {
                return requestErr
            }
            if r.StatusCode == http.StatusInternalServerError ||
                r.StatusCode == http.StatusServiceUnavailable {
                requestErr = fmt.Errorf("internal server error, status code = %d", r.StatusCode)
            }
            return requestErr
        })

    t, err := time.ParseDuration(timeout)
    // failed even with retry
    if err != nil {
        return err
        return nil, http.StatusInternalServerError, err
    }

    ctx, cancel := context.WithTimeout(req.Context(), t)
    defer cancel()

    r, err := http.DefaultClient.Do(req.WithContext(ctx))
    if err != nil {
        return err
    }
    defer func() {
        _ = r.Body.Close()
    }()

    b, err := ioutil.ReadAll(r.Body)
    if err != nil {
        return fmt.Errorf("error reading body: %w", err)
    }

    if r.StatusCode > 202 {
        return errors.New(string(b))
    }

    return nil
    return body, r.StatusCode, nil
}

// CallWebhook does an HTTP POST to an external service and
// callWebhook does an HTTP POST to an external service and
// returns an error if the response status code is non-2xx
func CallWebhook(name string, namespace string, phase v1alpha1.RollingState, w v1alpha1.RolloutWebhook) error {
    payload := v1alpha1.RolloutWebhookPayload{}
func callWebhook(ctx context.Context, resource klog.KMetadata, phase v1alpha1.RollingState,
    w v1alpha1.RolloutWebhook) error {
    payload := v1alpha1.RolloutWebhookPayload{
        Name:      resource.GetName(),
        Namespace: resource.GetNamespace(),
        Phase:     phase,
    }

    if w.Metadata != nil {
        payload.Metadata = *w.Metadata
    }

    if len(w.Timeout) < 2 {
        w.Timeout = "10s"
    // make the http request
    _, status, err := makeHTTPRequest(ctx, w.URL, payload)
    if err != nil {
        return err
    }

    return callWebhook(w.URL, payload, w.Timeout)
}

// CallEventWebhook does an HTTP POST to an external service with meta data
func CallEventWebhook(r *v1alpha1.RolloutTrait, webhookURL, message, eventtype string) error {
    t := time.Now()

    payload := v1alpha1.RolloutWebhookPayload{
        Metadata: map[string]string{
            "eventMessage": message,
            "eventType":    eventtype,
            "timestamp":    strconv.FormatInt(t.UnixNano()/1000000, 10),
        },
    if len(w.ExpectedStatus) == 0 {
        if status > http.StatusAccepted {
            err := fmt.Errorf("we fail the webhook request based on status, http status = %d", status)
            return err
        }
        return nil
    }

    return callWebhook(webhookURL, payload, "5s")
    // check if the returned status is expected
    accepted := false
    for _, es := range w.ExpectedStatus {
        if es == status {
            accepted = true
            break
        }
    }
    if !accepted {
        err := fmt.Errorf("http request to the webhook not accepted, http status = %d", status)
        klog.V(common.LogDebug).InfoS("the status is not expected", "expected status", w.ExpectedStatus)
        return err
    }
    return nil
}
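To see the retry behaviour in isolation, here is a minimal, runnable sketch of the same retry.OnError pattern used by makeHTTPRequest above; the test server and call counts are illustrative, not part of this change:

package main

import (
    "fmt"
    "net/http"
    "net/http/httptest"

    "k8s.io/client-go/util/retry"
)

func main() {
    // an endpoint that answers 503 twice before returning 200,
    // mimicking a webhook that is temporarily unavailable
    calls := 0
    srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
        calls++
        if calls < 3 {
            w.WriteHeader(http.StatusServiceUnavailable)
            return
        }
        w.WriteHeader(http.StatusOK)
    }))
    defer srv.Close()

    var status int
    err := retry.OnError(retry.DefaultBackoff,
        func(error) bool { return true }, // retry on every error, as the controller does
        func() error {
            resp, err := http.Post(srv.URL, "application/json", nil)
            if err != nil {
                return err
            }
            defer resp.Body.Close()
            status = resp.StatusCode
            if status == http.StatusInternalServerError || status == http.StatusServiceUnavailable {
                return fmt.Errorf("internal server error, status code = %d", status)
            }
            return nil
        })
    fmt.Println(status, err, calls) // 200 <nil> 3
}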
@@ -58,8 +58,10 @@ func (c *CloneSetController) Size(ctx context.Context) (int32, error) {
}

// Verify verifies that the target rollout resource is consistent with the rollout spec
func (c *CloneSetController) Verify(ctx context.Context) *v1alpha1.RolloutStatus {
func (c *CloneSetController) Verify(ctx context.Context) (status *v1alpha1.RolloutStatus) {
    var verifyErr error
    status = c.rolloutStatus

    defer func() {
        if verifyErr != nil {
            klog.Error(verifyErr)
@@ -68,7 +70,7 @@ func (c *CloneSetController) Verify(ctx context.Context) *v1alpha1.RolloutStatus
    }()

    if verifyErr = c.fetchCloneSet(ctx); verifyErr != nil {
        return c.rolloutStatus
        return
    }

    // make sure that there are changes in the pod template
@@ -76,7 +78,7 @@ func (c *CloneSetController) Verify(ctx context.Context) *v1alpha1.RolloutStatus
    if targetHash == c.rolloutStatus.LastAppliedPodTemplateIdentifier {
        verifyErr = fmt.Errorf("there is no difference between the source and target, hash = %s", targetHash)
        c.rolloutStatus.RolloutFailed(verifyErr.Error())
        return c.rolloutStatus
        return
    }
    // record the new pod template hash
    c.rolloutStatus.NewPodTemplateIdentifier = targetHash
@@ -84,26 +86,26 @@ func (c *CloneSetController) Verify(ctx context.Context) *v1alpha1.RolloutStatus
    // check if the rollout spec is compatible with the current state
    totalReplicas, _ := c.Size(ctx)

    // check if the target spec is the same as the Cloneset replicas
    if verifyErr = c.verifyBatchSizes(totalReplicas); verifyErr != nil {
    // check if the rollout batch replicas add up to the Cloneset replicas
    if verifyErr = c.verifyRolloutBatchReplicaValue(totalReplicas); verifyErr != nil {
        c.rolloutStatus.RolloutFailed(verifyErr.Error())
        return c.rolloutStatus
        return
    }

    // the rollout batch partition is either automatic or zero
    if c.rolloutSpec.BatchPartition != nil && *c.rolloutSpec.BatchPartition != 0 {
        verifyErr = fmt.Errorf("the rollout plan has to start from zero, partition= %d", *c.rolloutSpec.BatchPartition)
        c.rolloutStatus.RolloutFailed(verifyErr.Error())
        return c.rolloutStatus
        return
    }

    // the number of old version in the Cloneset equals to the total number
    // Cloneset should not be in the middle of an upgrade (the number of new version pods should be 0)
    oldVersionPod, _ := intstr.GetValueFromIntOrPercent(c.cloneSet.Spec.UpdateStrategy.Partition, int(totalReplicas),
        true)
    if oldVersionPod != int(totalReplicas) {
        verifyErr = fmt.Errorf("the cloneset was still in the middle of updating, number of old pods= %d", oldVersionPod)
        c.rolloutStatus.RolloutFailed(verifyErr.Error())
        return c.rolloutStatus
        return
    }

    // mark the rollout verified
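The partition check above resolves an int-or-percentage partition against the total replica count. A small, runnable illustration of intstr.GetValueFromIntOrPercent (the replica values are made up):

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
    // a CloneSet partition of "100%" over 10 replicas resolves (rounding up)
    // to 10 old-version pods, i.e. the workload is not mid-upgrade
    p := intstr.FromString("100%")
    oldPods, _ := intstr.GetValueFromIntOrPercent(&p, 10, true)
    fmt.Println(oldPods == 10) // true

    // an absolute partition passes through unchanged
    q := intstr.FromInt(3)
    mid, _ := intstr.GetValueFromIntOrPercent(&q, 10, true)
    fmt.Println(mid) // 3
}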
@@ -171,10 +173,11 @@ func (c *CloneSetController) CheckOneBatchPods(ctx context.Context) *v1alpha1.Ro
        c.recorder.Event(c.parentController, event.Normal("Batch Available",
            fmt.Sprintf("the batch num = %d is available", c.rolloutStatus.CurrentBatch)))
        c.rolloutStatus.StateTransition(v1alpha1.OneBatchAvailableEvent)
        c.rolloutStatus.LastAppliedPodTemplateIdentifier = c.rolloutStatus.NewPodTemplateIdentifier
    } else {
        // continue to verify
        klog.V(common.LogDebug).InfoS("the batch is not ready yet", "current batch", currentBatch)
        c.rolloutStatus.StateTransition(v1alpha1.BatchRolloutVerifyingEvent)
        c.rolloutStatus.RolloutRetry("the batch is not ready yet")
    }
    return c.rolloutStatus
}
@@ -191,8 +194,6 @@ func (c *CloneSetController) Finalize(ctx context.Context) *v1alpha1.RolloutStat
        return c.rolloutStatus
    }

    c.rolloutStatus.StateTransition(v1alpha1.RollingFinalizedEvent)

    return c.rolloutStatus
}

@@ -200,7 +201,7 @@ func (c *CloneSetController) Finalize(ctx context.Context) *v1alpha1.RolloutStat
The functions below are helper functions
--------------------- */
// check if the replicas in all the rollout batches add up to the right number
func (c *CloneSetController) verifyBatchSizes(totalReplicas int32) error {
func (c *CloneSetController) verifyRolloutBatchReplicaValue(totalReplicas int32) error {
    // the target size has to be the same as the cloneset size
    if c.rolloutSpec.TargetSize != nil && *c.rolloutSpec.TargetSize != totalReplicas {
        return fmt.Errorf("the rollout plan is attempting to scale the cloneset, target = %d, cloneset size = %d",

@@ -11,7 +11,6 @@ import (
// VerifySumOfBatchSizes verifies that the sum of all the batch replicas is valid given the total replica
// each batch replica can be absolute or a percentage
func VerifySumOfBatchSizes(rolloutSpec *v1alpha1.RolloutPlan, totalReplicas int32) error {
    // if not set, the sum of all the batch sizes minus the last batch cannot be more than the totalReplicas
    totalRollout := 0
    for i := 0; i < len(rolloutSpec.RolloutBatches)-1; i++ {
pkg/controller/common/util.go (new file, 60 lines)
@@ -0,0 +1,60 @@
package common

import (
    "context"
    "fmt"
    "reflect"
    "strings"

    "github.com/crossplane/crossplane-runtime/pkg/logging"
    v1 "k8s.io/api/apps/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/client-go/util/retry"
    "sigs.k8s.io/controller-runtime/pkg/client"

    "github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
    "github.com/oam-dev/kubevela/pkg/oam/util"
)

// ConstructRevisionName will generate revisionName from componentName
// will be <componentName>-v<RevisionNumber>, for example: comp-v1
func ConstructRevisionName(componentName string, revision int64) string {
    return strings.Join([]string{componentName, fmt.Sprintf("v%d", revision)}, "-")
}

// ExtractComponentName will extract componentName from revisionName
func ExtractComponentName(revisionName string) string {
    splits := strings.Split(revisionName, "-")
    return strings.Join(splits[0:len(splits)-1], "-")
}

// CompareWithRevision compares a component's spec with the component's latest revision content
func CompareWithRevision(ctx context.Context, c client.Client, logger logging.Logger, componentName, nameSpace,
    latestRevision string, curCompSpec *v1alpha2.ComponentSpec) (bool, error) {
    oldRev := &v1.ControllerRevision{}
    // retry on NotFound since we update the component's latest revision first
    err := wait.ExponentialBackoff(retry.DefaultBackoff, func() (bool, error) {
        err := c.Get(ctx, client.ObjectKey{Namespace: nameSpace, Name: latestRevision}, oldRev)
        if err != nil && !apierrors.IsNotFound(err) {
            logger.Info(fmt.Sprintf("get old controllerRevision %s error %v",
                latestRevision, err), "componentName", componentName)
            return false, err
        }
        return true, nil
    })
    if err != nil {
        return true, err
    }
    oldComp, err := util.UnpackRevisionData(oldRev)
    if err != nil {
        logger.Info(fmt.Sprintf("Unmarshal old controllerRevision %s error %v",
            latestRevision, err), "componentName", componentName)
        return true, err
    }
    if reflect.DeepEqual(curCompSpec, &oldComp.Spec) {
        // no need to create a new revision
        return false, nil
    }
    return true, nil
}
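A quick illustration of the naming round-trip these two helpers implement (the component name is arbitrary):

// ConstructRevisionName appends "-v<revision>"; ExtractComponentName drops the
// last "-" separated segment, so dashed component names survive the round-trip
revisionName := ConstructRevisionName("frontend-web", 3) // "frontend-web-v3"
componentName := ExtractComponentName(revisionName)      // "frontend-web"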
pkg/controller/common/util_test.go (new file, 145 lines)
@@ -0,0 +1,145 @@
package common

import (
    "context"
    "fmt"
    "testing"

    "github.com/crossplane/crossplane-runtime/pkg/logging"
    "github.com/crossplane/crossplane-runtime/pkg/test"
    v1 "k8s.io/api/apps/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/utils/pointer"
    ctrl "sigs.k8s.io/controller-runtime"

    "github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
)

func TestConstructExtract(t *testing.T) {
    tests := []string{"tam1", "test-comp", "xx", "tt-x-x-c"}
    revisionNum := []int64{1, 5, 10, 100000}
    for idx, componentName := range tests {
        t.Run(fmt.Sprintf("tests %d for component[%s]", idx, componentName), func(t *testing.T) {
            revisionName := ConstructRevisionName(componentName, revisionNum[idx])
            got := ExtractComponentName(revisionName)
            if got != componentName {
                t.Errorf("want to get %s from %s but got %s", componentName, revisionName, got)
            }
        })
    }
}

func TestCompareWithRevision(t *testing.T) {
    ctx := context.TODO()
    logger := logging.NewLogrLogger(ctrl.Log.WithName("util-test"))
    componentName := "testComp"
    nameSpace := "namespace"
    latestRevision := "revision"
    imageV1 := "wordpress:4.6.1-apache"
    namespaceName := "test"
    cwV1 := v1alpha2.ContainerizedWorkload{
        TypeMeta: metav1.TypeMeta{
            Kind:       "ContainerizedWorkload",
            APIVersion: "core.oam.dev/v1alpha2",
        },
        ObjectMeta: metav1.ObjectMeta{
            Namespace: namespaceName,
        },
        Spec: v1alpha2.ContainerizedWorkloadSpec{
            Containers: []v1alpha2.Container{
                {
                    Name:  "wordpress",
                    Image: imageV1,
                    Ports: []v1alpha2.ContainerPort{
                        {
                            Name: "wordpress",
                            Port: 80,
                        },
                    },
                },
            },
        },
    }
    baseComp := &v1alpha2.Component{
        TypeMeta: metav1.TypeMeta{
            Kind:       "Component",
            APIVersion: "core.oam.dev/v1alpha2",
        }, ObjectMeta: metav1.ObjectMeta{
            Name:      "myweb",
            Namespace: namespaceName,
            Labels:    map[string]string{"application.oam.dev": "test"},
        },
        Spec: v1alpha2.ComponentSpec{
            Workload: runtime.RawExtension{
                Object: &cwV1,
            },
        }}

    revisionBase := v1.ControllerRevision{
        ObjectMeta: metav1.ObjectMeta{
            Name:      "revisionName",
            Namespace: baseComp.Namespace,
            OwnerReferences: []metav1.OwnerReference{
                {
                    APIVersion: v1alpha2.SchemeGroupVersion.String(),
                    Kind:       v1alpha2.ComponentKind,
                    Name:       baseComp.Name,
                    UID:        baseComp.UID,
                    Controller: pointer.BoolPtr(true),
                },
            },
            Labels: map[string]string{
                "controller.oam.dev/component": baseComp.Name,
            },
        },
        Revision: 2,
        Data:     runtime.RawExtension{Object: baseComp},
    }

    tests := map[string]struct {
        getFunc        test.ObjectFn
        curCompSpec    *v1alpha2.ComponentSpec
        expectedResult bool
        expectedErr    error
    }{
        "compare object": {
            getFunc: func(obj runtime.Object) error {
                o, ok := obj.(*v1.ControllerRevision)
                if !ok {
                    t.Errorf("the object %+v is not of type controller revision", o)
                }
                *o = revisionBase
                return nil
            },
            curCompSpec: &v1alpha2.ComponentSpec{
                Workload: runtime.RawExtension{
                    Object: baseComp,
                },
            },
            expectedResult: true,
            expectedErr:    nil,
        },
        // TODO: add test cases
        // compare raw with object
        // raw with raw
        // diff in object meta
        // diff in namespace
    }
    for name, tt := range tests {
        t.Run(name, func(t *testing.T) {
            tclient := test.MockClient{
                MockGet: test.NewMockGetFn(nil, tt.getFunc),
            }
            same, err := CompareWithRevision(ctx, &tclient, logger, componentName, nameSpace, latestRevision,
                tt.curCompSpec)
            if err != tt.expectedErr {
                t.Errorf("CompareWithRevision() error = %v, wantErr %v", err, tt.expectedErr)
                return
            }
            if same != tt.expectedResult {
                t.Errorf("CompareWithRevision() got = %t, want %t", same, tt.expectedResult)
            }
        })
    }
}
@@ -35,7 +35,6 @@ import (
    "github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
    "github.com/oam-dev/kubevela/pkg/appfile"
    core "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev"
    "github.com/oam-dev/kubevela/pkg/oam"
    "github.com/oam-dev/kubevela/pkg/oam/discoverymapper"
)

@@ -44,8 +43,8 @@ const RolloutReconcileWaitTime = time.Second * 3

// Reconciler reconciles an Application object
type Reconciler struct {
    dm discoverymapper.DiscoveryMapper
    client.Client
    dm     discoverymapper.DiscoveryMapper
    Log    logr.Logger
    Scheme *runtime.Scheme
}
@@ -73,15 +72,6 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
        return ctrl.Result{}, nil
    }

    // Check if the oam rollout annotation exists
    if _, exist := app.GetAnnotations()[oam.AnnotationAppRollout]; exist {
        applog.Info("The application is still in the process of rolling out")
        app.Status.Phase = v1alpha2.ApplicationRollingOut
        app.Status.SetConditions(readyCondition("Rolling"))
        // do not process apps still in rolling out
        return ctrl.Result{RequeueAfter: RolloutReconcileWaitTime}, r.UpdateStatus(ctx, app)
    }

    applog.Info("Start Rendering")

    app.Status.Phase = v1alpha2.ApplicationRendering
@@ -95,9 +85,9 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {

    appfile, err := appParser.GenerateAppFile(app.Name, app)
    if err != nil {
        handler.l.Error(err, "[Handle Parse]")
        applog.Error(err, "[Handle Parse]")
        app.Status.SetConditions(errorCondition("Parsed", err))
        return handler.Err(err)
        return handler.handleErr(err)
    }

    app.Status.SetConditions(readyCondition("Parsed"))
@@ -106,18 +96,18 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
    // build template to applicationconfig & component
    ac, comps, err := appParser.GenerateApplicationConfiguration(appfile, app.Namespace)
    if err != nil {
        handler.l.Error(err, "[Handle GenerateApplicationConfiguration]")
        applog.Error(err, "[Handle GenerateApplicationConfiguration]")
        app.Status.SetConditions(errorCondition("Built", err))
        return handler.Err(err)
        return handler.handleErr(err)
    }

    app.Status.SetConditions(readyCondition("Built"))
    applog.Info("apply appConfig & component to the cluster")
    // apply appConfig & component to the cluster
    if err := handler.apply(ctx, ac, comps); err != nil {
        handler.l.Error(err, "[Handle apply]")
        applog.Error(err, "[Handle apply]")
        app.Status.SetConditions(errorCondition("Applied", err))
        return handler.Err(err)
        return handler.handleErr(err)
    }

    app.Status.SetConditions(readyCondition("Applied"))
@@ -128,7 +118,7 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
    appCompStatus, healthy, err := handler.statusAggregate(appfile)
    if err != nil {
        app.Status.SetConditions(errorCondition("HealthCheck", err))
        return handler.Err(err)
        return handler.handleErr(err)
    }
    if !healthy {
        app.Status.SetConditions(errorCondition("HealthCheck", errors.New("not healthy")))
@@ -36,18 +36,18 @@ import (
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/utils/pointer"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/reconcile"
    "sigs.k8s.io/yaml"

    "github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
    "github.com/oam-dev/kubevela/pkg/controller/common"
    "github.com/oam-dev/kubevela/pkg/oam"
    "github.com/oam-dev/kubevela/pkg/oam/util"
)

var _ = Describe("Test Application Controller", func() {
    ctx := context.Background()
    ctx := context.TODO()
    appwithConfig := &v1alpha2.Application{
        TypeMeta: metav1.TypeMeta{
            Kind: "Application",
@@ -192,13 +192,14 @@ var _ = Describe("Test Application Controller", func() {

    })
    AfterEach(func() {
        By("[TEST] Clean up resources after an integration test")
    })

    It("app-without-trait will only create workload", func() {
        expDeployment := getExpDeployment("myweb2", appwithNoTrait.Name)
        ns := &corev1.Namespace{
            ObjectMeta: metav1.ObjectMeta{
                Name: "vela-test",
                Name: "vela-test-app-without-trait",
            },
        }
        appwithNoTrait.SetNamespace(ns.Name)
@@ -219,7 +220,7 @@ var _ = Describe("Test Application Controller", func() {
        appConfig := &v1alpha2.ApplicationConfiguration{}
        Expect(k8sClient.Get(ctx, client.ObjectKey{
            Namespace: appwithNoTrait.Namespace,
            Name:      appwithNoTrait.Name,
            Name:      common.ConstructRevisionName(appwithNoTrait.Name, 1),
        }, appConfig)).Should(BeNil())

        By("Check Component Created with the expected workload spec")
@@ -233,8 +234,14 @@ var _ = Describe("Test Application Controller", func() {
        Expect(component.ObjectMeta.OwnerReferences[0].Kind).Should(BeEquivalentTo("Application"))
        Expect(component.ObjectMeta.OwnerReferences[0].APIVersion).Should(BeEquivalentTo("core.oam.dev/v1alpha2"))
        Expect(component.ObjectMeta.OwnerReferences[0].Controller).Should(BeEquivalentTo(pointer.BoolPtr(true)))
        gotD := &v1.Deployment{}
        Expect(component.Status.LatestRevision).ShouldNot(BeNil())

        // check that the new appconfig has the correct annotation and labels
        Expect(appConfig.GetAnnotations()[oam.AnnotationNewAppConfig]).Should(BeIdenticalTo("true"))
        Expect(appConfig.GetLabels()[oam.LabelAppConfigHash]).ShouldNot(BeEmpty())

        // check the workload created should be the same as the raw data in the component
        gotD := &v1.Deployment{}
        Expect(json.Unmarshal(component.Spec.Workload.Raw, gotD)).Should(BeNil())
        fmt.Println(cmp.Diff(expDeployment, gotD))
        Expect(assert.ObjectsAreEqual(expDeployment, gotD)).Should(BeEquivalentTo(true))
@@ -270,7 +277,7 @@ var _ = Describe("Test Application Controller", func() {
        appConfig := &v1alpha2.ApplicationConfiguration{}
        Expect(k8sClient.Get(ctx, client.ObjectKey{
            Namespace: app.Namespace,
            Name:      app.Name,
            Name:      common.ConstructRevisionName(app.Name, 1),
        }, appConfig)).Should(BeNil())

        By("Check Component Created with the expected workload spec")
@@ -316,7 +323,7 @@ var _ = Describe("Test Application Controller", func() {
        appConfig := &v1alpha2.ApplicationConfiguration{}
        Expect(k8sClient.Get(ctx, client.ObjectKey{
            Namespace: app.Namespace,
            Name:      app.Name,
            Name:      common.ConstructRevisionName(app.Name, 1),
        }, appConfig)).Should(BeNil())

        gotTrait := unstructured.Unstructured{}
@@ -381,10 +388,16 @@ var _ = Describe("Test Application Controller", func() {
        appConfig := &v1alpha2.ApplicationConfiguration{}
        Expect(k8sClient.Get(ctx, client.ObjectKey{
            Namespace: app.Namespace,
            Name:      app.Name,
            Name:      common.ConstructRevisionName(app.Name, 1),
        }, appConfig)).Should(BeNil())

        Expect(len(appConfig.Spec.Components[0].Traits)).Should(BeEquivalentTo(2))
        Expect(appConfig.Spec.Components[0].ComponentName).Should(BeEmpty())
        Expect(appConfig.Spec.Components[0].RevisionName).ShouldNot(BeEmpty())
        // component create handler may create a v2 when it can't find v1
        Expect(appConfig.Spec.Components[0].RevisionName).Should(
            SatisfyAny(BeEquivalentTo(common.ConstructRevisionName(compName, 1)),
                BeEquivalentTo(common.ConstructRevisionName(compName, 2))))

        gotTrait := unstructured.Unstructured{}
        By("Check the first trait should be service")
@@ -465,7 +478,7 @@ var _ = Describe("Test Application Controller", func() {
        appConfig := &v1alpha2.ApplicationConfiguration{}
        Expect(k8sClient.Get(ctx, client.ObjectKey{
            Namespace: app.Namespace,
            Name:      app.Name,
            Name:      common.ConstructRevisionName(app.Name, 1),
        }, appConfig)).Should(BeNil())

        gotTrait := unstructured.Unstructured{}
@@ -527,7 +540,7 @@ var _ = Describe("Test Application Controller", func() {
        appConfig := &v1alpha2.ApplicationConfiguration{}
        Expect(k8sClient.Get(ctx, client.ObjectKey{
            Namespace: app.Namespace,
            Name:      app.Name,
            Name:      common.ConstructRevisionName(app.Name, 1),
        }, appConfig)).Should(BeNil())

        gotTrait := unstructured.Unstructured{}
@@ -595,7 +608,7 @@ var _ = Describe("Test Application Controller", func() {
        By("check AC and Component updated")
        Expect(k8sClient.Get(ctx, client.ObjectKey{
            Namespace: app.Namespace,
            Name:      app.Name,
            Name:      common.ConstructRevisionName(app.Name, 1),
        }, appConfig)).Should(BeNil())

        Expect(json.Unmarshal(appConfig.Spec.Components[0].Traits[0].Trait.Raw, &gotTrait)).Should(BeNil())
@@ -680,7 +693,7 @@ var _ = Describe("Test Application Controller", func() {
        appConfig := &v1alpha2.ApplicationConfiguration{}
        Expect(k8sClient.Get(ctx, client.ObjectKey{
            Namespace: app.Namespace,
            Name:      app.Name,
            Name:      common.ConstructRevisionName(app.Name, 1),
        }, appConfig)).Should(BeNil())

        gotTrait := unstructured.Unstructured{}
@@ -792,35 +805,92 @@ var _ = Describe("Test Application Controller", func() {
        Expect(k8sClient.Delete(ctx, app)).Should(BeNil())
    })

    It("app with rolling out annotation", func() {
    It("app generate appConfigs with annotation", func() {
        By("create application with rolling out annotation")
        ns := &corev1.Namespace{
            ObjectMeta: metav1.ObjectMeta{
                Name: "app-test-with-rollout",
                Name: "vela-test-app-with-rollout",
            },
        }
        appWithTrait.SetNamespace(ns.Name)
        rolloutApp := appWithTraitAndScope.DeepCopy()
        rolloutApp.SetNamespace(ns.Name)
        Expect(k8sClient.Create(ctx, ns)).Should(BeNil())
        app := appWithTrait.DeepCopy()
        app.SetAnnotations(map[string]string{
        compName := rolloutApp.Spec.Components[0].Name
        // set the annotation
        rolloutApp.SetAnnotations(map[string]string{
            oam.AnnotationAppRollout: "true",
        })
        Expect(k8sClient.Create(ctx, rolloutApp)).Should(BeNil())

        By("apply appfile")
        Expect(k8sClient.Create(ctx, app)).Should(BeNil())
        appKey := client.ObjectKey{
            Name:      app.Name,
            Namespace: app.Namespace,
            Name:      rolloutApp.Name,
            Namespace: rolloutApp.Namespace,
        }
        result, err := reconciler.Reconcile(reconcile.Request{NamespacedName: appKey})
        Expect(result).To(BeIdenticalTo(ctrl.Result{RequeueAfter: RolloutReconcileWaitTime}))
        Expect(err).ToNot(HaveOccurred())
        By("Check App status is rollingout")
        reconcileRetry(reconciler, reconcile.Request{NamespacedName: appKey})
        By("Check Application Created with the correct revision")
        checkApp := &v1alpha2.Application{}
        Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
        Expect(checkApp.Status.Phase).Should(Equal(v1alpha2.ApplicationRollingOut))
        Expect(checkApp.Status.Phase).Should(Equal(v1alpha2.ApplicationRunning))
        Expect(checkApp.Status.LatestRevision).ShouldNot(BeNil())
        Expect(checkApp.Status.LatestRevision.Revision).Should(BeEquivalentTo(1))
        By("Check ApplicationConfiguration Created")
        appConfig := &v1alpha2.ApplicationConfiguration{}
        Expect(k8sClient.Get(ctx, client.ObjectKey{
            Namespace: rolloutApp.Namespace,
            Name:      common.ConstructRevisionName(rolloutApp.Name, 1),
        }, appConfig)).Should(BeNil())
        // check v2 is not created
        Expect(k8sClient.Get(ctx, client.ObjectKey{
            Namespace: rolloutApp.Namespace,
            Name:      common.ConstructRevisionName(rolloutApp.Name, 2),
        }, appConfig)).Should(HaveOccurred())
        Expect(checkApp.Status.LatestRevision).ShouldNot(BeNil())
        Expect(checkApp.Status.LatestRevision.Revision).Should(BeEquivalentTo(1))
        By("Check Component Created with the expected workload spec")
        var component v1alpha2.Component
        Expect(k8sClient.Get(ctx, client.ObjectKey{
            Namespace: rolloutApp.Namespace,
            Name:      compName,
        }, &component)).Should(BeNil())
        Expect(component.Status.LatestRevision).ShouldNot(BeNil())
        Expect(component.Status.LatestRevision.Revision).Should(
            SatisfyAny(BeEquivalentTo(1), BeEquivalentTo(2)))

        Expect(k8sClient.Delete(ctx, app)).Should(BeNil())
        // check that the new appconfig has the correct annotation and labels
        Expect(appConfig.GetAnnotations()[oam.AnnotationNewAppConfig]).Should(BeIdenticalTo("true"))
        Expect(appConfig.GetAnnotations()[oam.AnnotationNewComponent]).Should(Equal(component.Status.LatestRevision.Name))
        Expect(appConfig.GetLabels()[oam.LabelAppConfigHash]).ShouldNot(BeEmpty())
        Expect(appConfig.Spec.Components[0].ComponentName).Should(BeEmpty())
        Expect(appConfig.Spec.Components[0].RevisionName).Should(Equal(component.Status.LatestRevision.Name))

        By("Reconcile again to make sure we are not creating more appConfigs")
        reconcileRetry(reconciler, reconcile.Request{NamespacedName: appKey})
        Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
        Expect(checkApp.Status.Phase).Should(Equal(v1alpha2.ApplicationRunning))
        Expect(checkApp.Status.LatestRevision).ShouldNot(BeNil())
        Expect(checkApp.Status.LatestRevision.Revision).Should(BeEquivalentTo(1))

        By("Check no new ApplicationConfiguration created")
        Expect(k8sClient.Get(ctx, client.ObjectKey{
            Namespace: rolloutApp.Namespace,
            Name:      common.ConstructRevisionName(rolloutApp.Name, 1),
        }, appConfig)).Should(BeNil())
        // check v2 is not created
        Expect(k8sClient.Get(ctx, client.ObjectKey{
            Namespace: rolloutApp.Namespace,
            Name:      common.ConstructRevisionName(rolloutApp.Name, 2),
        }, appConfig)).Should(HaveOccurred())
        By("Check no new Component created")
        Expect(k8sClient.Get(ctx, client.ObjectKey{
            Namespace: rolloutApp.Namespace,
            Name:      compName,
        }, &component)).Should(BeNil())
        Expect(component.Status.LatestRevision).ShouldNot(BeNil())
        Expect(component.Status.LatestRevision.Revision).ShouldNot(BeNil())
        Expect(component.Status.LatestRevision.Revision).Should(BeEquivalentTo(1))

        By("Delete Application, clean the resource")
        Expect(k8sClient.Delete(ctx, rolloutApp)).Should(BeNil())
    })

    It("app with health policy and custom status for workload", func() {
@@ -982,12 +1052,16 @@ var _ = Describe("Test Application Controller", func() {

func reconcileRetry(r reconcile.Reconciler, req reconcile.Request) {
    Eventually(func() error {
        _, err := r.Reconcile(req)
        result, err := r.Reconcile(req)
        if err != nil {
            fmt.Println("reconcile err: ", err)
            By(fmt.Sprintf("reconcile err: %+v ", err))
        } else if result.Requeue || result.RequeueAfter > 0 {
            // retry if we need to requeue
            By("reconcile failed with requeue")
            return fmt.Errorf("reconcile failed with requeue")
        }
        return err
    }, 3*time.Second, time.Second).Should(BeNil())
    }, 30*time.Second, time.Second).Should(BeNil())
}

const (
@@ -2,22 +2,31 @@ package application

import (
    "context"
    "fmt"
    "strconv"
    "time"

    runtimev1alpha1 "github.com/crossplane/crossplane-runtime/apis/core/v1alpha1"
    "github.com/crossplane/crossplane-runtime/pkg/logging"
    "github.com/go-logr/logr"
    "github.com/mitchellh/hashstructure/v2"
    "github.com/pkg/errors"
    v1 "k8s.io/api/core/v1"
    apiequality "k8s.io/apimachinery/pkg/api/equality"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    ctypes "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/utils/pointer"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"

    "github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
    "github.com/oam-dev/kubevela/pkg/appfile"
    "github.com/oam-dev/kubevela/pkg/controller/common"
    "github.com/oam-dev/kubevela/pkg/controller/utils"
    "github.com/oam-dev/kubevela/pkg/dsl/process"
    "github.com/oam-dev/kubevela/pkg/oam"
    oamutil "github.com/oam-dev/kubevela/pkg/oam/util"
)

func errorCondition(tpy string, err error) runtimev1alpha1.Condition {
@@ -40,41 +49,98 @@ func readyCondition(tpy string) runtimev1alpha1.Condition {
}

type appHandler struct {
    r   *Reconciler
    app *v1alpha2.Application
    l   logr.Logger
    r      *Reconciler
    app    *v1alpha2.Application
    logger logr.Logger
}

func (ret *appHandler) Err(err error) (ctrl.Result, error) {
    nerr := ret.r.UpdateStatus(context.Background(), ret.app)
func (h *appHandler) handleErr(err error) (ctrl.Result, error) {
    nerr := h.r.UpdateStatus(context.Background(), h.app)
    if err == nil && nerr == nil {
        return ctrl.Result{}, nil
    }
    if nerr != nil {
        ret.l.Error(nerr, "[Update] application")
        h.logger.Error(nerr, "[Update] application")
    }
    return ctrl.Result{
        RequeueAfter: time.Second * 10,
    }, nil
}

// apply will set ownerReference for ApplicationConfiguration and Components created by Application
func (ret *appHandler) apply(ctx context.Context, ac *v1alpha2.ApplicationConfiguration, comps []*v1alpha2.Component) error {
// apply will
// 1. set ownerReference for ApplicationConfiguration and Components
// 2. update AC's components using the component revision name
// 3. update or create the AC with a new revision and remember it in the application status
// 4. garbage collect unused components
func (h *appHandler) apply(ctx context.Context, ac *v1alpha2.ApplicationConfiguration, comps []*v1alpha2.Component) error {
    owners := []metav1.OwnerReference{{
        APIVersion: v1alpha2.SchemeGroupVersion.String(),
        Kind:       v1alpha2.ApplicationKind,
        Name:       ret.app.Name,
        UID:        ret.app.UID,
        Name:       h.app.Name,
        UID:        h.app.UID,
        Controller: pointer.BoolPtr(true),
    }}
    ac.SetOwnerReferences(owners)
    for _, c := range comps {
        c.SetOwnerReferences(owners)
    hasRolloutLogic := false
    // check if we are rolling out
    if _, exist := h.app.GetAnnotations()[oam.AnnotationAppRollout]; exist || h.app.Spec.RolloutPlan != nil {
        h.logger.Info("The application rolling out is controlled by a rollout plan")
        hasRolloutLogic = true
    }
    return ret.Sync(ctx, ac, comps)

    for _, comp := range comps {
        comp.SetOwnerReferences(owners)
        newComp := comp.DeepCopy()
        // newComp will be updated, and the revision name is returned instead of the component name
        revisionName, newRevision, err := h.createOrUpdateComponent(ctx, newComp)
        if err != nil {
            return err
        }
        if newRevision && hasRolloutLogic {
            // set the annotation on the ac to point out which component is newly changed
            // TODO: handle multiple components
            ac.SetAnnotations(oamutil.MergeMapOverrideWithDst(ac.GetAnnotations(), map[string]string{
                oam.AnnotationNewComponent: revisionName,
            }))
        }
        // find the AC component that refers to this component
        for i := 0; i < len(ac.Spec.Components); i++ {
            // update the AC using the component revision instead of the component name
            // we have to make the AC immutable, including the component it's pointing to
            if ac.Spec.Components[i].ComponentName == newComp.Name {
                ac.Spec.Components[i].RevisionName = revisionName
                ac.Spec.Components[i].ComponentName = ""
            }
        }
    }

    if err := h.createOrUpdateAppConfig(ctx, ac); err != nil {
        return err
    }

    // Garbage Collection for unused Components.
    // There's no need for ApplicationConfiguration Garbage Collection; it has the same name as the Application.
    for _, comp := range h.app.Status.Components {
        var exist = false
        for _, cc := range comps {
            if comp.Name == cc.Name {
                exist = true
                break
            }
        }
        if exist {
            continue
        }
        // Component does not exist in the current Application, so it should be deleted
        var oldC = &v1alpha2.Component{ObjectMeta: metav1.ObjectMeta{Name: comp.Name, Namespace: ac.Namespace}}
        if err := h.r.Delete(ctx, oldC); err != nil {
            return err
        }
    }
    return nil
}

func (ret *appHandler) statusAggregate(appfile *appfile.Appfile) ([]v1alpha2.ApplicationComponentStatus, bool, error) {
func (h *appHandler) statusAggregate(appfile *appfile.Appfile) ([]v1alpha2.ApplicationComponentStatus, bool, error) {
    var appStatus []v1alpha2.ApplicationComponentStatus
    var healthy = true
    for _, wl := range appfile.Workloads {
@@ -92,7 +158,7 @@ func (ret *appHandler) statusAggregate(appfile *appfile.Appfile) ([]v1alpha2.App
        }
    }

    workloadHealth, err := wl.EvalHealth(pCtx, ret.r, ret.app.Namespace)
    workloadHealth, err := wl.EvalHealth(pCtx, h.r, h.app.Namespace)
    if err != nil {
        return nil, false, errors.WithMessagef(err, "app=%s, comp=%s, check health error", appfile.Name, wl.Name)
    }
@@ -101,7 +167,7 @@ func (ret *appHandler) statusAggregate(appfile *appfile.Appfile) ([]v1alpha2.App
        status.Healthy = false
        healthy = false
    }
    status.Message, err = wl.EvalStatus(pCtx, ret.r, ret.app.Namespace)
    status.Message, err = wl.EvalStatus(pCtx, h.r, h.app.Namespace)
    if err != nil {
        return nil, false, errors.WithMessagef(err, "app=%s, comp=%s, evaluate workload status message error", appfile.Name, wl.Name)
    }
@@ -111,7 +177,7 @@ func (ret *appHandler) statusAggregate(appfile *appfile.Appfile) ([]v1alpha2.App
        Type:    trait.Name,
        Healthy: true,
    }
    traitHealth, err := trait.EvalHealth(pCtx, ret.r, ret.app.Namespace)
    traitHealth, err := trait.EvalHealth(pCtx, h.r, h.app.Namespace)
    if err != nil {
        return nil, false, errors.WithMessagef(err, "app=%s, comp=%s, trait=%s, check health error", appfile.Name, wl.Name, trait.Name)
    }
@@ -120,7 +186,7 @@ func (ret *appHandler) statusAggregate(appfile *appfile.Appfile) ([]v1alpha2.App
        traitStatus.Healthy = false
        healthy = false
    }
    traitStatus.Message, err = trait.EvalStatus(pCtx, ret.r, ret.app.Namespace)
    traitStatus.Message, err = trait.EvalStatus(pCtx, h.r, h.app.Namespace)
    if err != nil {
        return nil, false, errors.WithMessagef(err, "app=%s, comp=%s, trait=%s, evaluate status message error", appfile.Name, wl.Name, trait.Name)
    }
@@ -132,68 +198,153 @@ func (ret *appHandler) statusAggregate(appfile *appfile.Appfile) ([]v1alpha2.App
    return appStatus, healthy, nil
}

// CreateOrUpdateComponent will create if not exist and update if exists.
func CreateOrUpdateComponent(ctx context.Context, client client.Client, comp *v1alpha2.Component) error {
    var getc v1alpha2.Component
    key := ctypes.NamespacedName{Name: comp.Name, Namespace: comp.Namespace}
    if err := client.Get(ctx, key, &getc); err != nil {
// createOrUpdateComponent creates a component if it does not exist and updates it if it does.
// it returns the corresponding component revisionName and whether a new component revision was created
func (h *appHandler) createOrUpdateComponent(ctx context.Context, comp *v1alpha2.Component) (string, bool, error) {
    curComp := v1alpha2.Component{}
    var preRevisionName, curRevisionName string
    compName := comp.GetName()
    compNameSpace := comp.GetNamespace()
    compKey := ctypes.NamespacedName{Name: compName, Namespace: compNameSpace}

    err := h.r.Get(ctx, compKey, &curComp)
    if err != nil {
        if !apierrors.IsNotFound(err) {
            return err
            return "", false, err
        }
        return client.Create(ctx, comp)
        if err = h.r.Create(ctx, comp); err != nil {
            return "", false, err
        }
        h.logger.Info("Created a new component", "component name", comp.GetName())
    } else {
        // remember the revision if there is a previous component
        if curComp.Status.LatestRevision != nil {
            preRevisionName = curComp.Status.LatestRevision.Name
        }
        comp.ResourceVersion = curComp.ResourceVersion
        if err := h.r.Update(ctx, comp); err != nil {
            return "", false, err
        }
        h.logger.Info("Updated a component", "component name", comp.GetName())
    }
    comp.ResourceVersion = getc.ResourceVersion
    return client.Update(ctx, comp)
    // remove the object from the raw extension before we can compare with the existing componentRevision whose
    // object is persisted as Raw data after going through the api server
    updatedComp := comp.DeepCopy()
    updatedComp.Spec.Workload.Object = nil
    if len(preRevisionName) != 0 {
        needNewRevision, err := common.CompareWithRevision(ctx, h.r,
            logging.NewLogrLogger(h.logger), compName, compNameSpace, preRevisionName, &updatedComp.Spec)
        if err != nil {
            return "", false, errors.Wrap(err, fmt.Sprintf("compare with existing controllerRevision %s failed",
                preRevisionName))
        }
        if !needNewRevision {
            h.logger.Info("no need to wait for a new component revision", "component name", updatedComp.GetName(),
                "revision", preRevisionName)
            return preRevisionName, false, nil
        }
    }
    h.logger.Info("wait for a new component revision", "component name", compName,
        "previous revision", preRevisionName)
    // get the new component revision that contains the component, with retry
    checkForRevision := func() (bool, error) {
        if err := h.r.Get(ctx, compKey, &curComp); err != nil {
            // retry no matter what
            return false, nil
        }
        if curComp.Status.LatestRevision == nil || curComp.Status.LatestRevision.Name == preRevisionName {
            return false, nil
        }
        needNewRevision, err := common.CompareWithRevision(ctx, h.r, logging.NewLogrLogger(h.logger), compName,
            compNameSpace, curComp.Status.LatestRevision.Name, &updatedComp.Spec)
        if err != nil {
            // retry no matter what
            return false, nil
        }
        // end the loop if we find the revision
        if !needNewRevision {
            curRevisionName = curComp.Status.LatestRevision.Name
            h.logger.Info("get a matching component revision", "component name", compName,
                "current revision", curRevisionName)
        }
        return !needNewRevision, nil
    }
    if err := wait.ExponentialBackoff(utils.DefaultBackoff, checkForRevision); err != nil {
        return "", true, err
    }
    return curRevisionName, true, nil
}
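The revision wait above relies on wait.ExponentialBackoff polling a condition until it reports done. A minimal, self-contained sketch of that pattern; the backoff values and attempt count are illustrative, not the controller's actual settings:

package main

import (
    "fmt"
    "time"

    "k8s.io/apimachinery/pkg/util/wait"
)

func main() {
    // poll until the condition reports done, or the backoff steps run out;
    // returning (false, nil) means "not yet, try again"
    attempts := 0
    backoff := wait.Backoff{Duration: time.Millisecond, Factor: 2, Steps: 5}
    err := wait.ExponentialBackoff(backoff, func() (bool, error) {
        attempts++
        return attempts >= 3, nil // pretend the new revision shows up on the third check
    })
    fmt.Println(attempts, err) // 3 <nil>
}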
|
||||
// CreateOrUpdateAppConfig will create if not exist and update if exists.
|
||||
func CreateOrUpdateAppConfig(ctx context.Context, client client.Client, appConfig *v1alpha2.ApplicationConfiguration) error {
|
||||
var geta v1alpha2.ApplicationConfiguration
|
||||
key := ctypes.NamespacedName{Name: appConfig.Name, Namespace: appConfig.Namespace}
|
||||
var exist = true
|
||||
if err := client.Get(ctx, key, &geta); err != nil {
|
||||
if !apierrors.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
exist = false
|
||||
}
|
||||
if !exist {
|
||||
return client.Create(ctx, appConfig)
|
||||
}
|
||||
appConfig.ResourceVersion = geta.ResourceVersion
|
||||
return client.Update(ctx, appConfig)
|
||||
}
|
||||
|
||||
// Sync perform synchronization operations
|
||||
func (ret *appHandler) Sync(ctx context.Context, ac *v1alpha2.ApplicationConfiguration, comps []*v1alpha2.Component) error {
|
||||
for _, comp := range comps {
|
||||
if err := CreateOrUpdateComponent(ctx, ret.r, comp.DeepCopy()); err != nil {
|
||||
return err
|
||||
// createOrUpdateAppConfig will find the latest revision of the AC according
|
||||
// it will create a new revision if the appConfig is different from the existing one
|
||||
func (h *appHandler) createOrUpdateAppConfig(ctx context.Context, appConfig *v1alpha2.ApplicationConfiguration) error {
|
||||
var curAppConfig v1alpha2.ApplicationConfiguration
|
||||
// initialized
|
||||
if h.app.Status.LatestRevision == nil {
|
||||
revisionName := common.ConstructRevisionName(h.app.Name, 0)
|
||||
h.app.Status.LatestRevision = &v1alpha2.Revision{
|
||||
Name: revisionName,
|
||||
Revision: 0,
|
||||
}
|
||||
}
|
||||
|
||||
if err := CreateOrUpdateAppConfig(ctx, ret.r, ac); err != nil {
|
||||
// compute a hash value of the appConfig spec
|
||||
specHash, err := hashstructure.Hash(appConfig.Spec, hashstructure.FormatV2, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
appConfig.SetLabels(oamutil.MergeMapOverrideWithDst(appConfig.GetLabels(),
|
||||
map[string]string{
|
||||
oam.LabelAppConfigHash: strconv.FormatUint(specHash, 16),
|
||||
}))
|
||||
|
||||
	// Garbage collection for unused Components.
	// There's no need for ApplicationConfiguration garbage collection; it has the same name as the Application.
	for _, comp := range ret.app.Status.Components {
		var exist = false
		for _, cc := range comps {
			if comp.Name == cc.Name {
				exist = true
				break
			}
		}
		if exist {
			continue
		}
		// Component does not exist in the current Application, so it should be deleted
		var oldC = &v1alpha2.Component{ObjectMeta: metav1.ObjectMeta{Name: comp.Name, Namespace: ac.Namespace}}
		if err := ret.r.Delete(ctx, oldC); err != nil {
	// get the AC with the last revision name stored in the application
	key := ctypes.NamespacedName{Name: h.app.Status.LatestRevision.Name, Namespace: h.app.Namespace}
	if err := h.r.Get(ctx, key, &curAppConfig); err != nil {
		if !apierrors.IsNotFound(err) {
			return err
		}
		h.logger.Info("create a new appConfig", "application name", h.app.GetName(),
			"latest revision that does not exist", h.app.Status.LatestRevision.Name)
		return h.createNewAppConfig(ctx, appConfig)
	}
	return nil

	// check if the old AC has the same HASH value
	if curAppConfig.GetLabels()[oam.LabelAppConfigHash] == appConfig.GetLabels()[oam.LabelAppConfigHash] {
		// Just to be safe that it's not because of a random Hash collision
		if apiequality.Semantic.DeepEqual(&curAppConfig.Spec, &appConfig.Spec) {
			// same spec, no need to create another AC
			return nil
		}
		h.logger.Info("encountered a different app spec with same hash", "current spec",
			curAppConfig.Spec, "new appConfig spec", appConfig.Spec)
	}
	// create the next version
	h.logger.Info("create a new appConfig", "application name", h.app.GetName(),
		"latest revision that does not match the appConfig", h.app.Status.LatestRevision.Name)
	return h.createNewAppConfig(ctx, appConfig)
}

// createNewAppConfig creates a new appConfig given the latest revision in the application
func (h *appHandler) createNewAppConfig(ctx context.Context, appConfig *v1alpha2.ApplicationConfiguration) error {
	nextRevision := h.app.Status.LatestRevision.Revision + 1
	revisionName := common.ConstructRevisionName(h.app.Name, nextRevision)
	// update the next revision in the application's status
	h.app.Status.LatestRevision = &v1alpha2.Revision{
		Name:     revisionName,
		Revision: nextRevision,
	}
	appConfig.Name = revisionName
	// indicate that the appConfig is new; the appConfig controller should remove this after the first reconcile
	appConfig.SetAnnotations(oamutil.MergeMapOverrideWithDst(appConfig.GetAnnotations(), map[string]string{
		oam.AnnotationNewAppConfig: "true",
	}))
	// record the appConfig we are about to create in the app's status first,
	// to make sure that we persist the latest revision before the create
	if err := h.r.UpdateStatus(ctx, h.app); err != nil {
		return err
	}
	h.logger.Info("recorded the latest appConfig revision", "application name", h.app.GetName(),
		"latest revision", revisionName)
	// it's ok if the create fails; we will create again in the next loop
	return h.r.Create(ctx, appConfig)
}

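Side note on the hash guard above: hashstructure produces a uint64 over the whole spec, and the hex-encoded value stored in the app.oam.dev/appConfig-hash label is what lets createOrUpdateAppConfig skip identical snapshots without a deep comparison. A minimal, self-contained sketch of that idea (not part of this commit; specChanged is an illustrative name, and only the mitchellh/hashstructure/v2 usage mirrors the code above):

package main

import (
	"fmt"
	"strconv"

	"github.com/mitchellh/hashstructure/v2"
)

// specChanged reports whether newSpec differs from the hash recorded on the
// previous snapshot. A matching hash is only a fast-path hint; the controller
// above still falls back to a semantic DeepEqual to rule out collisions.
func specChanged(oldHashLabel string, newSpec interface{}) (bool, string, error) {
	h, err := hashstructure.Hash(newSpec, hashstructure.FormatV2, nil)
	if err != nil {
		return false, "", err
	}
	newHash := strconv.FormatUint(h, 16)
	return newHash != oldHashLabel, newHash, nil
}

func main() {
	type spec struct{ Image string }
	_, h1, _ := specChanged("", spec{Image: "oamdev/testapp:v1"})
	changed, _, _ := specChanged(h1, spec{Image: "oamdev/testapp:v2"})
	fmt.Println(changed) // true: the controller would create the next revision
}
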
304 pkg/controller/core.oam.dev/v1alpha2/application/apply_test.go Normal file
@@ -0,0 +1,304 @@
package application

import (
	"context"
	"math/rand"
	"strconv"
	"strings"
	"time"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"github.com/crossplane/crossplane-runtime/pkg/logging"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"

	"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
	"github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
	"github.com/oam-dev/kubevela/pkg/controller/common"
	"github.com/oam-dev/kubevela/pkg/oam"
	oamutil "github.com/oam-dev/kubevela/pkg/oam/util"
)

var _ = Describe("Test Application apply", func() {
	var handler appHandler
	app := &v1alpha2.Application{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Application",
			APIVersion: "core.oam.dev/v1alpha2",
		},
	}
	var appConfig *v1alpha2.ApplicationConfiguration
	var namespaceName string
	var componentName string
	var ns corev1.Namespace

	BeforeEach(func() {
		ctx := context.TODO()
		namespaceName = "apply-test-" + strconv.Itoa(rand.Intn(1000))
		ns = corev1.Namespace{
			ObjectMeta: metav1.ObjectMeta{
				Name: namespaceName,
			},
		}
		app.Namespace = namespaceName
		app.Spec = v1alpha2.ApplicationSpec{
			Components: []v1alpha2.ApplicationComponent{{
				WorkloadType: "webservice",
				Name:         "express-server",
				Scopes:       map[string]string{"healthscopes.core.oam.dev": "myapp-default-health"},
				Settings: runtime.RawExtension{
					Raw: []byte(`{"image": "oamdev/testapp:v1", "cmd": ["node", "server.js"]}`),
				},
				Traits: []v1alpha2.ApplicationTrait{{
					Name: "route",
					Properties: runtime.RawExtension{
						Raw: []byte(`{"domain": "example.com", "http":{"/": 8080}}`),
					},
				},
				},
			}},
		}
		handler = appHandler{
			r:      reconciler,
			app:    app,
			logger: reconciler.Log.WithValues("application", "unit-test"),
		}

By("Create the Namespace for test")
|
||||
Expect(k8sClient.Create(ctx, &ns)).Should(Succeed())
|
||||
|
||||
appConfig = &v1alpha2.ApplicationConfiguration{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: app.Name,
|
||||
Namespace: namespaceName,
|
||||
},
|
||||
Spec: v1alpha2.ApplicationConfigurationSpec{
|
||||
Components: []v1alpha2.ApplicationConfigurationComponent{
|
||||
{
|
||||
ComponentName: componentName,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
})
|
||||
|
||||
	AfterEach(func() {
		By("[TEST] Clean up resources after an integration test")
		Expect(k8sClient.Delete(context.TODO(), &ns)).Should(Succeed())
	})

It("Test creating applicationConfiguration revision", func() {
|
||||
ctx := context.TODO()
|
||||
|
||||
By("[TEST] Test application without AC revision")
|
||||
app.Name = "test-revision"
|
||||
Expect(handler.r.Create(ctx, app)).NotTo(HaveOccurred())
|
||||
// Test create or update
|
||||
err := handler.createOrUpdateAppConfig(ctx, appConfig.DeepCopy())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
// verify
|
||||
curApp := &v1alpha2.Application{}
|
||||
Eventually(
|
||||
func() error {
|
||||
return handler.r.Get(ctx,
|
||||
types.NamespacedName{Namespace: ns.Name, Name: app.Name},
|
||||
curApp)
|
||||
},
|
||||
time.Second*10, time.Millisecond*500).Should(BeNil())
|
||||
|
||||
By("Verify that the application status has the lastRevision name ")
|
||||
Expect(curApp.Status.LatestRevision.Revision).Should(BeEquivalentTo(1))
|
||||
Expect(curApp.Status.LatestRevision.Name).Should(Equal(app.Name + "-v1"))
|
||||
curAC := &v1alpha2.ApplicationConfiguration{}
|
||||
Expect(handler.r.Get(ctx,
|
||||
types.NamespacedName{Namespace: ns.Name, Name: app.Name + "-v1"},
|
||||
curAC)).NotTo(HaveOccurred())
|
||||
// check that the annotation/labels are correctly applied
|
||||
Expect(curAC.GetAnnotations()[oam.AnnotationNewAppConfig]).Should(BeIdenticalTo("true"))
|
||||
Expect(curAC.GetLabels()[oam.LabelAppConfigHash]).ShouldNot(BeEmpty())
|
||||
hashValue := curAC.GetLabels()[oam.LabelAppConfigHash]
|
||||
|
||||
By("[TEST] apply the same appConfig mimic application controller, should do nothing")
|
||||
// this should not lead to a new AC
|
||||
err = handler.createOrUpdateAppConfig(ctx, appConfig.DeepCopy())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
// verify the app latest revision is not changed
|
||||
Eventually(
|
||||
func() error {
|
||||
return handler.r.Get(ctx,
|
||||
types.NamespacedName{Namespace: ns.Name, Name: app.Name},
|
||||
curApp)
|
||||
},
|
||||
time.Second*10, time.Millisecond*500).Should(BeNil())
|
||||
|
||||
By("Verify that the lastest revision does not change")
|
||||
Expect(curApp.Status.LatestRevision.Revision).Should(BeEquivalentTo(1))
|
||||
Expect(curApp.Status.LatestRevision.Name).Should(Equal(app.Name + "-v1"))
|
||||
Expect(handler.r.Get(ctx,
|
||||
types.NamespacedName{Namespace: ns.Name, Name: app.Name + "-v1"},
|
||||
curAC)).NotTo(HaveOccurred())
|
||||
|
||||
By("[TEST] Modify the applicationConfiguration mimic AC controller, should only update")
|
||||
// update the status of the AC which is expected after AC controller takes over
|
||||
curAC.Status.SetConditions(readyCondition("newType"))
|
||||
Expect(handler.r.Status().Update(ctx, curAC)).NotTo(HaveOccurred())
|
||||
// remove the new AppConfig annotation as AC controller would do
|
||||
cl := curAC.GetAnnotations()
|
||||
delete(cl, oam.AnnotationNewAppConfig)
|
||||
curAC.SetAnnotations(cl)
|
||||
Expect(handler.r.Update(ctx, curAC)).NotTo(HaveOccurred())
|
||||
// this should not lead to a new AC
|
||||
err = handler.createOrUpdateAppConfig(ctx, curAC.DeepCopy())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
// verify the app latest revision is not changed
|
||||
Eventually(
|
||||
func() error {
|
||||
return handler.r.Get(ctx,
|
||||
types.NamespacedName{Namespace: ns.Name, Name: app.Name},
|
||||
curApp)
|
||||
},
|
||||
time.Second*10, time.Millisecond*500).Should(BeNil())
|
||||
|
||||
By("Verify that the lastest revision does not change")
|
||||
Expect(curApp.Status.LatestRevision.Revision).Should(BeEquivalentTo(1))
|
||||
Expect(curApp.Status.LatestRevision.Name).Should(Equal(app.Name + "-v1"))
|
||||
Expect(handler.r.Get(ctx,
|
||||
types.NamespacedName{Namespace: ns.Name, Name: app.Name + "-v1"},
|
||||
curAC)).NotTo(HaveOccurred())
|
||||
// check that the new app annotation does not exist
|
||||
_, exist := curAC.GetAnnotations()[oam.AnnotationNewAppConfig]
|
||||
Expect(exist).Should(BeFalse())
|
||||
Expect(curAC.GetLabels()[oam.LabelAppConfigHash]).Should(BeEquivalentTo(hashValue))
|
||||
Expect(curAC.GetCondition("newType").Status).Should(BeEquivalentTo(corev1.ConditionTrue))
|
||||
// check that no new appConfig created
|
||||
Expect(handler.r.Get(ctx, types.NamespacedName{Namespace: ns.Name, Name: app.Name + "-v2"},
|
||||
curAC)).Should(&oamutil.NotFoundMatcher{})
|
||||
|
||||
By("[TEST] Modify the applicationConfiguration spec, should lead to a new AC")
|
||||
// update the spec of the AC which should lead to a new AC being created
|
||||
appConfig.Spec.Components[0].Traits = []v1alpha2.ComponentTrait{
|
||||
{
|
||||
Trait: runtime.RawExtension{
|
||||
Object: &v1alpha1.MetricsTrait{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "MetricsTrait",
|
||||
APIVersion: "standard.oam.dev/v1alpha1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: app.Name,
|
||||
Namespace: namespaceName,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
// this should lead to a new AC
|
||||
err = handler.createOrUpdateAppConfig(ctx, appConfig)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
// verify the app latest revision is not changed
|
||||
Eventually(
|
||||
func() error {
|
||||
return handler.r.Get(ctx,
|
||||
types.NamespacedName{Namespace: ns.Name, Name: app.Name},
|
||||
curApp)
|
||||
},
|
||||
time.Second*10, time.Millisecond*500).Should(BeNil())
|
||||
|
||||
By("Verify that the lastest revision is advanced")
|
||||
Expect(curApp.Status.LatestRevision.Revision).Should(BeEquivalentTo(2))
|
||||
Expect(curApp.Status.LatestRevision.Name).Should(Equal(app.Name + "-v2"))
|
||||
Expect(handler.r.Get(ctx,
|
||||
types.NamespacedName{Namespace: ns.Name, Name: app.Name + "-v2"},
|
||||
curAC)).NotTo(HaveOccurred())
|
||||
// check that the new app annotation exist and the hash value has changed
|
||||
Expect(curAC.GetAnnotations()[oam.AnnotationNewAppConfig]).Should(BeIdenticalTo("true"))
|
||||
Expect(curAC.GetLabels()[oam.LabelAppConfigHash]).ShouldNot(BeEmpty())
|
||||
Expect(curAC.GetLabels()[oam.LabelAppConfigHash]).ShouldNot(BeEquivalentTo(hashValue))
|
||||
// check that no more new appConfig created
|
||||
Expect(handler.r.Get(ctx, types.NamespacedName{Namespace: ns.Name, Name: app.Name + "-v3"},
|
||||
curAC)).Should(&oamutil.NotFoundMatcher{})
|
||||
})
|
||||
|
||||
It("Test update or create component", func() {
|
||||
ctx := context.TODO()
|
||||
By("[TEST] Setting up the testing environment")
|
||||
imageV1 := "wordpress:4.6.1-apache"
|
||||
imageV2 := "wordpress:4.6.2-apache"
|
||||
cwV1 := v1alpha2.ContainerizedWorkload{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "ContainerizedWorkload",
|
||||
APIVersion: "core.oam.dev/v1alpha2",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: namespaceName,
|
||||
},
|
||||
Spec: v1alpha2.ContainerizedWorkloadSpec{
|
||||
Containers: []v1alpha2.Container{
|
||||
{
|
||||
Name: "wordpress",
|
||||
Image: imageV1,
|
||||
Ports: []v1alpha2.ContainerPort{
|
||||
{
|
||||
Name: "wordpress",
|
||||
Port: 80,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
component := &v1alpha2.Component{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Component",
|
||||
APIVersion: "core.oam.dev/v1alpha2",
|
||||
}, ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "myweb",
|
||||
Namespace: namespaceName,
|
||||
Labels: map[string]string{"application.oam.dev": "test"},
|
||||
},
|
||||
Spec: v1alpha2.ComponentSpec{
|
||||
Workload: runtime.RawExtension{
|
||||
Object: &cwV1,
|
||||
},
|
||||
}}
|
||||
|
||||
By("[TEST] Creating a component the first time")
|
||||
// take a copy so the component's workload still uses object instead of raw data
|
||||
// just like the way we use it in prod. The raw data will be filled by the k8s for some reason.
|
||||
revision, newRevision, err := handler.createOrUpdateComponent(ctx, component.DeepCopy())
|
||||
By("verify that the revision is the set correctly and newRevision is true")
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
Expect(newRevision).Should(BeTrue())
|
||||
// verify the revision actually contains the right component
|
||||
Expect(common.CompareWithRevision(ctx, handler.r, logging.NewLogrLogger(handler.logger), component.GetName(),
|
||||
component.GetNamespace(), revision, &component.Spec)).Should(BeTrue())
|
||||
preRevision := revision
|
||||
|
||||
By("[TEST] update the component without any changes (mimic reconcile behavior)")
|
||||
revision, newRevision, err = handler.createOrUpdateComponent(ctx, component.DeepCopy())
|
||||
By("verify that the revision is the same and newRevision is false")
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
Expect(newRevision).Should(BeFalse())
|
||||
Expect(revision).Should(BeIdenticalTo(preRevision))
|
||||
|
||||
By("[TEST] update the component")
|
||||
// modify the component spec through object
|
||||
cwV2 := cwV1.DeepCopy()
|
||||
cwV2.Spec.Containers[0].Image = imageV2
|
||||
component.Spec.Workload.Object = cwV2
|
||||
revision, newRevision, err = handler.createOrUpdateComponent(ctx, component.DeepCopy())
|
||||
By("verify that the revision is changed and newRevision is true")
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
Expect(newRevision).Should(BeTrue())
|
||||
Expect(revision).ShouldNot(BeIdenticalTo(preRevision))
|
||||
Expect(common.CompareWithRevision(ctx, handler.r, logging.NewLogrLogger(handler.logger), component.GetName(),
|
||||
component.GetNamespace(), revision, &component.Spec)).Should(BeTrue())
|
||||
// revision increased
|
||||
Expect(strings.Compare(revision, preRevision) > 0).Should(BeTrue())
|
||||
})
|
||||
|
||||
})
|
||||
@@ -17,11 +17,16 @@ limitations under the License.
package application

import (
	"math/rand"
	"path/filepath"
	"testing"
	"time"

	"github.com/crossplane/crossplane-runtime/pkg/logging"
	"github.com/go-logr/logr"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"sigs.k8s.io/controller-runtime/pkg/source"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/scheme"
@@ -35,6 +40,8 @@ import (
	"sigs.k8s.io/controller-runtime/pkg/log/zap"

"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
|
||||
"github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
|
||||
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/applicationconfiguration"
|
||||
"github.com/oam-dev/kubevela/pkg/oam/discoverymapper"
|
||||
// +kubebuilder:scaffold:imports
|
||||
)
|
||||
@@ -45,11 +52,13 @@ var cfg *rest.Config
|
||||
var k8sClient client.Client
|
||||
var testEnv *envtest.Environment
|
||||
var testScheme = runtime.NewScheme()
|
||||
|
||||
var reconciler *Reconciler
|
||||
var stop = make(chan struct{})
|
||||
var ctlManager ctrl.Manager
|
||||
|
||||
// TODO: create a mock client and add UT to cover all the failure cases
|
||||
|
||||
func TestAPIs(t *testing.T) {
|
||||
|
||||
RegisterFailHandler(Fail)
|
||||
|
||||
RunSpecsWithDefaultAndCustomReporters(t,
|
||||
@@ -57,9 +66,18 @@ func TestAPIs(t *testing.T) {
|
||||
[]Reporter{printer.NewlineReporter{}})
|
||||
}
|
||||
|
||||
type NoOpReconciler struct {
	Log logr.Logger
}

func (r *NoOpReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
	r.Log.Info("received a request", "object name", req.Name)
	return ctrl.Result{}, nil
}

var _ = BeforeSuite(func(done Done) {
	logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(GinkgoWriter)))

	rand.Seed(time.Now().UnixNano())
	By("bootstrapping test environment")
	testEnv = &envtest.Environment{
		UseExistingCluster: pointer.BoolPtr(false),
@@ -74,6 +92,9 @@ var _ = BeforeSuite(func(done Done) {
	err = v1alpha2.SchemeBuilder.AddToScheme(testScheme)
	Expect(err).NotTo(HaveOccurred())

	err = v1alpha1.SchemeBuilder.AddToScheme(testScheme)
	Expect(err).NotTo(HaveOccurred())

	err = scheme.AddToScheme(testScheme)
	Expect(err).NotTo(HaveOccurred())
	// +kubebuilder:scaffold:scheme
@@ -85,10 +106,37 @@ var _ = BeforeSuite(func(done Done) {
	Expect(err).To(BeNil())
	reconciler = &Reconciler{
		Client: k8sClient,
		Log:    ctrl.Log.WithName("Application"),
		Log:    ctrl.Log.WithName("Application-Test"),
		Scheme: testScheme,
		dm:     dm,
	}
	// setup the controller manager since we need the component handler to run in the background
	ctlManager, err = ctrl.NewManager(cfg, ctrl.Options{
		Scheme:                  testScheme,
		MetricsBindAddress:      ":8080",
		LeaderElection:          false,
		LeaderElectionNamespace: "default",
		LeaderElectionID:        "test",
	})
	Expect(err).NotTo(HaveOccurred())
	// start to run the no-op reconciler that creates component revisions
	err = ctrl.NewControllerManagedBy(ctlManager).
		Named("component").
		For(&v1alpha2.Component{}).
		Watches(&source.Kind{Type: &v1alpha2.Component{}}, &applicationconfiguration.ComponentHandler{
			Client:                ctlManager.GetClient(),
			Logger:                logging.NewLogrLogger(ctrl.Log.WithName("application-testsuite-component-handler")),
			RevisionLimit:         100,
			CustomRevisionHookURL: "",
		}).Complete(&NoOpReconciler{
		Log: ctrl.Log.WithName("NoOp-Reconciler"),
	})
	Expect(err).NotTo(HaveOccurred())
	// start the controller in the background so that new componentRevisions are created
	go func() {
		err = ctlManager.Start(stop)
		Expect(err).NotTo(HaveOccurred())
	}()
	close(done)
}, 60)

@@ -96,4 +144,5 @@ var _ = AfterSuite(func() {
	By("tearing down the test environment")
	err := testEnv.Stop()
	Expect(err).ToNot(HaveOccurred())
	close(stop)
})

@@ -24,6 +24,7 @@ import (
	"time"

	"github.com/pkg/errors"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
@@ -232,7 +233,11 @@ func (r *OAMApplicationReconciler) Reconcile(req reconcile.Request) (result reco

	ac := &v1alpha2.ApplicationConfiguration{}
	if err := r.client.Get(ctx, req.NamespacedName, ac); err != nil {
		return errResult, errors.Wrap(resource.IgnoreNotFound(err), errGetAppConfig)
		if apierrors.IsNotFound(err) {
			// stop processing this resource
			return ctrl.Result{}, nil
		}
		return errResult, errors.Wrap(err, errGetAppConfig)
	}
	acPatch := ac.DeepCopy()

@@ -301,7 +306,7 @@ func (r *OAMApplicationReconciler) Reconcile(req reconcile.Request) (result reco

	applyOpts := []apply.ApplyOption{apply.MustBeControllableBy(ac.GetUID()), applyOnceOnly(ac, r.applyOnceOnlyMode)}
	if err := r.workloads.Apply(ctx, ac.Status.Workloads, workloads, applyOpts...); err != nil {
		log.Debug("Cannot apply components", "error", err, "requeue-after", time.Now().Add(shortWait))
		log.Debug("Cannot apply workload", "error", err, "requeue-after", time.Now().Add(shortWait))
		r.record.Event(ac, event.Warning(reasonCannotApplyComponents, err))
		ac.SetConditions(v1alpha1.ReconcileError(errors.Wrap(err, errApplyComponents)))
		return errResult, errors.Wrap(r.UpdateStatus(ctx, ac), errUpdateAppConfigStatus)

@@ -3,9 +3,7 @@ package applicationconfiguration
import (
	"context"
	"fmt"
	"reflect"
	"sort"
	"strings"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -13,6 +11,8 @@ import (
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/util/retry"
	"k8s.io/client-go/util/workqueue"
	"k8s.io/klog/v2"
	"k8s.io/utils/pointer"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
@@ -20,8 +20,7 @@ import (
	"github.com/crossplane/crossplane-runtime/pkg/logging"

	"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"

	util "github.com/oam-dev/kubevela/pkg/oam/util"
	"github.com/oam-dev/kubevela/pkg/controller/common"
)

// ControllerRevisionComponentLabel indicates which component the revision belongs to
@@ -104,37 +103,23 @@ func (c *ComponentHandler) getRelatedAppConfig(object metav1.Object) []reconcile
}

// IsRevisionDiff checks whether there is any difference between two component revisions
func (c *ComponentHandler) IsRevisionDiff(mt metav1.Object, curComp *v1alpha2.Component) (bool, int64) {
func (c *ComponentHandler) IsRevisionDiff(mt klog.KMetadata, curComp *v1alpha2.Component) (bool, int64) {
	if curComp.Status.LatestRevision == nil {
		return true, 0
	}

	// client in controller-runtime will use infoermer cache
	// client in controller-runtime will use informer cache
	// use client will be more efficient
	oldRev := &appsv1.ControllerRevision{}
	if err := c.Client.Get(context.TODO(), client.ObjectKey{Namespace: mt.GetNamespace(), Name: curComp.Status.LatestRevision.Name}, oldRev); err != nil {
		c.Logger.Info(fmt.Sprintf("get old controllerRevision %s error %v, will create new revision", curComp.Status.LatestRevision.Name, err), "componentName", mt.GetName())
		return true, curComp.Status.LatestRevision.Revision
	}
	if oldRev.Name == "" {
		c.Logger.Info(fmt.Sprintf("Not found controllerRevision %s", curComp.Status.LatestRevision.Name), "componentName", mt.GetName())
		return true, curComp.Status.LatestRevision.Revision
	}
	oldComp, err := util.UnpackRevisionData(oldRev)
	needNewRevision, err := common.CompareWithRevision(context.TODO(), c.Client, c.Logger, mt.GetName(), mt.GetNamespace(),
		curComp.Status.LatestRevision.Name, &curComp.Spec)
	// TODO: this might be a bug that we treat all errors getting from k8s as a new revision
	// but the client-go event handler doesn't handle an error. We need to see if we can retry this
	if err != nil {
		c.Logger.Info(fmt.Sprintf("Unmarshal old controllerRevision %s error %v, will create new revision", curComp.Status.LatestRevision.Name, err), "componentName", mt.GetName())
		return true, oldRev.Revision
		c.Logger.Info(fmt.Sprintf("Failed to compare the component with its latest revision with err = %+v", err),
			"component", mt.GetName(), "latest revision", curComp.Status.LatestRevision.Name)
		return true, curComp.Status.LatestRevision.Revision
	}

	if reflect.DeepEqual(curComp.Spec, oldComp.Spec) {
		return false, oldRev.Revision
	}
	return true, oldRev.Revision
}

func newTrue() *bool {
	b := true
	return &b
	return needNewRevision, curComp.Status.LatestRevision.Revision
}

func (c *ComponentHandler) createControllerRevision(mt metav1.Object, obj runtime.Object) ([]reconcile.Request, bool) {
@@ -154,7 +139,7 @@ func (c *ComponentHandler) createControllerRevision(mt metav1.Object, obj runtim
	}

	nextRevision := curRevision + 1
	revisionName := ConstructRevisionName(mt.GetName(), nextRevision)
	revisionName := common.ConstructRevisionName(mt.GetName(), nextRevision)

	if comp.Status.ObservedGeneration != comp.Generation {
		comp.Status.ObservedGeneration = comp.Generation
@@ -175,7 +160,7 @@ func (c *ComponentHandler) createControllerRevision(mt metav1.Object, obj runtim
				Kind:       v1alpha2.ComponentKind,
				Name:       comp.Name,
				UID:        comp.UID,
				Controller: newTrue(),
				Controller: pointer.BoolPtr(true),
			},
		},
		Labels: map[string]string{
@@ -186,6 +171,7 @@ func (c *ComponentHandler) createControllerRevision(mt metav1.Object, obj runtim
		Data: runtime.RawExtension{Object: comp},
	}

	// TODO: we should update the status first; otherwise, the subsequent creates will all fail if the update fails
	err := c.Client.Create(context.TODO(), &revision)
	if err != nil {
		c.Logger.Info(fmt.Sprintf("error create controllerRevision %v", err), "componentName", mt.GetName())
@@ -197,7 +183,9 @@ func (c *ComponentHandler) createControllerRevision(mt metav1.Object, obj runtim
		c.Logger.Info(fmt.Sprintf("update component status latestRevision %s err %v", revisionName, err), "componentName", mt.GetName())
		return nil, false
	}

c.Logger.Info(fmt.Sprintf("ControllerRevision %s created", revisionName))
|
||||
// garbage collect
|
||||
if int64(c.RevisionLimit) < nextRevision {
|
||||
if err := c.cleanupControllerRevision(comp); err != nil {
|
||||
c.Logger.Info(fmt.Sprintf("failed to clean up revisions of Component %v.", err))
|
||||
@@ -290,18 +278,6 @@ func (c *ComponentHandler) UpdateStatus(ctx context.Context, comp *v1alpha2.Comp
|
||||
})
|
||||
}
|
||||
|
||||
// ConstructRevisionName will generate a revisionName from the componentName;
// it will be <componentName>-v<RevisionNumber>, for example: comp-v1
func ConstructRevisionName(componentName string, revision int64) string {
	return strings.Join([]string{componentName, fmt.Sprintf("v%d", revision)}, "-")
}

// ExtractComponentName will extract the componentName from a revisionName
func ExtractComponentName(revisionName string) string {
	splits := strings.Split(revisionName, "-")
	return strings.Join(splits[0:len(splits)-1], "-")
}

// historiesByRevision sorts controllerRevisions by revision
type historiesByRevision []appsv1.ControllerRevision

@@ -222,20 +222,6 @@ func TestComponentHandler(t *testing.T) {
	// ============ Test Revisions End ===================
}

func TestConstructExtract(t *testing.T) {
	tests := []string{"tam1", "test-comp", "xx", "tt-x-x-c"}
	revisionNum := []int64{1, 5, 10, 100000}
	for idx, componentName := range tests {
		t.Run(fmt.Sprintf("tests %d for component[%s]", idx, componentName), func(t *testing.T) {
			revisionName := ConstructRevisionName(componentName, revisionNum[idx])
			got := ExtractComponentName(revisionName)
			if got != componentName {
				t.Errorf("want to get %s from %s but got %s", componentName, revisionName, got)
			}
		})
	}
}

func TestIsMatch(t *testing.T) {
	var appConfigs v1alpha2.ApplicationConfigurationList
	appConfigs.Items = []v1alpha2.ApplicationConfiguration{

@@ -37,6 +37,7 @@ import (
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
	"github.com/oam-dev/kubevela/pkg/controller/common"
	"github.com/oam-dev/kubevela/pkg/oam"
	"github.com/oam-dev/kubevela/pkg/oam/discoverymapper"
	"github.com/oam-dev/kubevela/pkg/oam/util"
@@ -125,7 +126,7 @@ func (r *components) Render(ctx context.Context, ac *v1alpha2.ApplicationConfigu

func (r *components) renderComponent(ctx context.Context, acc v1alpha2.ApplicationConfigurationComponent, ac *v1alpha2.ApplicationConfiguration, dag *dag) (*Workload, error) {
	if acc.RevisionName != "" {
		acc.ComponentName = ExtractComponentName(acc.RevisionName)
		acc.ComponentName = common.ExtractComponentName(acc.RevisionName)
	}
	c, componentRevisionName, err := util.GetComponent(ctx, r.client, acc, ac.GetNamespace())
	if err != nil {
@@ -267,10 +268,15 @@ func setTraitProperties(t *unstructured.Unstructured, traitName, namespace strin

func SetWorkloadInstanceName(traitDefs []v1alpha2.TraitDefinition, w *unstructured.Unstructured, c *v1alpha2.Component,
	existingWorkload *unstructured.Unstructured) error {
	// Don't override the specified name
	// TODO: revisit this, we might need to ban this type of naming since we have no control over it
	if w.GetName() != "" {
		return nil
	}
	pv := fieldpath.Pave(w.UnstructuredContent())
	// TODO: revisit this logic
	// the name of the workload should depend on the workload type and on whether we are doing a rolling or a replacing upgrade,
	// i.e. a CloneSet type of workload just uses the component name, while a Deployment type of workload will carry the revision
	// if we are doing rolling upgrades. We can just override the name if we are replacing the deployment.
	if isRevisionEnabled(traitDefs) {
		if c.Status.LatestRevision == nil {
			return fmt.Errorf(errFmtCompRevision, c.Name)

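The naming rule those comments describe can be boiled down to a small helper. This is a sketch of the intent, not the actual SetWorkloadInstanceName code; workloadInstanceName and its parameters are illustrative names:

package main

import "fmt"

// workloadInstanceName picks the rendered workload's name. revisionEnabled
// corresponds to isRevisionEnabled(traitDefs); latestRevision corresponds to
// c.Status.LatestRevision.Name.
func workloadInstanceName(revisionEnabled bool, componentName, latestRevision string) (string, error) {
	if revisionEnabled {
		if latestRevision == "" {
			return "", fmt.Errorf("component %q has no latest revision yet", componentName)
		}
		// e.g. a Deployment is named "frontend-v2" so two revisions can coexist during a rollout
		return latestRevision, nil
	}
	// e.g. a CloneSet-style workload upgrades in place under the stable component name
	return componentName, nil
}

func main() {
	name, _ := workloadInstanceName(true, "frontend", "frontend-v2")
	fmt.Println(name) // frontend-v2
}
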
@@ -126,10 +126,10 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (res reconcile.Result, retErr e

	// reconcile the rollout part of the spec given the target and source workload
	rolloutPlanController := rollout.NewRolloutPlanController(r, &appDeploy, r.record,
		&appDeploy.Spec.RolloutPlan, appDeploy.Status.RolloutStatus, targetWorkload, sourceWorkload)
		&appDeploy.Spec.RolloutPlan, &appDeploy.Status.RolloutStatus, targetWorkload, sourceWorkload)
	result, rolloutStatus := rolloutPlanController.Reconcile(ctx)
	// make sure that the new status is copied back
	appDeploy.Status.RolloutStatus = rolloutStatus
	appDeploy.Status.RolloutStatus = *rolloutStatus
	// update the appDeploy status
	return result, r.Update(ctx, &appDeploy)
}

@@ -5,18 +5,28 @@ import (
	"fmt"
	"strconv"
	"strings"
	"time"

	"github.com/crossplane/crossplane-runtime/pkg/fieldpath"
	mapset "github.com/deckarep/golang-set"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/apimachinery/pkg/util/wait"

	"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
	"github.com/oam-dev/kubevela/pkg/controller/common"
	"github.com/oam-dev/kubevela/pkg/oam"
)

// DefaultBackoff is the backoff we use in controller
var DefaultBackoff = wait.Backoff{
	Duration: 1 * time.Second,
	Factor:   2,
	Steps:    5,
	Jitter:   0.1,
}

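With Duration 1s, Factor 2, and Steps 5, a caller retries at roughly 1s, 2s, 4s, 8s, and 16s (each with ±10% jitter) before giving up. A runnable sketch of how DefaultBackoff is consumed via wait.ExponentialBackoff, in the same shape as checkForRevision earlier in this commit (isReady is a made-up condition):

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	backoff := wait.Backoff{Duration: 1 * time.Second, Factor: 2, Steps: 5, Jitter: 0.1}
	attempts := 0
	// return (true, nil) to stop, (false, nil) to retry, (false, err) to abort
	isReady := func() (bool, error) {
		attempts++
		return attempts >= 3, nil
	}
	if err := wait.ExponentialBackoff(backoff, isReady); err != nil {
		fmt.Println("gave up:", err) // wait.ErrWaitTimeout once Steps are exhausted
		return
	}
	fmt.Println("succeeded after", attempts, "attempts")
}
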
// LabelPodSpecable defines whether a workload has podSpec or not.
const LabelPodSpecable = "workload.oam.dev/podspecable"

@@ -27,6 +27,8 @@ const (
	LabelAppComponentRevision = "app.oam.dev/revision"
	// LabelOAMResourceType whether a CR is workload or trait
	LabelOAMResourceType = "app.oam.dev/resourceType"
	// LabelAppConfigHash records the Hash value of the application configuration
	LabelAppConfigHash = "app.oam.dev/appConfig-hash"

	// WorkloadTypeLabel indicates the type of the workloadDefinition
	WorkloadTypeLabel = "workload.oam.dev/type"
@@ -52,6 +54,16 @@ const (
	AnnotationLastAppliedConfig = "app.oam.dev/last-applied-configuration"

	// AnnotationAppRollout indicates that the application is still rolling out
	// the application controller will not reconcile it yet
	// the application controller should not reconcile it yet
	AnnotationAppRollout = "app.oam.dev/rollout-template"

	// AnnotationNewAppConfig indicates that the application configuration is new;
	// this is to enable the applicationConfiguration controller to handle the
	// first reconcile logic differently, similar to what a "finalizer" field does
	AnnotationNewAppConfig = "app.oam.dev/new-appConfig"

	// AnnotationNewComponent indicates that the component is new;
	// this is to enable any concerned controllers to handle the first component apply logic differently;
	// the value of the annotation is the name of the component revision
	AnnotationNewComponent = "app.oam.dev/new-component"
)

@@ -412,6 +412,14 @@ func Object2Map(obj interface{}) (map[string]interface{}, error) {
	return res, err
}

// Object2RawExtension converts an object to a rawExtension
func Object2RawExtension(obj interface{}) runtime.RawExtension {
	bts, _ := json.Marshal(obj)
	return runtime.RawExtension{
		Raw: bts,
	}
}

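Object2RawExtension and RawExtension2Map are inverses in the common case; a small usage sketch (assuming only the signatures shown in this diff, with the oamutil import alias used elsewhere in this commit):

package main

import (
	"fmt"

	oamutil "github.com/oam-dev/kubevela/pkg/oam/util"
)

func main() {
	// marshal an arbitrary object into RawExtension.Raw bytes ...
	raw := oamutil.Object2RawExtension(map[string]interface{}{"image": "nginx"})
	// ... and decode those bytes back into a generic map
	m, err := oamutil.RawExtension2Map(&raw)
	fmt.Println(m["image"], err) // nginx <nil>
}
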
// RawExtension2Map will convert rawExtension to map
func RawExtension2Map(raw *runtime.RawExtension) (map[string]interface{}, error) {
	if raw == nil {
@@ -464,7 +472,8 @@ func DeepHashObject(hasher hash.Hash, objectToWrite interface{}) {
}

// GetComponent will get Component and RevisionName by AppConfigComponent
func GetComponent(ctx context.Context, client client.Reader, acc v1alpha2.ApplicationConfigurationComponent, namespace string) (*v1alpha2.Component, string, error) {
func GetComponent(ctx context.Context, client client.Reader, acc v1alpha2.ApplicationConfigurationComponent,
	namespace string) (*v1alpha2.Component, string, error) {
	c := &v1alpha2.Component{}
	var revisionName string
	if acc.RevisionName != "" {

@@ -17,6 +17,8 @@ func JSONMarshal(o interface{}) []byte {
	return j
}

var _ types.GomegaMatcher = AlreadyExistMatcher{}

// AlreadyExistMatcher matches the error to be already exist
type AlreadyExistMatcher struct {
}
@@ -40,6 +42,8 @@ func (matcher AlreadyExistMatcher) NegatedFailureMessage(actual interface{}) (me
	return format.Message(actual, "not to be already exist")
}

var _ types.GomegaMatcher = NotFoundMatcher{}

// NotFoundMatcher matches the error to be not found.
type NotFoundMatcher struct {
}
@@ -70,6 +74,8 @@ func BeEquivalentToError(expected error) types.GomegaMatcher {
	}
}

var _ types.GomegaMatcher = ErrorMatcher{}

// ErrorMatcher matches errors.
type ErrorMatcher struct {
	ExpectedError error

@@ -26,8 +26,10 @@ import (
	"github.com/oam-dev/kubevela/pkg/appfile/api"
	"github.com/oam-dev/kubevela/pkg/appfile/template"
	cmdutil "github.com/oam-dev/kubevela/pkg/commands/util"
	ccom "github.com/oam-dev/kubevela/pkg/controller/common"
	"github.com/oam-dev/kubevela/pkg/oam"
	"github.com/oam-dev/kubevela/pkg/oam/discoverymapper"
	oamutil "github.com/oam-dev/kubevela/pkg/oam/util"
	"github.com/oam-dev/kubevela/pkg/server/apis"
	"github.com/oam-dev/kubevela/pkg/utils/common"
)
@@ -94,7 +96,7 @@ type DeleteOptions struct {
}

// ListApplications lists all applications
func ListApplications(ctx context.Context, c client.Client, opt Option) ([]apis.ApplicationMeta, error) {
func ListApplications(ctx context.Context, c client.Reader, opt Option) ([]apis.ApplicationMeta, error) {
	var applicationMetaList applicationMetaList
	appConfigList, err := ListApplicationConfigurations(ctx, c, opt)
	if err != nil {
@@ -137,7 +139,7 @@ func ListApplicationConfigurations(ctx context.Context, c client.Reader, opt Opt
}

// ListComponents will list all components for dashboard
func ListComponents(ctx context.Context, c client.Client, opt Option) ([]apis.ComponentMeta, error) {
func ListComponents(ctx context.Context, c client.Reader, opt Option) ([]apis.ComponentMeta, error) {
	var componentMetaList componentMetaList
	var appConfigList corev1alpha2.ApplicationConfigurationList
	var err error
@@ -147,7 +149,7 @@ func ListComponents(ctx context.Context, c client.Client, opt Option) ([]apis.Co

	for _, a := range appConfigList.Items {
		for _, com := range a.Spec.Components {
			component, err := cmdutil.GetComponent(ctx, c, com.ComponentName, opt.Namespace)
			component, _, err := oamutil.GetComponent(ctx, c, com, opt.Namespace)
			if err != nil {
				return componentMetaList, err
			}
@@ -155,7 +157,7 @@ func ListComponents(ctx context.Context, c client.Client, opt Option) ([]apis.Co
				Name:        com.ComponentName,
				Status:      types.StatusDeployed,
				CreatedTime: a.ObjectMeta.CreationTimestamp.String(),
				Component:   component,
				Component:   *component,
				AppConfig:   a,
				App:         a.Name,
			})
@@ -166,7 +168,8 @@ func ListComponents(ctx context.Context, c client.Client, opt Option) ([]apis.Co
}

// RetrieveApplicationStatusByName will get app status
func RetrieveApplicationStatusByName(ctx context.Context, c client.Client, applicationName string, namespace string) (apis.ApplicationMeta, error) {
func RetrieveApplicationStatusByName(ctx context.Context, c client.Reader, applicationName string,
	namespace string) (apis.ApplicationMeta, error) {
	var applicationMeta apis.ApplicationMeta
	var appConfig corev1alpha2.ApplicationConfiguration
	if err := c.Get(ctx, client.ObjectKey{Name: applicationName, Namespace: namespace}, &appConfig); err != nil {
@@ -182,14 +185,13 @@ func RetrieveApplicationStatusByName(ctx context.Context, c client.Client, appli
	applicationMeta.CreatedTime = appConfig.CreationTimestamp.Format(time.RFC3339)

	for _, com := range appConfig.Spec.Components {
		componentName := com.ComponentName
		component, err := cmdutil.GetComponent(ctx, c, componentName, namespace)
		component, revisionName, err := oamutil.GetComponent(ctx, c, com, namespace)
		if err != nil {
			return applicationMeta, err
		}

		applicationMeta.Components = append(applicationMeta.Components, apis.ComponentMeta{
			Name:     componentName,
			Name:     ccom.ExtractComponentName(revisionName),
			Status:   status,
			Workload: component.Spec.Workload,
			Traits:   com.Traits,

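Using ExtractComponentName here keeps the dashboard showing the stable component name even though GetComponent now resolves a versioned revision. For instance (sketch; the ccom alias matches the import added above):

package main

import (
	"fmt"

	ccom "github.com/oam-dev/kubevela/pkg/controller/common"
)

func main() {
	rev := ccom.ConstructRevisionName("myweb", 2) // "myweb-v2"
	fmt.Println(ccom.ExtractComponentName(rev))   // "myweb"
}
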
@@ -9,7 +9,8 @@ import (
)

// RetrieveComponent will get component status
func RetrieveComponent(ctx context.Context, c client.Client, applicationName, componentName, namespace string) (apis.ComponentMeta, error) {
func RetrieveComponent(ctx context.Context, c client.Reader, applicationName, componentName,
	namespace string) (apis.ComponentMeta, error) {
	var componentMeta apis.ComponentMeta
	applicationMeta, err := RetrieveApplicationStatusByName(ctx, c, applicationName, namespace)
	if err != nil {

@@ -135,24 +135,19 @@ func executeApplyOptions(ctx context.Context, existing, desired runtime.Object,

// MustBeControllableBy requires that the existing object is controllable by an
// object with the supplied UID. An object is controllable if its controller
// reference matches the supplied UID, or it has no controller reference. An
// error will be returned if the current object cannot be controlled by the
// supplied UID.
// ACKNOWLEDGMENTS: The code was based in part on the source code of
// - github.com/crossplane/crossplane-runtime/pkg/resource/resource.go#L274
// reference includes the supplied UID.
// There can be multiple controllers, and that's legitimate as long as one of them matches the UID
func MustBeControllableBy(u types.UID) ApplyOption {

	return func(_ context.Context, existing, _ runtime.Object) error {
		if existing == nil {
	return func(_ context.Context, _, newInstance runtime.Object) error {
		if newInstance == nil {
			return nil
		}
		c := metav1.GetControllerOf(existing.(metav1.Object))
		if c == nil {
			return nil
		owners := newInstance.(metav1.Object).GetOwnerReferences()
		for _, owner := range owners {
			if owner.Controller != nil && *owner.Controller && owner.UID == u {
				return nil
			}
		}

		if c.UID != u {
			return errors.Errorf("existing object is not controlled by UID %q", u)
		}
		return nil
		return errors.Errorf("existing object is not controlled by UID %q", u)
	}
}

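The behavioral change above is that adoption of controller-less objects is no longer allowed, and that any one matching controller reference among several is enough. A standalone sketch that mirrors the new loop (controllableBy is an illustrative helper, not the exported API):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/utils/pointer"
)

// controllableBy scans all owner references and accepts if any controller
// reference carries the supplied UID.
func controllableBy(owners []metav1.OwnerReference, u types.UID) bool {
	for _, owner := range owners {
		if owner.Controller != nil && *owner.Controller && owner.UID == u {
			return true
		}
	}
	return false
}

func main() {
	shared := []metav1.OwnerReference{
		{UID: types.UID("someone-else"), Controller: pointer.BoolPtr(true)},
		{UID: types.UID("mine"), Controller: pointer.BoolPtr(true)},
	}
	fmt.Println(controllableBy(shared, types.UID("mine"))) // true: shared control is fine
	fmt.Println(controllableBy(nil, types.UID("mine")))    // false: no adoption any more
}
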
@@ -14,6 +14,7 @@ import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/utils/pointer"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

@@ -298,56 +299,62 @@ func TestCreator(t *testing.T) {

func TestMustBeControllableBy(t *testing.T) {
	uid := types.UID("very-unique-string")
	controller := true
	type args struct {
		ctx     context.Context
		current runtime.Object
		desired runtime.Object
	}
	ctx := context.TODO()

	cases := map[string]struct {
		reason string
		u      types.UID
		args   args
		want   error
		reason  string
		u       types.UID
		current runtime.Object
		want    error
	}{
		"NoExistingObject": {
			reason: "No error should be returned if no existing object",
		},
		"Adoptable": {
			reason: "A current object with no controller reference may be adopted and controlled",
			u:      uid,
			args: args{
				current: &testObject{},
			},
			reason:  "A current object with no controller reference is not controllable",
			u:       uid,
			current: &testObject{},
			want:    errors.Errorf("existing object is not controlled by UID %q", uid),
		},
		"ControlledBySuppliedUID": {
			reason: "A current object that is already controlled by the supplied UID is controllable",
			u:      uid,
			args: args{
				current: &testObject{ObjectMeta: metav1.ObjectMeta{OwnerReferences: []metav1.OwnerReference{{
					UID:        uid,
					Controller: &controller,
				}}}},
			},
			current: &testObject{ObjectMeta: metav1.ObjectMeta{OwnerReferences: []metav1.OwnerReference{{
				UID:        uid,
				Controller: pointer.BoolPtr(true),
			}}}},
		},
		"ControlledBySomeoneElse": {
			reason: "A current object that is already controlled by a different UID is not controllable",
			u:      uid,
			args: args{
				current: &testObject{ObjectMeta: metav1.ObjectMeta{OwnerReferences: []metav1.OwnerReference{{
			current: &testObject{ObjectMeta: metav1.ObjectMeta{OwnerReferences: []metav1.OwnerReference{
				{
					UID:        types.UID("some-other-uid"),
					Controller: &controller,
				}}}},
			},
					Controller: pointer.BoolPtr(true),
				},
			}}},
			want: errors.Errorf("existing object is not controlled by UID %q", uid),
		},
		"SharedControlledBySomeoneElse": {
			reason: "An object that also has a controller with a different UID is controllable, as long as one controller matches",
			u:      uid,
			current: &testObject{ObjectMeta: metav1.ObjectMeta{OwnerReferences: []metav1.OwnerReference{
				{
					UID:        types.UID("some-other-uid"),
					Controller: pointer.BoolPtr(true),
				},
				{
					UID:        uid,
					Controller: pointer.BoolPtr(true),
				},
			}}},
		},
	}

	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			ao := MustBeControllableBy(tc.u)
			err := ao(tc.args.ctx, tc.args.current, tc.args.desired)
			err := ao(ctx, nil, tc.current)
			if diff := cmp.Diff(tc.want, err, test.EquateErrors()); diff != "" {
				t.Errorf("\n%s\nMustBeControllableBy(...)(...): -want error, +got error\n%s\n", tc.reason, diff)
			}

@@ -16,6 +16,7 @@ import (
	"cuelang.org/go/cue"
	"cuelang.org/go/cue/load"
	"cuelang.org/go/encoding/openapi"
	"github.com/AlecAivazis/survey/v2"
	certmanager "github.com/wonderflow/cert-manager-api/pkg/apis/certmanager/v1"
	k8sruntime "k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
@@ -149,3 +150,23 @@ func RealtimePrintCommandOutput(cmd *exec.Cmd, logFile string) error {
	}
	return nil
}

// AskToChooseOneService will ask users to select one service of the application if more than one exists
func AskToChooseOneService(svcNames []string) (string, error) {
	if len(svcNames) == 0 {
		return "", fmt.Errorf("no service exist in the application")
	}
	if len(svcNames) == 1 {
		return svcNames[0], nil
	}
	prompt := &survey.Select{
		Message: "You have multiple services in your app. Please choose one service: ",
		Options: svcNames,
	}
	var svcName string
	err := survey.AskOne(prompt, &svcName)
	if err != nil {
		return "", fmt.Errorf("choosing service err %w", err)
	}
	return svcName, nil
}

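Typical call site for the helper above (a sketch: the package path is assumed from the surrounding imports, and the service names are invented):

package main

import (
	"fmt"

	"github.com/oam-dev/kubevela/pkg/utils/common" // assumed location of AskToChooseOneService
)

func main() {
	// With a single candidate the prompt is skipped entirely.
	svc, err := common.AskToChooseOneService([]string{"frontend"})
	fmt.Println(svc, err) // frontend <nil>
	// With several candidates, survey renders an interactive selector on the terminal.
}
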
@@ -12,7 +12,7 @@ import (
func DefaultRolloutPlan(rollout *v1alpha1.RolloutPlan) {
	if rollout.TargetSize != nil && rollout.NumBatches != nil && rollout.RolloutBatches == nil {
		// create the rollout batches based on the total size and the number of batches if they are not set;
		// leave it for the validator to reject if they are both set
		// leave it for the validator to validate if they are both set
		numBatches := int(*rollout.NumBatches)
		totalSize := int(*rollout.TargetSize)
		// create the batch array
@@ -33,9 +33,10 @@ func ValidateCreate(rollout *v1alpha1.RolloutPlan, rootPath *field.Path) field.E
	var allErrs field.ErrorList
	// The total number of pods in the batches should match the current target resource pod size

	// The TargetSize and NumBatches are mutually exclusive to RolloutBatches
	if rollout.NumBatches != nil && rollout.RolloutBatches != nil {
		allErrs = append(allErrs, field.Duplicate(rootPath.Child("numBatches"), rollout.NumBatches))
	// NumBatches has to be the size of RolloutBatches
	if rollout.NumBatches != nil && len(rollout.RolloutBatches) != int(*rollout.NumBatches) {
		allErrs = append(allErrs, field.Invalid(rootPath.Child("numBatches"), rollout.NumBatches,
			"the num batches does not match the rollout batch size"))
	}

	// validate the webhooks

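Concretely, the new rule rejects a plan whose NumBatches disagrees with the declared batch list. A tiny standalone sketch of the invariant (validNumBatches mirrors the check above; it is not the webhook's API):

package main

import "fmt"

// validNumBatches: when numBatches is set, it must equal the number of
// declared rollout batches.
func validNumBatches(numBatches *int32, declaredBatches int) bool {
	return numBatches == nil || int(*numBatches) == declaredBatches
}

func main() {
	n := int32(3)
	fmt.Println(validNumBatches(&n, 3)) // true
	fmt.Println(validNumBatches(&n, 2)) // false: "the num batches does not match the rollout batch size"
}
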
@@ -8,10 +8,11 @@ import (
	"github.com/pkg/errors"
	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/util/retry"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"

	"github.com/oam-dev/kubevela/pkg/oam/discoverymapper"
	"github.com/oam-dev/kubevela/pkg/oam/util"
)
@@ -59,19 +60,27 @@ func (v *ValidatingAppConfig) PrepareForValidation(ctx context.Context, c client
	for _, acc := range ac.Spec.Components {
		tmp := ValidatingComponent{}
		tmp.appConfigComponent = acc

		if acc.ComponentName != "" {
			tmp.compName = acc.ComponentName
		} else {
			tmp.compName = acc.RevisionName
		}
		comp, _, err := util.GetComponent(ctx, c, acc, ac.Namespace)
		var comp *v1alpha2.Component
		accCopy := *acc.DeepCopy()
		err := wait.ExponentialBackoff(retry.DefaultBackoff, func() (bool, error) {
			var err error
			comp, _, err = util.GetComponent(ctx, c, accCopy, ac.Namespace)
			if err != nil && !k8serrors.IsNotFound(err) {
				return false, err
			}
			return true, nil
		})
		if err != nil {
			return errors.Wrapf(err, errFmtGetComponent, tmp.compName)
		}
		tmp.component = *comp

		// get worload content from raw
		// get workload content from raw
		var wlContentObject map[string]interface{}
		if err := json.Unmarshal(comp.Spec.Workload.Raw, &wlContentObject); err != nil {
			return errors.Wrapf(err, errFmtUnmarshalWorkload, tmp.compName)

@@ -166,7 +166,8 @@ var _ = Describe("Versioning mechanism of components", func() {
	revisionNameV1 := cmpV1.Status.LatestRevision.Name
	By("Get corresponding ControllerRevision of Component v1")
	cr := &appsv1.ControllerRevision{}
	Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: revisionNameV1}, cr)).Should(Succeed())
	Expect(k8sClient.Get(ctx,
		client.ObjectKey{Namespace: namespace, Name: revisionNameV1}, cr)).ShouldNot(HaveOccurred())
	By("Check revision seq number")
	Expect(cr.Revision).Should(Equal(int64(1)))