diff --git a/apis/core.oam.dev/v1alpha2/appdeploy_types.go b/apis/core.oam.dev/v1alpha2/appdeploy_types.go index 4cb6bc5e6..636a23a6a 100644 --- a/apis/core.oam.dev/v1alpha2/appdeploy_types.go +++ b/apis/core.oam.dev/v1alpha2/appdeploy_types.go @@ -26,11 +26,11 @@ import ( type AppRolloutSpec struct { // TargetAppRevisionName contains the name of the applicationConfiguration that we need to upgrade to. // Here we use an applicationConfiguration as a revision of an application, thus the name alone is suffice - TargetAppRevisionName string `json:"targetApplicationName"` + TargetAppRevisionName string `json:"targetAppRevisionName"` - // SourceApplicationName contains the name of the applicationConfiguration that we need to upgrade from. + // SourceAppRevisionName contains the name of the applicationConfiguration that we need to upgrade from. // it can be empty only when it's the first time to deploy the application - SourceApplicationName string `json:"sourceApplicationName,omitempty"` + SourceAppRevisionName string `json:"sourceAppRevisionName,omitempty"` // The list of component to upgrade in the application. // We only support single component application so far @@ -51,13 +51,13 @@ type AppRolloutSpec struct { type AppRolloutStatus struct { v1alpha1.RolloutStatus `json:",inline"` - // LastTargetAppName contains the name of the app that we upgraded to + // LastUpgradedTargetAppRevision contains the name of the app that we upgraded to // We will restart the rollout if this is not the same as the spec - LastTargetAppName string `json:"lastTargetAppName"` + LastUpgradedTargetAppRevision string `json:"lastTargetAppRevision"` - // LastSourceAppName contains the name of the app that we need to upgrade from. + // LastSourceAppRevision contains the name of the app that we need to upgrade from. 
// We will restart the rollout if this is not the same as the spec - LastSourceAppName string `json:"LastSourceAppName,omitempty"` + LastSourceAppRevision string `json:"LastSourceAppRevision,omitempty"` } // AppRollout is the Schema for the AppRollout API diff --git a/apis/core.oam.dev/v1alpha2/core_types.go b/apis/core.oam.dev/v1alpha2/core_types.go index 668ebd12a..1fb158b89 100644 --- a/apis/core.oam.dev/v1alpha2/core_types.go +++ b/apis/core.oam.dev/v1alpha2/core_types.go @@ -515,8 +515,10 @@ const ( RollingTemplating RollingStatus = "RollingTemplating" // RollingTemplated means that the AC is rolling and it already templated RollingTemplated RollingStatus = "RollingTemplated" - // RollingComplete means that the AC is not rolling - RollingComplete RollingStatus = "RollingComplete" + // RollingCompleted means that the AC is the new active revision of the application + RollingCompleted RollingStatus = "RollingCompleted" + // InactiveAfterRollingCompleted means that the AC is the inactive revision after the rolling is finished + InactiveAfterRollingCompleted RollingStatus = "InactiveAfterRollingCompleted" ) // An ApplicationConfigurationStatus represents the observed state of a diff --git a/apis/standard.oam.dev/v1alpha1/rollout_plan_types.go b/apis/standard.oam.dev/v1alpha1/rollout_plan_types.go index efee298b1..edd82e912 100644 --- a/apis/standard.oam.dev/v1alpha1/rollout_plan_types.go +++ b/apis/standard.oam.dev/v1alpha1/rollout_plan_types.go @@ -34,9 +34,9 @@ const ( type RollingState string const ( - // VerifyingState verify that the rollout setting is valid and the controller can locate both the + // VerifyingSpecState verify that the rollout setting is valid and the controller can locate both the // target and the source - VerifyingState RollingState = "verifying" + VerifyingSpecState RollingState = "verifyingSpec" // InitializingState rollout is initializing all the new resources InitializingState RollingState = "initializing" // RollingInBatchesState 
rolling out @@ -45,6 +45,8 @@ const ( FinalisingState RollingState = "finalising" // RolloutSucceedState rollout successfully completed to match the desired target state RolloutSucceedState RollingState = "rolloutSucceed" + // RolloutFailingState finalize the rollout before giving up, possibly clean up the old resources, adjust traffic + RolloutFailingState RollingState = "rolloutFailing" // RolloutFailedState rollout is failed, the target replica is not reached // we can not move forward anymore // we will let the client to decide when or whether to revert @@ -180,7 +182,7 @@ type RolloutWebhookPayload struct { Namespace string `json:"namespace"` // Phase of the rollout - Phase RollingState `json:"phase"` + Phase string `json:"phase"` // Metadata (key-value pairs) are the extra data send to this webhook Metadata map[string]string `json:"metadata,omitempty"` @@ -242,6 +244,6 @@ type RolloutStatus struct { // UpgradedReplicas is the number of Pods upgraded by the rollout controller UpgradedReplicas int32 `json:"upgradedReplicas"` - // UpgradedReplicas is the number of Pods upgraded by the rollout controller that have a Ready Condition. + // UpgradedReadyReplicas is the number of Pods upgraded by the rollout controller that have a Ready Condition. 
UpgradedReadyReplicas int32 `json:"upgradedReadyReplicas"` } diff --git a/apis/standard.oam.dev/v1alpha1/rollout_state.go b/apis/standard.oam.dev/v1alpha1/rollout_state.go index 4c4361c21..6e85cacc8 100644 --- a/apis/standard.oam.dev/v1alpha1/rollout_state.go +++ b/apis/standard.oam.dev/v1alpha1/rollout_state.go @@ -39,8 +39,8 @@ const ( // FinishedOneBatchEvent indicates that we have successfully rolled out one batch FinishedOneBatchEvent RolloutEvent = "FinishedOneBatchEvent" - // BatchRolloutVerifyingEvent indicates that we are waiting for the approval of resume one batch - BatchRolloutVerifyingEvent RolloutEvent = "BatchRolloutVerifyingEvent" + // RolloutOneBatchEvent indicates that we have rollout one batch + RolloutOneBatchEvent RolloutEvent = "RolloutOneBatchEvent" // OneBatchAvailableEvent indicates that the batch resource is considered available // this events comes after we have examine the pod readiness check and traffic shifting if needed @@ -58,24 +58,30 @@ const ( // These are valid conditions of the rollout. const ( - // RolloutSpecVerified indicates that the rollout spec matches the resource we have in the cluster - RolloutSpecVerified runtimev1alpha1.ConditionType = "RolloutSpecVerified" - // RolloutInitialized means all the needed initialization work is done - RolloutInitialized runtimev1alpha1.ConditionType = "Initialized" + // RolloutSpecVerifying indicates that the rollout just started with verification + RolloutSpecVerifying runtimev1alpha1.ConditionType = "RolloutSpecVerifying" + // RolloutInitializing means we start to initialize the cluster + RolloutInitializing runtimev1alpha1.ConditionType = "RolloutInitializing" // RolloutInProgress means we are upgrading resources. 
- RolloutInProgress runtimev1alpha1.ConditionType = "Ready" + RolloutInProgress runtimev1alpha1.ConditionType = "RolloutInProgress" + // RolloutFinalizing means the rollout is finalizing + RolloutFinalizing runtimev1alpha1.ConditionType = "RolloutFinalizing" + // RolloutFailing means the rollout is failing + RolloutFailing runtimev1alpha1.ConditionType = "RolloutFailing" + // RolloutFailed means that the rollout failed. + RolloutFailed runtimev1alpha1.ConditionType = "RolloutFailed" // RolloutSucceed means that the rollout is done. - RolloutSucceed runtimev1alpha1.ConditionType = "Succeed" - // BatchInitialized - BatchInitialized runtimev1alpha1.ConditionType = "BatchInitialized" - // BatchInRolled - BatchInRolled runtimev1alpha1.ConditionType = "BatchInRolled" - // BatchVerified - BatchVerified runtimev1alpha1.ConditionType = "BatchVerified" + RolloutSucceed runtimev1alpha1.ConditionType = "RolloutSucceed" + // BatchInitializing + BatchInitializing runtimev1alpha1.ConditionType = "BatchInitializing" + // BatchPaused + BatchPaused runtimev1alpha1.ConditionType = "BatchPaused" + // BatchVerifying + BatchVerifying runtimev1alpha1.ConditionType = "BatchVerifying" // BatchRolloutFailed BatchRolloutFailed runtimev1alpha1.ConditionType = "BatchRolloutFailed" - // BatchFinalized - BatchFinalized runtimev1alpha1.ConditionType = "BatchFinalized" + // BatchFinalizing + BatchFinalizing runtimev1alpha1.ConditionType = "BatchFinalizing" // BatchReady BatchReady runtimev1alpha1.ConditionType = "BatchReady" ) @@ -106,22 +112,25 @@ const invalidBatchRollingStateTransition = "the batch rolling state transition f func (r *RolloutStatus) getRolloutConditionType() runtimev1alpha1.ConditionType { // figure out which condition type should we put in the condition depends on its state switch r.RollingState { - case VerifyingState: - return RolloutSpecVerified + case VerifyingSpecState: + return RolloutSpecVerifying case InitializingState: - return RolloutInitialized + return 
RolloutInitializing case RollingInBatchesState: switch r.BatchRollingState { case BatchInitializingState: - return BatchInitialized + return BatchInitializing case BatchVerifyingState: - return BatchVerified + return BatchVerifying case BatchFinalizingState: - return BatchFinalized + return BatchFinalizing + + case BatchRolloutFailedState: + return BatchRolloutFailed case BatchReadyState: return BatchReady @@ -131,10 +140,19 @@ func (r *RolloutStatus) getRolloutConditionType() runtimev1alpha1.ConditionType } case FinalisingState: + return RolloutFinalizing + + case RolloutFailingState: + return RolloutFailing + + case RolloutFailedState: + return RolloutFailed + + case RolloutSucceedState: return RolloutSucceed default: - return RolloutSucceed + return RolloutFailed } } @@ -151,6 +169,52 @@ func (r *RolloutStatus) RolloutFailed(reason string) { r.RollingState = RolloutFailedState } +// RolloutFailing is a special state transition that always moves the rollout state to the failing state +func (r *RolloutStatus) RolloutFailing(reason string) { + // set the condition first which depends on the state + r.SetConditions(NewNegativeCondition(r.getRolloutConditionType(), reason)) + r.RollingState = RolloutFailingState + r.BatchRollingState = BatchInitializingState +} + +// ResetStatus resets the status of the rollout to start from beginning +func (r *RolloutStatus) ResetStatus() { + r.NewPodTemplateIdentifier = "" + r.LastAppliedPodTemplateIdentifier = "" + r.RollingState = VerifyingSpecState + r.BatchRollingState = BatchInitializingState + r.CurrentBatch = 0 + r.UpgradedReplicas = 0 + r.UpgradedReadyReplicas = 0 +} + +// SetRolloutCondition sets the supplied condition, replacing any existing condition +// of the same type unless they are identical. 
+func (r *RolloutStatus) SetRolloutCondition(new runtimev1alpha1.Condition) { + exists := false + for i, existing := range r.Conditions { + if existing.Type != new.Type { + continue + } + // we want to update the condition when the LTT changes + if existing.Type == new.Type && + existing.Status == new.Status && + existing.Reason == new.Reason && + existing.Message == new.Message && + existing.LastTransitionTime == new.LastTransitionTime { + exists = true + continue + } + + r.Conditions[i] = new + exists = true + } + if !exists { + r.Conditions = append(r.Conditions, new) + } + +} + // StateTransition is the center place to do rollout state transition // it returns an error if the transition is invalid // it changes the coming rollout state if it's valid @@ -171,10 +235,10 @@ func (r *RolloutStatus) StateTransition(event RolloutEvent) { } switch rollingState { - case VerifyingState: + case VerifyingSpecState: if event == RollingSpecVerifiedEvent { r.RollingState = InitializingState - r.SetConditions(NewPositiveCondition(r.getRolloutConditionType())) + r.SetRolloutCondition(NewPositiveCondition(r.getRolloutConditionType())) return } panic(fmt.Errorf(invalidRollingStateTransition, rollingState, event)) @@ -183,7 +247,7 @@ func (r *RolloutStatus) StateTransition(event RolloutEvent) { if event == RollingInitializedEvent { r.RollingState = RollingInBatchesState r.BatchRollingState = BatchInitializingState - r.SetConditions(NewPositiveCondition(r.getRolloutConditionType())) + r.SetRolloutCondition(NewPositiveCondition(r.getRolloutConditionType())) return } panic(fmt.Errorf(invalidRollingStateTransition, rollingState, event)) @@ -195,27 +259,31 @@ func (r *RolloutStatus) StateTransition(event RolloutEvent) { case FinalisingState: if event == RollingFinalizedEvent { r.RollingState = RolloutSucceedState - r.SetConditions(NewPositiveCondition(r.getRolloutConditionType())) + r.SetRolloutCondition(NewPositiveCondition(r.getRolloutConditionType())) + return + } + 
panic(fmt.Errorf(invalidRollingStateTransition, rollingState, event)) + + case RolloutFailingState: + if event == RollingFinalizedEvent { + r.RollingState = RolloutFailedState + r.SetRolloutCondition(NewPositiveCondition(r.getRolloutConditionType())) return } panic(fmt.Errorf(invalidRollingStateTransition, rollingState, event)) case RolloutSucceedState: if event == WorkloadModifiedEvent { - r.RollingState = VerifyingState - r.SetConditions(NewPositiveCondition(r.getRolloutConditionType())) + r.SetRolloutCondition(NewNegativeCondition(r.getRolloutConditionType(), "Rollout Spec is modified")) + r.ResetStatus() return } panic(fmt.Errorf(invalidRollingStateTransition, rollingState, event)) case RolloutFailedState: if event == WorkloadModifiedEvent { - r.RollingState = VerifyingState - r.SetConditions(NewPositiveCondition(r.getRolloutConditionType())) - return - } - if event == RollingFailedEvent { - // no op + r.SetRolloutCondition(NewNegativeCondition(r.getRolloutConditionType(), "Rollout Spec is modified")) + r.ResetStatus() return } panic(fmt.Errorf(invalidRollingStateTransition, rollingState, event)) @@ -243,9 +311,9 @@ func (r *RolloutStatus) batchStateTransition(event RolloutEvent) { panic(fmt.Errorf(invalidBatchRollingStateTransition, batchRollingState, event)) case BatchInRollingState: - if event == BatchRolloutVerifyingEvent { + if event == RolloutOneBatchEvent { r.BatchRollingState = BatchVerifyingState - r.SetConditions(NewPositiveCondition(r.getRolloutConditionType())) + r.SetRolloutCondition(NewPositiveCondition(r.getRolloutConditionType())) return } panic(fmt.Errorf(invalidBatchRollingStateTransition, batchRollingState, event)) @@ -253,7 +321,7 @@ func (r *RolloutStatus) batchStateTransition(event RolloutEvent) { case BatchVerifyingState: if event == OneBatchAvailableEvent { r.BatchRollingState = BatchFinalizingState - r.SetConditions(NewPositiveCondition(r.getRolloutConditionType())) + 
r.SetRolloutCondition(NewPositiveCondition(r.getRolloutConditionType())) return } panic(fmt.Errorf(invalidBatchRollingStateTransition, batchRollingState, event)) @@ -261,14 +329,14 @@ func (r *RolloutStatus) batchStateTransition(event RolloutEvent) { case BatchFinalizingState: if event == FinishedOneBatchEvent { r.BatchRollingState = BatchReadyState - r.SetConditions(NewPositiveCondition(r.getRolloutConditionType())) + r.SetRolloutCondition(NewPositiveCondition(r.getRolloutConditionType())) return } if event == AllBatchFinishedEvent { // transition out of the batch loop r.BatchRollingState = BatchReadyState r.RollingState = FinalisingState - r.SetConditions(NewPositiveCondition(r.getRolloutConditionType())) + r.SetRolloutCondition(NewPositiveCondition(r.getRolloutConditionType())) return } panic(fmt.Errorf(invalidBatchRollingStateTransition, batchRollingState, event)) @@ -277,7 +345,7 @@ func (r *RolloutStatus) batchStateTransition(event RolloutEvent) { if event == BatchRolloutApprovedEvent { r.BatchRollingState = BatchInitializingState r.CurrentBatch++ - r.SetConditions(NewPositiveCondition(r.getRolloutConditionType())) + r.SetRolloutCondition(NewPositiveCondition(r.getRolloutConditionType())) return } panic(fmt.Errorf(invalidBatchRollingStateTransition, batchRollingState, event)) diff --git a/charts/vela-core/crds/core.oam.dev_applicationdeployments.yaml b/charts/vela-core/crds/core.oam.dev_applicationdeployments.yaml deleted file mode 100644 index 2754aad0a..000000000 --- a/charts/vela-core/crds/core.oam.dev_applicationdeployments.yaml +++ /dev/null @@ -1,351 +0,0 @@ - ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.2.4 - creationTimestamp: null - name: applicationdeployments.core.oam.dev -spec: - group: core.oam.dev - names: - categories: - - oam - kind: ApplicationDeployment - listKind: ApplicationDeploymentList - plural: applicationdeployments - singular: 
applicationdeployment - scope: Namespaced - versions: - - name: v1alpha2 - schema: - openAPIV3Schema: - description: ApplicationDeployment is the Schema for the ApplicationDeployment API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: ApplicationDeploymentSpec defines how to describe an upgrade between different application - properties: - componentList: - description: 'The list of component to upgrade in the application. 
We only support single component application so far TODO: (RZ) Support multiple components in an application' - items: - type: string - type: array - revertOnDelete: - description: RevertOnDelete revert the rollout when the rollout CR is deleted It will remove the target application from the kubernetes if it's set to true - type: boolean - rolloutPlan: - description: RolloutPlan is the details on how to rollout the resources - properties: - batchPartition: - description: All pods in the batches up to the batchPartition (included) will have the target resource specification while the rest still have the source resource This is designed for the operators to manually rollout Default is the the number of batches which will rollout all the batches - format: int32 - type: integer - canaryMetric: - description: CanaryMetric provides a way for the rollout process to automatically check certain metrics before complete the process - items: - description: CanaryMetric holds the reference to metrics used for canary analysis - properties: - interval: - description: Interval represents the windows size - type: string - metricsRange: - description: Range value accepted for this metric - properties: - max: - anyOf: - - type: integer - - type: string - description: Maximum value - x-kubernetes-int-or-string: true - min: - anyOf: - - type: integer - - type: string - description: Minimum value - x-kubernetes-int-or-string: true - type: object - name: - description: Name of the metric - type: string - templateRef: - description: TemplateRef references a metric template object - properties: - apiVersion: - description: APIVersion of the referenced object. - type: string - kind: - description: Kind of the referenced object. - type: string - name: - description: Name of the referenced object. - type: string - uid: - description: UID of the referenced object. 
- type: string - required: - - apiVersion - - kind - - name - type: object - required: - - name - type: object - type: array - numBatches: - description: The number of batches, default = 1 - format: int32 - type: integer - paused: - description: Paused the rollout, default is false - type: boolean - rolloutBatches: - description: The exact distribution among batches. its size has to be exactly the same as the NumBatches (if set) The total number cannot exceed the targetSize or the size of the source resource We will IGNORE the last batch's replica field if it's a percentage since round errors can lead to inaccurate sum We highly recommend to leave the last batch's replica field empty - items: - description: RolloutBatch is used to describe how the each batch rollout should be - properties: - batchRolloutWebhooks: - description: RolloutWebhooks provides a way for the batch rollout to interact with an external process - items: - description: RolloutWebhook holds the reference to external checks used for canary analysis - properties: - expectedStatus: - description: ExpectedStatus contains all the expected http status code that we will accept as success - items: - type: integer - type: array - metadata: - additionalProperties: - type: string - description: Metadata (key-value pairs) for this webhook - type: object - method: - description: Method the HTTP call method, default is POST - type: string - name: - description: Name of this webhook - type: string - type: - description: Type of this webhook - type: string - url: - description: URL address of this webhook - type: string - required: - - name - - type - - url - type: object - type: array - canaryMetric: - description: CanaryMetric provides a way for the batch rollout process to automatically check certain metrics before moving to the next batch - items: - description: CanaryMetric holds the reference to metrics used for canary analysis - properties: - interval: - description: Interval represents the windows size 
- type: string - metricsRange: - description: Range value accepted for this metric - properties: - max: - anyOf: - - type: integer - - type: string - description: Maximum value - x-kubernetes-int-or-string: true - min: - anyOf: - - type: integer - - type: string - description: Minimum value - x-kubernetes-int-or-string: true - type: object - name: - description: Name of the metric - type: string - templateRef: - description: TemplateRef references a metric template object - properties: - apiVersion: - description: APIVersion of the referenced object. - type: string - kind: - description: Kind of the referenced object. - type: string - name: - description: Name of the referenced object. - type: string - uid: - description: UID of the referenced object. - type: string - required: - - apiVersion - - kind - - name - type: object - required: - - name - type: object - type: array - instanceInterval: - description: The wait time, in seconds, between instances upgrades, default = 0 - format: int32 - type: integer - maxUnavailable: - anyOf: - - type: integer - - type: string - description: MaxUnavailable is the max allowed number of pods that is unavailable during the upgrade. We will mark the batch as ready as long as there are less or equal number of pods unavailable than this number. 
default = 0 - x-kubernetes-int-or-string: true - podList: - description: The list of Pods to get upgraded it is mutually exclusive with the Replicas field - items: - type: string - type: array - replicas: - anyOf: - - type: integer - - type: string - description: 'Replicas is the number of pods to upgrade in this batch it can be an absolute number (ex: 5) or a percentage of total pods we will ignore the percentage of the last batch to just fill the gap it is mutually exclusive with the PodList field' - x-kubernetes-int-or-string: true - type: object - type: array - rolloutStrategy: - description: RolloutStrategy defines strategies for the rollout plan - type: string - rolloutWebhooks: - description: RolloutWebhooks provide a way for the rollout to interact with an external process - items: - description: RolloutWebhook holds the reference to external checks used for canary analysis - properties: - expectedStatus: - description: ExpectedStatus contains all the expected http status code that we will accept as success - items: - type: integer - type: array - metadata: - additionalProperties: - type: string - description: Metadata (key-value pairs) for this webhook - type: object - method: - description: Method the HTTP call method, default is POST - type: string - name: - description: Name of this webhook - type: string - type: - description: Type of this webhook - type: string - url: - description: URL address of this webhook - type: string - required: - - name - - type - - url - type: object - type: array - targetSize: - description: The size of the target resource. The default is the same as the size of the source resource. - format: int32 - type: integer - type: object - sourceApplicationName: - description: SourceApplicationName contains the name of the applicationConfiguration that we need to upgrade from. 
it can be empty only when it's the first time to deploy the application - type: string - targetApplicationName: - description: TargetApplicationName contains the name of the applicationConfiguration that we need to upgrade to. Here we use an applicationConfiguration as a revision of an application, thus the name alone is suffice - type: string - required: - - rolloutPlan - - targetApplicationName - type: object - status: - description: ApplicationDeploymentStatus defines the observed state of ApplicationDeployment - properties: - batchRollingState: - description: BatchRollingState only meaningful when the Status is rolling - type: string - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: LastTransitionTime is the last time this condition transitioned from one status to another. - format: date-time - type: string - message: - description: A Message containing details about this condition's last transition from one status to another, if any. - type: string - reason: - description: A Reason for this condition's last transition from one status to another. - type: string - status: - description: Status of this condition; is it currently True, False, or Unknown? - type: string - type: - description: Type of this condition. At most one of each condition type may apply to a resource at any point in time. 
- type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - currentBatch: - description: The current batch the rollout is working on/blocked it starts from 0 - format: int32 - type: integer - lastAppliedPodTemplateIdentifier: - description: lastAppliedPodTemplateIdentifier is a string that uniquely represent the last pod template each workload type could use different ways to identify that so we cannot compare between resources We update this field only after a successful rollout - type: string - lastSourceApplicationName: - description: LastSourceApplicationName contains the name of the application that we need to upgrade from. We will restart the rollout if this is not the same as the spec - type: string - lastTargetApplicationName: - description: LastTargetApplicationName contains the name of the application that we upgraded to We will restart the rollout if this is not the same as the spec - type: string - rollingState: - description: RollingState is the Rollout State - type: string - targetGeneration: - description: NewPodTemplateIdentifier is a string that uniquely represent the new pod template each workload type could use different ways to identify that so we cannot compare between resources - type: string - upgradedReadyReplicas: - description: UpgradedReplicas is the number of Pods upgraded by the rollout controller that have a Ready Condition. 
- format: int32 - type: integer - upgradedReplicas: - description: UpgradedReplicas is the number of Pods upgraded by the rollout controller - format: int32 - type: integer - required: - - currentBatch - - lastTargetApplicationName - - rollingState - - upgradedReadyReplicas - - upgradedReplicas - type: object - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/charts/vela-core/crds/core.oam.dev_applications.yaml b/charts/vela-core/crds/core.oam.dev_applications.yaml index 1a1f9d2c7..4a3812333 100644 --- a/charts/vela-core/crds/core.oam.dev_applications.yaml +++ b/charts/vela-core/crds/core.oam.dev_applications.yaml @@ -410,7 +410,7 @@ spec: description: NewPodTemplateIdentifier is a string that uniquely represent the new pod template each workload type could use different ways to identify that so we cannot compare between resources type: string upgradedReadyReplicas: - description: UpgradedReplicas is the number of Pods upgraded by the rollout controller that have a Ready Condition. + description: UpgradedReadyReplicas is the number of Pods upgraded by the rollout controller that have a Ready Condition. format: int32 type: integer upgradedReplicas: diff --git a/charts/vela-core/crds/core.oam.dev_approllouts.yaml b/charts/vela-core/crds/core.oam.dev_approllouts.yaml index 9ff678571..7833834b2 100644 --- a/charts/vela-core/crds/core.oam.dev_approllouts.yaml +++ b/charts/vela-core/crds/core.oam.dev_approllouts.yaml @@ -260,21 +260,21 @@ spec: format: int32 type: integer type: object - sourceApplicationName: - description: SourceApplicationName contains the name of the applicationConfiguration that we need to upgrade from. it can be empty only when it's the first time to deploy the application + sourceAppRevisionName: + description: SourceAppRevisionName contains the name of the applicationConfiguration that we need to upgrade from. 
it can be empty only when it's the first time to deploy the application type: string - targetApplicationName: + targetAppRevisionName: description: TargetAppRevisionName contains the name of the applicationConfiguration that we need to upgrade to. Here we use an applicationConfiguration as a revision of an application, thus the name alone is suffice type: string required: - rolloutPlan - - targetApplicationName + - targetAppRevisionName type: object status: description: AppRolloutStatus defines the observed state of AppRollout properties: - LastSourceAppName: - description: LastSourceAppName contains the name of the app that we need to upgrade from. We will restart the rollout if this is not the same as the spec + LastSourceAppRevision: + description: LastSourceAppRevision contains the name of the app that we need to upgrade from. We will restart the rollout if this is not the same as the spec type: string batchRollingState: description: BatchRollingState only meaningful when the Status is rolling @@ -314,8 +314,8 @@ spec: lastAppliedPodTemplateIdentifier: description: lastAppliedPodTemplateIdentifier is a string that uniquely represent the last pod template each workload type could use different ways to identify that so we cannot compare between resources We update this field only after a successful rollout type: string - lastTargetAppName: - description: LastTargetAppName contains the name of the app that we upgraded to We will restart the rollout if this is not the same as the spec + lastTargetAppRevision: + description: LastUpgradedTargetAppRevision contains the name of the app that we upgraded to We will restart the rollout if this is not the same as the spec type: string rollingState: description: RollingState is the Rollout State @@ -324,7 +324,7 @@ spec: description: NewPodTemplateIdentifier is a string that uniquely represent the new pod template each workload type could use different ways to identify that so we cannot compare between resources type: 
string upgradedReadyReplicas: - description: UpgradedReplicas is the number of Pods upgraded by the rollout controller that have a Ready Condition. + description: UpgradedReadyReplicas is the number of Pods upgraded by the rollout controller that have a Ready Condition. format: int32 type: integer upgradedReplicas: @@ -333,7 +333,7 @@ spec: type: integer required: - currentBatch - - lastTargetAppName + - lastTargetAppRevision - rollingState - upgradedReadyReplicas - upgradedReplicas diff --git a/charts/vela-core/crds/standard.oam.dev_rollouttraits.yaml b/charts/vela-core/crds/standard.oam.dev_rollouttraits.yaml index d31072f0a..540c02d1a 100644 --- a/charts/vela-core/crds/standard.oam.dev_rollouttraits.yaml +++ b/charts/vela-core/crds/standard.oam.dev_rollouttraits.yaml @@ -345,7 +345,7 @@ spec: description: NewPodTemplateIdentifier is a string that uniquely represent the new pod template each workload type could use different ways to identify that so we cannot compare between resources type: string upgradedReadyReplicas: - description: UpgradedReplicas is the number of Pods upgraded by the rollout controller that have a Ready Condition. + description: UpgradedReadyReplicas is the number of Pods upgraded by the rollout controller that have a Ready Condition. format: int32 type: integer upgradedReplicas: diff --git a/charts/vela-core/templates/admission-webhooks/mutatingWebhookConfiguration.yaml b/charts/vela-core/templates/admission-webhooks/mutatingWebhookConfiguration.yaml index bad29419b..150685926 100644 --- a/charts/vela-core/templates/admission-webhooks/mutatingWebhookConfiguration.yaml +++ b/charts/vela-core/templates/admission-webhooks/mutatingWebhookConfiguration.yaml @@ -35,6 +35,32 @@ webhooks: admissionReviewVersions: - v1beta1 timeoutSeconds: 5 + - clientConfig: + caBundle: Cg== + service: + name: {{ template "kubevela.name" . 
}}-webhook + namespace: {{ .Release.Namespace }} + path: /mutating-core-oam-dev-v1alpha2-approllout + {{- if .Values.admissionWebhooks.patch.enabled }} + failurePolicy: Ignore + {{- else }} + failurePolicy: Fail + {{- end }} + name: mutating.core.oam.dev.v1alpha2.approllouts + rules: + - apiGroups: + - core.oam.dev + apiVersions: + - v1alpha2 + operations: + - CREATE + - UPDATE + resources: + - approllouts + scope: Namespaced + admissionReviewVersions: + - v1beta1 + timeoutSeconds: 5 - clientConfig: caBundle: Cg== service: diff --git a/charts/vela-core/templates/admission-webhooks/validatingWebhookConfiguration.yaml b/charts/vela-core/templates/admission-webhooks/validatingWebhookConfiguration.yaml index 829885270..72ff416d3 100644 --- a/charts/vela-core/templates/admission-webhooks/validatingWebhookConfiguration.yaml +++ b/charts/vela-core/templates/admission-webhooks/validatingWebhookConfiguration.yaml @@ -35,6 +35,32 @@ webhooks: admissionReviewVersions: - v1beta1 timeoutSeconds: 5 + - clientConfig: + caBundle: Cg== + service: + name: {{ template "kubevela.name" . 
}}-webhook + namespace: {{ .Release.Namespace }} + path: /validating-core-oam-dev-v1alpha2-approllout + {{- if .Values.admissionWebhooks.patch.enabled }} + failurePolicy: Ignore + {{- else }} + failurePolicy: {{ .Values.admissionWebhooks.failurePolicy }} + {{- end }} + name: validating.core.oam.dev.v1alpha2.approllouts + rules: + - apiGroups: + - core.oam.dev + apiVersions: + - v1alpha2 + operations: + - CREATE + - UPDATE + resources: + - approllouts + scope: Namespaced + admissionReviewVersions: + - v1beta1 + timeoutSeconds: 5 - clientConfig: caBundle: Cg== service: diff --git a/cmd/core/main.go b/cmd/core/main.go index d3b8d97e8..5afbf91d6 100644 --- a/cmd/core/main.go +++ b/cmd/core/main.go @@ -27,7 +27,6 @@ import ( crdv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" - "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/log/zap" @@ -120,11 +119,12 @@ func main() { } else { w = os.Stdout } - klog.InitFlags(nil) - ctrl.SetLogger(zap.New(func(o *zap.Options) { + + logger := zap.New(func(o *zap.Options) { o.Development = true o.DestWritter = w - })) + }) + ctrl.SetLogger(logger) setupLog.Info(fmt.Sprintf("KubeVela Version: %s, GIT Revision: %s.", version.VelaVersion, version.GitRevision)) setupLog.Info(fmt.Sprintf("Disable Capabilities: %s.", disableCaps)) diff --git a/docs/examples/rollout/README.md b/docs/examples/rollout/README.md index e69661c3c..698877e70 100644 --- a/docs/examples/rollout/README.md +++ b/docs/examples/rollout/README.md @@ -30,16 +30,21 @@ kubectl apply -f docs/examples/rollout/app-target.yaml ``` Wait for the applicationConfiguration "test-rolling-v2" `Rolling Status` to be "RollingTemplated" -5. Apply the application deployment with pause +5. Mark the application as normal +```shell +kubectl apply -f docs/examples/rollout/app-target-done.yaml +``` + +6. 
Apply the application deployment with pause ```shell kubectl apply -f docs/examples/rollout/app-deploy-pause.yaml ``` Check the status of the ApplicationDeployment and see the step by step rolling out. This rollout will pause after the second batch. -5. Apply the application deployment that completes the rollout +7. Apply the application deployment that completes the rollout ```shell kubectl apply -f docs/examples/rollout/app-deploy-finish.yaml ``` -Check the status of the ApplicationDeployment and see the rollout completes and the -applicationDeployment's "Rolling State" becomes `rolloutSucceed` +Check the status of the ApplicationDeployment and see the rollout completes, and the +applicationDeployment's "Rolling State" becomes `rolloutSucceed` \ No newline at end of file diff --git a/docs/examples/rollout/app-deploy-finish.yaml b/docs/examples/rollout/app-deploy-finish.yaml index e82fbbed0..b6f402d18 100644 --- a/docs/examples/rollout/app-deploy-finish.yaml +++ b/docs/examples/rollout/app-deploy-finish.yaml @@ -1,11 +1,11 @@ apiVersion: core.oam.dev/v1alpha2 -kind: ApplicationDeployment +kind: AppRollout metadata: name: rolling-test spec: # application (revision) reference - targetApplicationName: test-rolling-v2 - sourceApplicationName: test-rolling-v1 + targetAppRevisionName: test-rolling-v2 + sourceAppRevisionName: test-rolling-v1 # HPA reference (optional) componentList: - metrics-provider @@ -14,5 +14,4 @@ spec: rolloutBatches: - replicas: 10% - replicas: 2 - - replicas: 2 - batchPartition: 2 + - replicas: 2 \ No newline at end of file diff --git a/docs/examples/rollout/app-deploy-pause.yaml b/docs/examples/rollout/app-deploy-pause.yaml index 4a5f12ff7..436691cd6 100644 --- a/docs/examples/rollout/app-deploy-pause.yaml +++ b/docs/examples/rollout/app-deploy-pause.yaml @@ -1,11 +1,11 @@ apiVersion: core.oam.dev/v1alpha2 -kind: ApplicationDeployment +kind: AppRollout metadata: name: rolling-test spec: # application (revision) reference - targetApplicationName: 
test-rolling-v2 - sourceApplicationName: test-rolling-v1 + targetAppRevisionName: test-rolling-v2 + sourceAppRevisionName: test-rolling-v1 # HPA reference (optional) componentList: - metrics-provider diff --git a/test/e2e-test/testdata/rollout/app-deploy-pause.yaml b/docs/examples/rollout/app-deploy-revert.yaml similarity index 50% rename from test/e2e-test/testdata/rollout/app-deploy-pause.yaml rename to docs/examples/rollout/app-deploy-revert.yaml index c3623a19c..14ff86916 100644 --- a/test/e2e-test/testdata/rollout/app-deploy-pause.yaml +++ b/docs/examples/rollout/app-deploy-revert.yaml @@ -1,18 +1,17 @@ apiVersion: core.oam.dev/v1alpha2 -kind: ApplicationDeployment +kind: AppRollout metadata: - name: rolling-e2e-test + name: rolling-test spec: # application (revision) reference - targetApplicationName: test-e2e-rolling-v2 - sourceApplicationName: test-e2e-rolling-v1 + targetAppRevisionName: test-rolling-v3 + sourceAppRevisionName: test-rolling-v2 # HPA reference (optional) componentList: - metrics-provider rolloutPlan: rolloutStrategy: "IncreaseFirst" rolloutBatches: - - replicas: 10% - - replicas: 2 - - replicas: 2 - batchPartition: 1 + - replicas: 20% + - replicas: 30% + - replicas: 50% \ No newline at end of file diff --git a/test/e2e-test/testdata/rollout/app-target.yaml b/docs/examples/rollout/app-target-done.yaml similarity index 69% rename from test/e2e-test/testdata/rollout/app-target.yaml rename to docs/examples/rollout/app-target-done.yaml index bb1310307..7a09bfb70 100644 --- a/test/e2e-test/testdata/rollout/app-target.yaml +++ b/docs/examples/rollout/app-target-done.yaml @@ -1,10 +1,7 @@ apiVersion: core.oam.dev/v1alpha2 kind: Application metadata: - name: test-e2e-rolling - annotations: - "app.oam.dev/rolling-components": "metrics-provider" - "app.oam.dev/rollout-template": "true" + name: test-rolling spec: components: - name: metrics-provider diff --git a/e2e/application/application_test.go b/e2e/application/application_test.go index 
1c17fa0ac..45b60e2fe 100644 --- a/e2e/application/application_test.go +++ b/e2e/application/application_test.go @@ -12,7 +12,6 @@ import ( "github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2" "github.com/oam-dev/kubevela/e2e" - "github.com/oam-dev/kubevela/pkg/controller/utils" ) var ( @@ -76,7 +75,7 @@ var ApplicationStatusDeeplyContext = func(context string, applicationName, workl gomega.Eventually(func() int { appConfig := &v1alpha2.ApplicationConfiguration{} _ = k8sclient.Get(context2.Background(), client.ObjectKey{ - Name: utils.ConstructRevisionName(applicationName, app.Status.LatestRevision.Revision), + Name: app.Status.LatestRevision.Name, Namespace: "default"}, appConfig) return len(appConfig.Status.Workloads) }, 180*time.Second, 1*time.Second).ShouldNot(gomega.Equal(0)) diff --git a/legacy/charts/vela-core-legacy/crds/core.oam.dev_applicationdeployments.yaml b/legacy/charts/vela-core-legacy/crds/core.oam.dev_applicationdeployments.yaml deleted file mode 100644 index 7bcd97e84..000000000 --- a/legacy/charts/vela-core-legacy/crds/core.oam.dev_applicationdeployments.yaml +++ /dev/null @@ -1,352 +0,0 @@ - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.2.4 - creationTimestamp: null - name: applicationdeployments.core.oam.dev -spec: - group: core.oam.dev - names: - categories: - - oam - kind: ApplicationDeployment - listKind: ApplicationDeploymentList - plural: applicationdeployments - singular: applicationdeployment - scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - description: ApplicationDeployment is the Schema for the ApplicationDeployment API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: ApplicationDeploymentSpec defines how to describe an upgrade between different application - properties: - componentList: - description: 'The list of component to upgrade in the application. We only support single component application so far TODO: (RZ) Support multiple components in an application' - items: - type: string - type: array - revertOnDelete: - description: RevertOnDelete revert the rollout when the rollout CR is deleted It will remove the target application from the kubernetes if it's set to true - type: boolean - rolloutPlan: - description: RolloutPlan is the details on how to rollout the resources - properties: - batchPartition: - description: All pods in the batches up to the batchPartition (included) will have the target resource specification while the rest still have the source resource This is designed for the operators to manually rollout Default is the the number of batches which will rollout all the batches - format: int32 - type: integer - canaryMetric: - description: CanaryMetric provides a way for the rollout process to automatically check certain metrics before complete the process - items: - description: CanaryMetric holds the reference to metrics used for canary analysis - properties: - interval: - description: Interval represents the windows size - type: string - metricsRange: - description: Range value accepted for this metric - properties: - max: - anyOf: - - type: integer - - type: string - description: Maximum value - 
x-kubernetes-int-or-string: true - min: - anyOf: - - type: integer - - type: string - description: Minimum value - x-kubernetes-int-or-string: true - type: object - name: - description: Name of the metric - type: string - templateRef: - description: TemplateRef references a metric template object - properties: - apiVersion: - description: APIVersion of the referenced object. - type: string - kind: - description: Kind of the referenced object. - type: string - name: - description: Name of the referenced object. - type: string - uid: - description: UID of the referenced object. - type: string - required: - - apiVersion - - kind - - name - type: object - required: - - name - type: object - type: array - numBatches: - description: The number of batches, default = 1 - format: int32 - type: integer - paused: - description: Paused the rollout, default is false - type: boolean - rolloutBatches: - description: The exact distribution among batches. its size has to be exactly the same as the NumBatches (if set) The total number cannot exceed the targetSize or the size of the source resource We will IGNORE the last batch's replica field if it's a percentage since round errors can lead to inaccurate sum We highly recommend to leave the last batch's replica field empty - items: - description: RolloutBatch is used to describe how the each batch rollout should be - properties: - batchRolloutWebhooks: - description: RolloutWebhooks provides a way for the batch rollout to interact with an external process - items: - description: RolloutWebhook holds the reference to external checks used for canary analysis - properties: - expectedStatus: - description: ExpectedStatus contains all the expected http status code that we will accept as success - items: - type: integer - type: array - metadata: - additionalProperties: - type: string - description: Metadata (key-value pairs) for this webhook - type: object - method: - description: Method the HTTP call method, default is POST - type: 
string - name: - description: Name of this webhook - type: string - type: - description: Type of this webhook - type: string - url: - description: URL address of this webhook - type: string - required: - - name - - type - - url - type: object - type: array - canaryMetric: - description: CanaryMetric provides a way for the batch rollout process to automatically check certain metrics before moving to the next batch - items: - description: CanaryMetric holds the reference to metrics used for canary analysis - properties: - interval: - description: Interval represents the windows size - type: string - metricsRange: - description: Range value accepted for this metric - properties: - max: - anyOf: - - type: integer - - type: string - description: Maximum value - x-kubernetes-int-or-string: true - min: - anyOf: - - type: integer - - type: string - description: Minimum value - x-kubernetes-int-or-string: true - type: object - name: - description: Name of the metric - type: string - templateRef: - description: TemplateRef references a metric template object - properties: - apiVersion: - description: APIVersion of the referenced object. - type: string - kind: - description: Kind of the referenced object. - type: string - name: - description: Name of the referenced object. - type: string - uid: - description: UID of the referenced object. - type: string - required: - - apiVersion - - kind - - name - type: object - required: - - name - type: object - type: array - instanceInterval: - description: The wait time, in seconds, between instances upgrades, default = 0 - format: int32 - type: integer - maxUnavailable: - anyOf: - - type: integer - - type: string - description: MaxUnavailable is the max allowed number of pods that is unavailable during the upgrade. We will mark the batch as ready as long as there are less or equal number of pods unavailable than this number. 
default = 0 - x-kubernetes-int-or-string: true - podList: - description: The list of Pods to get upgraded it is mutually exclusive with the Replicas field - items: - type: string - type: array - replicas: - anyOf: - - type: integer - - type: string - description: 'Replicas is the number of pods to upgrade in this batch it can be an absolute number (ex: 5) or a percentage of total pods we will ignore the percentage of the last batch to just fill the gap it is mutually exclusive with the PodList field' - x-kubernetes-int-or-string: true - type: object - type: array - rolloutStrategy: - description: RolloutStrategy defines strategies for the rollout plan - type: string - rolloutWebhooks: - description: RolloutWebhooks provide a way for the rollout to interact with an external process - items: - description: RolloutWebhook holds the reference to external checks used for canary analysis - properties: - expectedStatus: - description: ExpectedStatus contains all the expected http status code that we will accept as success - items: - type: integer - type: array - metadata: - additionalProperties: - type: string - description: Metadata (key-value pairs) for this webhook - type: object - method: - description: Method the HTTP call method, default is POST - type: string - name: - description: Name of this webhook - type: string - type: - description: Type of this webhook - type: string - url: - description: URL address of this webhook - type: string - required: - - name - - type - - url - type: object - type: array - targetSize: - description: The size of the target resource. The default is the same as the size of the source resource. - format: int32 - type: integer - type: object - sourceApplicationName: - description: SourceApplicationName contains the name of the applicationConfiguration that we need to upgrade from. 
it can be empty only when it's the first time to deploy the application - type: string - targetApplicationName: - description: TargetApplicationName contains the name of the applicationConfiguration that we need to upgrade to. Here we use an applicationConfiguration as a revision of an application, thus the name alone is suffice - type: string - required: - - rolloutPlan - - targetApplicationName - type: object - status: - description: ApplicationDeploymentStatus defines the observed state of ApplicationDeployment - properties: - batchRollingState: - description: BatchRollingState only meaningful when the Status is rolling - type: string - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: LastTransitionTime is the last time this condition transitioned from one status to another. - format: date-time - type: string - message: - description: A Message containing details about this condition's last transition from one status to another, if any. - type: string - reason: - description: A Reason for this condition's last transition from one status to another. - type: string - status: - description: Status of this condition; is it currently True, False, or Unknown? - type: string - type: - description: Type of this condition. At most one of each condition type may apply to a resource at any point in time. 
- type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - currentBatch: - description: The current batch the rollout is working on/blocked it starts from 0 - format: int32 - type: integer - lastAppliedPodTemplateIdentifier: - description: lastAppliedPodTemplateIdentifier is a string that uniquely represent the last pod template each workload type could use different ways to identify that so we cannot compare between resources We update this field only after a successful rollout - type: string - lastSourceApplicationName: - description: LastSourceApplicationName contains the name of the application that we need to upgrade from. We will restart the rollout if this is not the same as the spec - type: string - lastTargetApplicationName: - description: LastTargetApplicationName contains the name of the application that we upgraded to We will restart the rollout if this is not the same as the spec - type: string - rollingState: - description: RollingState is the Rollout State - type: string - targetGeneration: - description: NewPodTemplateIdentifier is a string that uniquely represent the new pod template each workload type could use different ways to identify that so we cannot compare between resources - type: string - upgradedReadyReplicas: - description: UpgradedReplicas is the number of Pods upgraded by the rollout controller that have a Ready Condition. 
- format: int32 - type: integer - upgradedReplicas: - description: UpgradedReplicas is the number of Pods upgraded by the rollout controller - format: int32 - type: integer - required: - - currentBatch - - lastTargetApplicationName - - rollingState - - upgradedReadyReplicas - - upgradedReplicas - type: object - type: object - version: v1alpha2 - versions: - - name: v1alpha2 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/legacy/charts/vela-core-legacy/crds/core.oam.dev_applications.yaml b/legacy/charts/vela-core-legacy/crds/core.oam.dev_applications.yaml index 6abea5cff..f2aa256df 100644 --- a/legacy/charts/vela-core-legacy/crds/core.oam.dev_applications.yaml +++ b/legacy/charts/vela-core-legacy/crds/core.oam.dev_applications.yaml @@ -410,7 +410,7 @@ spec: description: NewPodTemplateIdentifier is a string that uniquely represent the new pod template each workload type could use different ways to identify that so we cannot compare between resources type: string upgradedReadyReplicas: - description: UpgradedReplicas is the number of Pods upgraded by the rollout controller that have a Ready Condition. + description: UpgradedReadyReplicas is the number of Pods upgraded by the rollout controller that have a Ready Condition. format: int32 type: integer upgradedReplicas: diff --git a/legacy/charts/vela-core-legacy/crds/core.oam.dev_approllouts.yaml b/legacy/charts/vela-core-legacy/crds/core.oam.dev_approllouts.yaml index 305b8d0e0..e1b4dddcf 100644 --- a/legacy/charts/vela-core-legacy/crds/core.oam.dev_approllouts.yaml +++ b/legacy/charts/vela-core-legacy/crds/core.oam.dev_approllouts.yaml @@ -260,21 +260,21 @@ spec: format: int32 type: integer type: object - sourceApplicationName: - description: SourceApplicationName contains the name of the applicationConfiguration that we need to upgrade from. 
it can be empty only when it's the first time to deploy the application + sourceAppRevisionName: + description: SourceAppRevisionName contains the name of the applicationConfiguration that we need to upgrade from. it can be empty only when it's the first time to deploy the application type: string - targetApplicationName: + targetAppRevisionName: description: TargetAppRevisionName contains the name of the applicationConfiguration that we need to upgrade to. Here we use an applicationConfiguration as a revision of an application, thus the name alone is suffice type: string required: - rolloutPlan - - targetApplicationName + - targetAppRevisionName type: object status: description: AppRolloutStatus defines the observed state of AppRollout properties: - LastSourceAppName: - description: LastSourceAppName contains the name of the app that we need to upgrade from. We will restart the rollout if this is not the same as the spec + LastSourceAppRevision: + description: LastSourceAppRevision contains the name of the app that we need to upgrade from. 
We will restart the rollout if this is not the same as the spec type: string batchRollingState: description: BatchRollingState only meaningful when the Status is rolling @@ -314,8 +314,8 @@ spec: lastAppliedPodTemplateIdentifier: description: lastAppliedPodTemplateIdentifier is a string that uniquely represent the last pod template each workload type could use different ways to identify that so we cannot compare between resources We update this field only after a successful rollout type: string - lastTargetAppName: - description: LastTargetAppName contains the name of the app that we upgraded to We will restart the rollout if this is not the same as the spec + lastTargetAppRevision: + description: LastUpgradedTargetAppRevision contains the name of the app that we upgraded to We will restart the rollout if this is not the same as the spec type: string rollingState: description: RollingState is the Rollout State @@ -324,7 +324,7 @@ spec: description: NewPodTemplateIdentifier is a string that uniquely represent the new pod template each workload type could use different ways to identify that so we cannot compare between resources type: string upgradedReadyReplicas: - description: UpgradedReplicas is the number of Pods upgraded by the rollout controller that have a Ready Condition. + description: UpgradedReadyReplicas is the number of Pods upgraded by the rollout controller that have a Ready Condition. 
format: int32 type: integer upgradedReplicas: @@ -333,7 +333,7 @@ spec: type: integer required: - currentBatch - - lastTargetAppName + - lastTargetAppRevision - rollingState - upgradedReadyReplicas - upgradedReplicas diff --git a/legacy/charts/vela-core-legacy/crds/standard.oam.dev_rollouttraits.yaml b/legacy/charts/vela-core-legacy/crds/standard.oam.dev_rollouttraits.yaml index b0590596b..28e82a4b1 100644 --- a/legacy/charts/vela-core-legacy/crds/standard.oam.dev_rollouttraits.yaml +++ b/legacy/charts/vela-core-legacy/crds/standard.oam.dev_rollouttraits.yaml @@ -345,7 +345,7 @@ spec: description: NewPodTemplateIdentifier is a string that uniquely represent the new pod template each workload type could use different ways to identify that so we cannot compare between resources type: string upgradedReadyReplicas: - description: UpgradedReplicas is the number of Pods upgraded by the rollout controller that have a Ready Condition. + description: UpgradedReadyReplicas is the number of Pods upgraded by the rollout controller that have a Ready Condition. 
format: int32 type: integer upgradedReplicas: diff --git a/pkg/appfile/parser.go b/pkg/appfile/parser.go index 6e42e85fa..3482e4acc 100644 --- a/pkg/appfile/parser.go +++ b/pkg/appfile/parser.go @@ -142,7 +142,7 @@ func (p *Parser) GenerateAppFile(ctx context.Context, name string, app *v1alpha2 wds = append(wds, wd) } appfile.Workloads = wds - appfile.RevisionName, _ = utils.GetAppRevision(app) + appfile.RevisionName, _ = utils.GetAppNextRevision(app) return appfile, nil } diff --git a/pkg/controller/common/rollout/rollout_plan_controller.go b/pkg/controller/common/rollout/rollout_plan_controller.go index 6fcf12151..1b5d32912 100644 --- a/pkg/controller/common/rollout/rollout_plan_controller.go +++ b/pkg/controller/common/rollout/rollout_plan_controller.go @@ -44,7 +44,7 @@ func NewRolloutPlanController(client client.Client, parentController oam.Object, initializedRolloutStatus := rolloutStatus.DeepCopy() // use Mutation webhook? if len(initializedRolloutStatus.RollingState) == 0 { - initializedRolloutStatus.RollingState = v1alpha1.VerifyingState + initializedRolloutStatus.ResetStatus() } if len(initializedRolloutStatus.BatchRollingState) == 0 { initializedRolloutStatus.BatchRollingState = v1alpha1.BatchInitializingState @@ -69,12 +69,13 @@ func (r *Controller) Reconcile(ctx context.Context) (res reconcile.Result, statu } klog.InfoS("rollout status", "rollout state", r.rolloutStatus.RollingState, "batch rolling state", r.rolloutStatus.BatchRollingState, "current batch", r.rolloutStatus.CurrentBatch, "upgraded Replicas", - r.rolloutStatus.UpgradedReplicas) + r.rolloutStatus.UpgradedReplicas, "ready Replicas", r.rolloutStatus.UpgradedReadyReplicas) defer func() { - klog.InfoS("Finished reconciling rollout plan", "rollout state", status.RollingState, + klog.InfoS("Finished one round of reconciling rollout plan", "rollout state", status.RollingState, "batch rolling state", status.BatchRollingState, "current batch", status.CurrentBatch, - "upgraded Replicas", 
status.UpgradedReplicas, "reconcile result ", res) + "upgraded Replicas", status.UpgradedReplicas, "ready Replicas", r.rolloutStatus.UpgradedReadyReplicas, + "reconcile result ", res) }() status = r.rolloutStatus @@ -98,21 +99,32 @@ func (r *Controller) Reconcile(ctx context.Context) (res reconcile.Result, statu } switch r.rolloutStatus.RollingState { - case v1alpha1.VerifyingState: - r.rolloutStatus = workloadController.Verify(ctx) + case v1alpha1.VerifyingSpecState: + if r.rolloutStatus, err = workloadController.VerifySpec(ctx); err != nil { + // we can fail it right away, everything after initialized need to be finalized + r.rolloutStatus.RolloutFailed(err.Error()) + } else { + r.rolloutStatus.StateTransition(v1alpha1.RollingSpecVerifiedEvent) + } case v1alpha1.InitializingState: if err := r.initializeRollout(ctx); err == nil { - r.rolloutStatus = workloadController.Initialize(ctx) + if r.rolloutStatus, err = workloadController.Initialize(ctx); err == nil { + r.rolloutStatus.StateTransition(v1alpha1.RollingInitializedEvent) + } } case v1alpha1.RollingInBatchesState: r.reconcileBatchInRolling(ctx, workloadController) + case v1alpha1.RolloutFailingState: + if r.rolloutStatus, err = workloadController.Finalize(ctx, false); err == nil { + r.finalizeRollout(ctx) + } + case v1alpha1.FinalisingState: - r.rolloutStatus = workloadController.Finalize(ctx) - // if we are still going to finalize it - if r.rolloutStatus.RollingState == v1alpha1.FinalisingState { + if r.rolloutStatus, err = workloadController.Finalize(ctx, true); err == nil { + // if we are still going to finalize it r.finalizeRollout(ctx) } @@ -129,12 +141,12 @@ func (r *Controller) Reconcile(ctx context.Context) (res reconcile.Result, statu return res, r.rolloutStatus } -// reconcile logic when we are in the middle of rollout +// reconcile logic when we are in the middle of rollout, we have to go through finalizing state before succeed or fail func (r *Controller) reconcileBatchInRolling(ctx 
context.Context, workloadController workloads.WorkloadController) { if r.rolloutSpec.Paused { r.recorder.Event(r.parentController, event.Normal("Rollout paused", "Rollout paused")) - r.rolloutStatus.SetConditions(v1alpha1.NewPositiveCondition("Paused")) + r.rolloutStatus.SetConditions(v1alpha1.NewPositiveCondition(v1alpha1.BatchPaused)) return } @@ -144,7 +156,11 @@ func (r *Controller) reconcileBatchInRolling(ctx context.Context, workloadContro r.rolloutStatus.RolloutRetry(err.Error()) return } - r.validateRollingBatchStatus(int(replicas)) + + if err := r.validateRollingBatchStatus(int(replicas)); err != nil { + r.rolloutStatus.RolloutFailing(err.Error()) + return + } switch r.rolloutStatus.BatchRollingState { case v1alpha1.BatchInitializingState: @@ -152,17 +168,28 @@ func (r *Controller) reconcileBatchInRolling(ctx context.Context, workloadContro case v1alpha1.BatchInRollingState: // still rolling the batch, the batch rolling is not completed yet - r.rolloutStatus = workloadController.RolloutOneBatchPods(ctx) + if r.rolloutStatus, err = workloadController.RolloutOneBatchPods(ctx); err != nil { + r.rolloutStatus.RolloutFailing(err.Error()) + } else { + r.rolloutStatus.StateTransition(v1alpha1.RolloutOneBatchEvent) + } case v1alpha1.BatchVerifyingState: // verifying if the application is ready to roll // need to check if they meet the availability requirements in the rollout spec. 
// TODO: evaluate any metrics/analysis - r.rolloutStatus = workloadController.CheckOneBatchPods(ctx) + finished := false + if r.rolloutStatus, finished = workloadController.CheckOneBatchPods(ctx); finished { + r.rolloutStatus.StateTransition(v1alpha1.OneBatchAvailableEvent) + } case v1alpha1.BatchFinalizingState: - // all the pods in the are available - r.finalizeOneBatch(ctx) + // finalize one batch + if r.rolloutStatus, err = workloadController.FinalizeOneBatch(ctx); err != nil { + r.rolloutStatus.RolloutFailing(err.Error()) + } else { + r.finalizeOneBatch(ctx) + } case v1alpha1.BatchReadyState: // all the pods in the are upgraded and their state are ready @@ -175,15 +202,16 @@ func (r *Controller) reconcileBatchInRolling(ctx context.Context, workloadContro } // all the common initialize work before we rollout +// TODO: fail the rollout if the webhook call is explicitly rejected (through http status code) func (r *Controller) initializeRollout(ctx context.Context) error { // call the pre-rollout webhooks for _, rw := range r.rolloutSpec.RolloutWebhooks { if rw.Type == v1alpha1.InitializeRolloutHook { - err := callWebhook(ctx, r.parentController, v1alpha1.InitializingState, rw) + err := callWebhook(ctx, r.parentController, string(v1alpha1.InitializingState), rw) if err != nil { klog.ErrorS(err, "failed to invoke a webhook", "webhook name", rw.Name, "webhook end point", rw.URL) - r.rolloutStatus.RolloutFailed("failed to invoke a webhook") + r.rolloutStatus.RolloutRetry("failed to invoke a webhook") return err } klog.InfoS("successfully invoked a pre rollout webhook", "webhook name", rw.Name, "webhook end point", @@ -200,11 +228,11 @@ func (r *Controller) initializeOneBatch(ctx context.Context) { // call all the pre-batch rollout webhooks for _, rh := range rolloutHooks { if rh.Type == v1alpha1.PreBatchRolloutHook { - err := callWebhook(ctx, r.parentController, v1alpha1.InitializingState, rh) + err := callWebhook(ctx, r.parentController, 
string(v1alpha1.BatchInitializingState), rh) if err != nil { klog.ErrorS(err, "failed to invoke a webhook", "webhook name", rh.Name, "webhook end point", rh.URL) - r.rolloutStatus.RolloutFailed("failed to invoke a webhook") + r.rolloutStatus.RolloutRetry("failed to invoke a webhook") return } klog.InfoS("successfully invoked a pre batch webhook", "webhook name", rh.Name, "webhook end point", @@ -240,11 +268,11 @@ func (r *Controller) finalizeOneBatch(ctx context.Context) { // call all the post-batch rollout webhooks for _, rh := range rolloutHooks { if rh.Type == v1alpha1.PostBatchRolloutHook { - err := callWebhook(ctx, r.parentController, v1alpha1.FinalisingState, rh) + err := callWebhook(ctx, r.parentController, string(v1alpha1.BatchFinalizingState), rh) if err != nil { klog.ErrorS(err, "failed to invoke a webhook", "webhook name", rh.Name, "webhook end point", rh.URL) - r.rolloutStatus.RolloutFailed("failed to invoke a webhook") + r.rolloutStatus.RolloutRetry("failed to invoke a webhook") return } klog.InfoS("successfully invoked a post batch webhook", "webhook name", rh.Name, "webhook end point", @@ -256,14 +284,14 @@ func (r *Controller) finalizeOneBatch(ctx context.Context) { if currentBatch == len(r.rolloutSpec.RolloutBatches)-1 { // this is the last batch, mark the rollout finalized r.rolloutStatus.StateTransition(v1alpha1.AllBatchFinishedEvent) - r.recorder.Event(r.parentController, event.Normal("all batches rolled out", + r.recorder.Event(r.parentController, event.Normal("All batches rolled out", fmt.Sprintf("upgrade pod = %d, total ready pod = %d", r.rolloutStatus.UpgradedReplicas, r.rolloutStatus.UpgradedReadyReplicas))) } else { klog.InfoS("finished one batch rollout", "current batch", r.rolloutStatus.CurrentBatch) // th - r.recorder.Event(r.parentController, event.Normal("Batch finalized", - fmt.Sprintf("the batch num = %d is ready", r.rolloutStatus.CurrentBatch))) + r.recorder.Event(r.parentController, event.Normal("Batch Finalized", + 
fmt.Sprintf("Batch %d is finalized and ready to go", r.rolloutStatus.CurrentBatch))) r.rolloutStatus.StateTransition(v1alpha1.FinishedOneBatchEvent) } } @@ -273,11 +301,12 @@ func (r *Controller) finalizeRollout(ctx context.Context) { // call the post-rollout webhooks for _, rw := range r.rolloutSpec.RolloutWebhooks { if rw.Type == v1alpha1.FinalizeRolloutHook { - err := callWebhook(ctx, r.parentController, v1alpha1.FinalisingState, rw) + err := callWebhook(ctx, r.parentController, string(r.rolloutStatus.RollingState), rw) if err != nil { klog.ErrorS(err, "failed to invoke a webhook", "webhook name", rw.Name, "webhook end point", rw.URL) - r.rolloutStatus.RolloutFailed("failed to invoke a post rollout webhook") + r.rolloutStatus.RolloutRetry("failed to invoke a post rollout webhook") + return } klog.InfoS("successfully invoked a post rollout webhook", "webhook name", rw.Name, "webhook end point", rw.URL) @@ -287,14 +316,15 @@ func (r *Controller) finalizeRollout(ctx context.Context) { } // verify that the upgradedReplicas and current batch in the status are valid according to the spec -func (r *Controller) validateRollingBatchStatus(totalSize int) bool { +func (r *Controller) validateRollingBatchStatus(totalSize int) error { status := r.rolloutStatus spec := r.rolloutSpec podCount := 0 if spec.BatchPartition != nil && *spec.BatchPartition < status.CurrentBatch { - klog.ErrorS(fmt.Errorf("the current batch value in the status is greater than the batch partition"), - "batch partition", *spec.BatchPartition, "current batch status", status.CurrentBatch) - return false + err := fmt.Errorf("the current batch value in the status is greater than the batch partition") + klog.ErrorS(err, "we have moved past the user defined partition", "user specified batch partition", + *spec.BatchPartition, "current batch we are working on", status.CurrentBatch) + return err } upgradedReplicas := int(status.UpgradedReplicas) currentBatch := int(status.CurrentBatch) @@ -309,9 +339,9 @@ func 
(r *Controller) validateRollingBatchStatus(totalSize int) bool { } // the recorded number should be at least as much as the all the pods before the current batch if podCount > upgradedReplicas { - klog.ErrorS(fmt.Errorf("the upgraded replica in the status is too small"), "upgraded num status", - upgradedReplicas, "pods in all the previous batches", podCount) - return false + err := fmt.Errorf("the upgraded replica in the status is less than all the pods in the previous batch") + klog.ErrorS(err, "upgraded num status", upgradedReplicas, "pods in all the previous batches", podCount) + return err } // calculate the upper bound with the current batch if currentBatch == len(spec.RolloutBatches)-1 { @@ -324,11 +354,11 @@ func (r *Controller) validateRollingBatchStatus(totalSize int) bool { } // the recorded number should be not as much as the all the pods including the active batch if podCount < upgradedReplicas { - klog.ErrorS(fmt.Errorf("the upgraded replica in the status is too large"), "upgraded num status", - upgradedReplicas, "pods in the batches including the current batch", podCount) - return false + err := fmt.Errorf("the upgraded replica in the status is greater than all the pods in the current batch") + klog.ErrorS(err, "upgraded num status", upgradedReplicas, "pods in the batches including the current batch", podCount) + return err } - return true + return nil } // GetWorkloadController pick the right workload controller to work on the workload diff --git a/pkg/controller/common/rollout/rollout_webhook.go b/pkg/controller/common/rollout/rollout_webhook.go index 9b3a32f9a..3708fe4c7 100644 --- a/pkg/controller/common/rollout/rollout_webhook.go +++ b/pkg/controller/common/rollout/rollout_webhook.go @@ -74,7 +74,7 @@ func makeHTTPRequest(ctx context.Context, webhookEndPoint, method string, payloa // callWebhook does a HTTP POST to an external service and // returns an error if the response status code is non-2xx -func callWebhook(ctx context.Context, resource 
klog.KMetadata, phase v1alpha1.RollingState, rw v1alpha1.RolloutWebhook) error { +func callWebhook(ctx context.Context, resource klog.KMetadata, phase string, rw v1alpha1.RolloutWebhook) error { payload := v1alpha1.RolloutWebhookPayload{ Name: resource.GetName(), Namespace: resource.GetNamespace(), diff --git a/pkg/controller/common/rollout/rollout_webhook_test.go b/pkg/controller/common/rollout/rollout_webhook_test.go index 48ad8dea4..90076268c 100644 --- a/pkg/controller/common/rollout/rollout_webhook_test.go +++ b/pkg/controller/common/rollout/rollout_webhook_test.go @@ -139,7 +139,7 @@ func Test_callWebhook(t *testing.T) { } type args struct { resource oam.Object - phase v1alpha1.RollingState + phase string rw v1alpha1.RolloutWebhook } tests := map[string]struct { @@ -151,7 +151,7 @@ func Test_callWebhook(t *testing.T) { returnedStatusCode: http.StatusAccepted, args: args{ resource: &res, - phase: v1alpha1.RollingInBatchesState, + phase: string(v1alpha1.RollingInBatchesState), rw: v1alpha1.RolloutWebhook{ URL: url, }, @@ -162,7 +162,7 @@ func Test_callWebhook(t *testing.T) { returnedStatusCode: http.StatusAlreadyReported, args: args{ resource: &res, - phase: v1alpha1.RollingInBatchesState, + phase: string(v1alpha1.RollingInBatchesState), rw: v1alpha1.RolloutWebhook{ URL: url, }, @@ -173,7 +173,7 @@ func Test_callWebhook(t *testing.T) { returnedStatusCode: http.StatusAlreadyReported, args: args{ resource: &res, - phase: v1alpha1.RollingInBatchesState, + phase: string(v1alpha1.RollingInBatchesState), rw: v1alpha1.RolloutWebhook{ URL: url, ExpectedStatus: []int{http.StatusNoContent, http.StatusAlreadyReported}, @@ -185,7 +185,7 @@ func Test_callWebhook(t *testing.T) { returnedStatusCode: http.StatusGone, args: args{ resource: &res, - phase: v1alpha1.RolloutFailedState, + phase: string(v1alpha1.RolloutFailedState), rw: v1alpha1.RolloutWebhook{ URL: url, ExpectedStatus: []int{http.StatusNoContent, http.StatusAlreadyReported}, diff --git 
a/pkg/controller/common/rollout/workloads/cloneset_controller.go b/pkg/controller/common/rollout/workloads/cloneset_controller.go index b71e63926..a53c26e02 100644 --- a/pkg/controller/common/rollout/workloads/cloneset_controller.go +++ b/pkg/controller/common/rollout/workloads/cloneset_controller.go @@ -7,11 +7,13 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/event" kruise "github.com/openkruise/kruise-api/apps/v1alpha1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2" "github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1" "github.com/oam-dev/kubevela/pkg/controller/common" "github.com/oam-dev/kubevela/pkg/oam" @@ -57,10 +59,9 @@ func (c *CloneSetController) Size(ctx context.Context) (int32, error) { return *c.cloneSet.Spec.Replicas, nil } -// Verify verifies that the target rollout resource is consistent with the rollout spec -func (c *CloneSetController) Verify(ctx context.Context) (status *v1alpha1.RolloutStatus) { +// VerifySpec verifies that the target rollout resource is consistent with the rollout spec +func (c *CloneSetController) VerifySpec(ctx context.Context) (*v1alpha1.RolloutStatus, error) { var verifyErr error - status = c.rolloutStatus defer func() { if verifyErr != nil { @@ -70,15 +71,15 @@ func (c *CloneSetController) Verify(ctx context.Context) (status *v1alpha1.Rollo }() if verifyErr = c.fetchCloneSet(ctx); verifyErr != nil { - return + // do not fail the rollout because we can't get the resource + c.rolloutStatus.RolloutRetry(verifyErr.Error()) + return c.rolloutStatus, nil } - // make sure that there are changes in the pod template + // make sure that the updateRevision is different from what we have already done targetHash := c.cloneSet.Status.UpdateRevision if targetHash == 
c.rolloutStatus.LastAppliedPodTemplateIdentifier { - verifyErr = fmt.Errorf("there is no difference between the source and target, hash = %s", targetHash) - c.rolloutStatus.RolloutFailed(verifyErr.Error()) - return + return nil, fmt.Errorf("there is no difference between the source and target, hash = %s", targetHash) } // record the new pod template hash c.rolloutStatus.NewPodTemplateIdentifier = targetHash @@ -88,32 +89,34 @@ func (c *CloneSetController) Verify(ctx context.Context) (status *v1alpha1.Rollo // check if the rollout batch replicas added up to the Cloneset replicas if verifyErr = c.verifyRolloutBatchReplicaValue(totalReplicas); verifyErr != nil { - c.rolloutStatus.RolloutFailed(verifyErr.Error()) - return + return nil, verifyErr } if !c.cloneSet.Spec.UpdateStrategy.Paused { verifyErr = fmt.Errorf("the cloneset %s is in the middle of updating, need to be paused first", c.cloneSet.GetName()) c.rolloutStatus.RolloutRetry(verifyErr.Error()) - return + return c.rolloutStatus, nil } // mark the rollout verified - c.recorder.Event(c.parentController, event.Normal("Verified", + c.recorder.Event(c.parentController, event.Normal("Rollout Verified", "Rollout spec and the CloneSet resource are verified")) - c.rolloutStatus.StateTransition(v1alpha1.RollingSpecVerifiedEvent) - return c.rolloutStatus + return c.rolloutStatus, nil } -// Initialize makes sure that -func (c *CloneSetController) Initialize(ctx context.Context) *v1alpha1.RolloutStatus { +// Initialize makes sure that the cloneset is under our control +func (c *CloneSetController) Initialize(ctx context.Context) (*v1alpha1.RolloutStatus, error) { totalReplicas, err := c.Size(ctx) if err != nil { - return c.rolloutStatus + c.rolloutStatus.RolloutRetry(err.Error()) + return c.rolloutStatus, nil } - // kick start the update and start from every pod in the old version + // add the parent controller to the owner of the cloneset + // before kicking start the update and start from every pod in the old version 
clonePatch := client.MergeFrom(c.cloneSet.DeepCopyObject()) + ref := metav1.NewControllerRef(c.parentController, v1alpha2.AppRolloutKindVersionKind) + c.cloneSet.SetOwnerReferences(append(c.cloneSet.GetOwnerReferences(), *ref)) c.cloneSet.Spec.UpdateStrategy.Paused = false c.cloneSet.Spec.UpdateStrategy.Partition = &intstr.IntOrString{Type: intstr.Int, IntVal: totalReplicas} @@ -121,17 +124,16 @@ func (c *CloneSetController) Initialize(ctx context.Context) *v1alpha1.RolloutSt if err := c.client.Patch(ctx, c.cloneSet, clonePatch, client.FieldOwner(c.parentController.GetUID())); err != nil { c.recorder.Event(c.parentController, event.Warning("Failed to the start the cloneset update", err)) c.rolloutStatus.RolloutRetry(err.Error()) - return c.rolloutStatus + return c.rolloutStatus, err } // mark the rollout initialized - c.recorder.Event(c.parentController, event.Normal("Initialized", "Rollout resource are initialized")) - c.rolloutStatus.StateTransition(v1alpha1.RollingInitializedEvent) - return c.rolloutStatus + c.recorder.Event(c.parentController, event.Normal("Rollout Initialized", "Rollout resource are initialized")) + return c.rolloutStatus, nil } // RolloutOneBatchPods calculates the number of pods we can upgrade once according to the rollout spec // and then set the partition accordingly -func (c *CloneSetController) RolloutOneBatchPods(ctx context.Context) *v1alpha1.RolloutStatus { +func (c *CloneSetController) RolloutOneBatchPods(ctx context.Context) (*v1alpha1.RolloutStatus, error) { // calculate what's the total pods that should be upgraded given the currentBatch in the status cloneSetSize, _ := c.Size(ctx) newPodTarget := c.calculateNewPodTarget(int(cloneSetSize)) @@ -143,19 +145,18 @@ func (c *CloneSetController) RolloutOneBatchPods(ctx context.Context) *v1alpha1. 
if err := c.client.Patch(ctx, c.cloneSet, clonePatch, client.FieldOwner(c.parentController.GetUID())); err != nil { c.recorder.Event(c.parentController, event.Warning("Failed to update the cloneset to upgrade", err)) c.rolloutStatus.RolloutRetry(err.Error()) - return c.rolloutStatus + return c.rolloutStatus, nil } // record the upgrade klog.InfoS("upgraded one batch", "current batch", c.rolloutStatus.CurrentBatch) - c.recorder.Event(c.parentController, event.Normal("Rollout", - fmt.Sprintf("upgraded the batch num = %d", c.rolloutStatus.CurrentBatch))) - c.rolloutStatus.StateTransition(v1alpha1.BatchRolloutVerifyingEvent) + c.recorder.Event(c.parentController, event.Normal("Batch Rollout", + fmt.Sprintf("Submitted upgrade quest for batch %d", c.rolloutStatus.CurrentBatch))) c.rolloutStatus.UpgradedReplicas = int32(newPodTarget) - return c.rolloutStatus + return c.rolloutStatus, nil } -// CheckOneBatchPods checks to see if the pods are all available according to -func (c *CloneSetController) CheckOneBatchPods(ctx context.Context) *v1alpha1.RolloutStatus { +// CheckOneBatchPods checks to see if the pods are all available according to the rollout plan +func (c *CloneSetController) CheckOneBatchPods(ctx context.Context) (*v1alpha1.RolloutStatus, bool) { cloneSetSize, _ := c.Size(ctx) newPodTarget := c.calculateNewPodTarget(int(cloneSetSize)) // get the number of ready pod from cloneset @@ -173,30 +174,48 @@ func (c *CloneSetController) CheckOneBatchPods(ctx context.Context) *v1alpha1.Ro // record the successful upgrade klog.InfoS("pods are ready", "current batch", currentBatch) c.recorder.Event(c.parentController, event.Normal("Batch Available", - fmt.Sprintf("the batch num = %d is available", c.rolloutStatus.CurrentBatch))) - c.rolloutStatus.StateTransition(v1alpha1.OneBatchAvailableEvent) + fmt.Sprintf("Batch %d is available", c.rolloutStatus.CurrentBatch))) c.rolloutStatus.LastAppliedPodTemplateIdentifier = c.rolloutStatus.NewPodTemplateIdentifier - } else { - // 
continue to verify - klog.V(common.LogDebug).InfoS("the batch is not ready yet", "current batch", currentBatch) - c.rolloutStatus.RolloutRetry("the batch is not ready yet") + return c.rolloutStatus, true } - return c.rolloutStatus + // continue to verify + klog.InfoS("the batch is not ready yet", "current batch", currentBatch) + c.rolloutStatus.RolloutRetry("the batch is not ready yet") + return c.rolloutStatus, false } // FinalizeOneBatch makes sure that the rollout status are updated correctly -func (c *CloneSetController) FinalizeOneBatch(ctx context.Context) *v1alpha1.RolloutStatus { - // nothing to do for now - return c.rolloutStatus +func (c *CloneSetController) FinalizeOneBatch(ctx context.Context) (*v1alpha1.RolloutStatus, error) { + // nothing to do for cloneset for now + return c.rolloutStatus, nil } // Finalize makes sure the Cloneset is all upgraded -func (c *CloneSetController) Finalize(ctx context.Context) *v1alpha1.RolloutStatus { - if c.fetchCloneSet(ctx) != nil { - return c.rolloutStatus +func (c *CloneSetController) Finalize(ctx context.Context, succeed bool) (*v1alpha1.RolloutStatus, error) { + if err := c.fetchCloneSet(ctx); err != nil { + c.rolloutStatus.RolloutRetry(err.Error()) + return c.rolloutStatus, nil } - - return c.rolloutStatus + clonePatch := client.MergeFrom(c.cloneSet.DeepCopyObject()) + // remove the parent controller from the resources' owner list + var newOwnerList []metav1.OwnerReference + for _, owner := range c.cloneSet.GetOwnerReferences() { + if owner.Kind == v1alpha2.AppRolloutKind && owner.APIVersion == v1alpha2.SchemeGroupVersion.String() { + continue + } + newOwnerList = append(newOwnerList, owner) + } + c.cloneSet.SetOwnerReferences(newOwnerList) + // patch the CloneSet + if err := c.client.Patch(ctx, c.cloneSet, clonePatch, client.FieldOwner(c.parentController.GetUID())); err != nil { + c.recorder.Event(c.parentController, event.Warning("Failed to the finalize the cloneset", err)) + 
c.rolloutStatus.RolloutRetry(err.Error()) + return c.rolloutStatus, err + } + // mark the resource finalized + c.recorder.Event(c.parentController, event.Normal("Rollout Finalized", + fmt.Sprintf("Rollout resource are finalized, succeed := %t", succeed))) + return c.rolloutStatus, nil } /* -------------------- @@ -225,7 +244,6 @@ func (c *CloneSetController) fetchCloneSet(ctx context.Context) error { if !apierrors.IsNotFound(err) { c.recorder.Event(c.parentController, event.Warning("Failed to get the Cloneset", err)) } - c.rolloutStatus.RolloutRetry(err.Error()) return err } c.cloneSet = &workload diff --git a/pkg/controller/common/rollout/workloads/controller.go b/pkg/controller/common/rollout/workloads/controller.go index 99f9da3fb..fbefd4848 100644 --- a/pkg/controller/common/rollout/workloads/controller.go +++ b/pkg/controller/common/rollout/workloads/controller.go @@ -11,31 +11,31 @@ type WorkloadController interface { // Size returns the total number of pods in the resources according to the spec Size(ctx context.Context) (int32, error) - // Verify makes sure that the resources can be upgraded according to the rollout plan + // VerifySpec makes sure that the resources can be upgraded according to the rollout plan // it returns new rollout status - Verify(ctx context.Context) *v1alpha1.RolloutStatus + VerifySpec(ctx context.Context) (*v1alpha1.RolloutStatus, error) - // Initialize make sure that the resource is ready to be upgraded. - Initialize(ctx context.Context) *v1alpha1.RolloutStatus + // Initialize make sure that the resource is ready to be upgraded + // this function is tasked to change rollout status + Initialize(ctx context.Context) (*v1alpha1.RolloutStatus, error) // RolloutOneBatchPods tries to upgrade pods in the resources following the rollout plan // it will upgrade as many pods as the rollout plan allows at once, the routine does not block on any operations. 
// Instead, we rely on the go-client's requeue mechanism to drive this towards the spec goal // it returns the number of pods upgraded in this round - RolloutOneBatchPods(ctx context.Context) *v1alpha1.RolloutStatus + RolloutOneBatchPods(ctx context.Context) (*v1alpha1.RolloutStatus, error) - // CheckOneBatchPods tries to upgrade pods in the resources following the rollout plan - // it will upgrade as many pods as the rollout plan allows at once, the routine does not block on any operations. - // Instead, we rely on the go-client's requeue mechanism to drive this towards the spec goal - // it returns the number of pods upgraded in this round - CheckOneBatchPods(ctx context.Context) *v1alpha1.RolloutStatus + // CheckOneBatchPods checks how many pods are ready to serve requests in the current batch + // it returns whether the number of pods upgraded in this round satisfies the rollout plan + CheckOneBatchPods(ctx context.Context) (*v1alpha1.RolloutStatus, bool) // FinalizeOneBatch makes sure that the rollout can start the next batch // it also needs to handle the corner cases around the very last batch - FinalizeOneBatch(ctx context.Context) *v1alpha1.RolloutStatus + FinalizeOneBatch(ctx context.Context) (*v1alpha1.RolloutStatus, error) // Finalize makes sure the resources are in a good final state. + // It might depend on if the rollout succeeded or not. 
// For example, we may remove the source object to prevent scalar traits to ever work - // and we will call the finalize rollout web hooks - Finalize(ctx context.Context) *v1alpha1.RolloutStatus + // and the finalize rollout web hooks will be called after this call succeeds + Finalize(ctx context.Context, succeed bool) (*v1alpha1.RolloutStatus, error) } diff --git a/pkg/controller/core.oam.dev/v1alpha2/application/application_controller.go b/pkg/controller/core.oam.dev/v1alpha2/application/application_controller.go index a4fe31356..3deb37e91 100644 --- a/pkg/controller/core.oam.dev/v1alpha2/application/application_controller.go +++ b/pkg/controller/core.oam.dev/v1alpha2/application/application_controller.go @@ -166,7 +166,7 @@ func (r *Reconciler) UpdateStatus(ctx context.Context, app *v1alpha2.Application }) } -// Setup adds a controller that reconciles ApplicationDeployment. +// Setup adds a controller that reconciles AppRollout. func Setup(mgr ctrl.Manager, _ core.Args, _ logging.Logger) error { dm, err := discoverymapper.New(mgr.GetConfig()) if err != nil { diff --git a/pkg/controller/core.oam.dev/v1alpha2/application/apply.go b/pkg/controller/core.oam.dev/v1alpha2/application/apply.go index e0bfb3880..500a82dd9 100644 --- a/pkg/controller/core.oam.dev/v1alpha2/application/apply.go +++ b/pkg/controller/core.oam.dev/v1alpha2/application/apply.go @@ -246,45 +246,62 @@ func (h *appHandler) createOrUpdateComponent(ctx context.Context, comp *v1alpha2 // it will create a new revision if the appConfig is different from the existing one func (h *appHandler) createOrUpdateAppConfig(ctx context.Context, appConfig *v1alpha2.ApplicationConfiguration) error { var curAppConfig v1alpha2.ApplicationConfiguration - // initialized - if h.app.Status.LatestRevision == nil { - revisionName := utils.ConstructRevisionName(h.app.Name, 0) - h.app.Status.LatestRevision = &v1alpha2.Revision{ - Name: revisionName, - Revision: 0, - } - } // compute a hash value of the appConfig spec 
specHash, err := hashstructure.Hash(appConfig.Spec, hashstructure.FormatV2, nil) if err != nil { return err } + specHashLabel := strconv.FormatUint(specHash, 16) appConfig.SetLabels(oamutil.MergeMapOverrideWithDst(appConfig.GetLabels(), map[string]string{ - oam.LabelAppConfigHash: strconv.FormatUint(specHash, 16), + oam.LabelAppConfigHash: specHashLabel, })) - + // first time ever + if h.app.Status.LatestRevision == nil { + h.logger.Info("create the first appConfig", "application name", h.app.GetName()) + return h.createNewAppConfig(ctx, appConfig) + } // get the AC with the last revision name stored in the application key := ctypes.NamespacedName{Name: h.app.Status.LatestRevision.Name, Namespace: h.app.Namespace} if err := h.r.Get(ctx, key, &curAppConfig); err != nil { if !apierrors.IsNotFound(err) { return err } - h.logger.Info("create a new appConfig", "application name", h.app.GetName(), - "latest revision that does not exist", h.app.Status.LatestRevision.Name) + h.logger.Info("create a new appConfig that the last creation failed to create", "application name", + h.app.GetName(), "latest revision that does not exist", h.app.Status.LatestRevision.Name) return h.createNewAppConfig(ctx, appConfig) } - // check if the old AC has the same HASH value + // check if the old AC has the same HASH value first, just replace lable/annotation if that's the case if curAppConfig.GetLabels()[oam.LabelAppConfigHash] == appConfig.GetLabels()[oam.LabelAppConfigHash] { // Just to be safe that it's not because of a random Hash collision if apiequality.Semantic.DeepEqual(&curAppConfig.Spec, &appConfig.Spec) { // same spec, no need to create another AC, still need to update the AC to apply label/annotation + h.logger.Info("update latest application config", "application name", + h.app.GetName(), "latest revision to be updated", h.app.Status.LatestRevision.Name) oamutil.PassLabelAndAnnotation(appConfig, &curAppConfig) return h.r.Update(ctx, &curAppConfig) } h.logger.Info("encountered a 
different app spec with same hash", "current spec", curAppConfig.Spec, "new appConfig spec", appConfig.Spec) } + nextRevisionName, _ := utils.GetAppNextRevision(h.app) + if nextRevisionName == h.app.Status.LatestRevision.Name { + // we don't need to create another appConfig + h.logger.Info("replace the existing application config", "application name", + h.app.GetName(), "latest revision to be replaced", h.app.Status.LatestRevision.Name, "new hash value", specHashLabel) + appConfig.ResourceVersion = curAppConfig.ResourceVersion + appConfig.Name = nextRevisionName + h.app.Status.LatestRevision.RevisionHash = specHashLabel + + // record that last appConfig we created first in the app's status + // make sure that we persist the latest revision first + if err := h.r.UpdateStatus(ctx, h.app); err != nil { + return err + } + // it ok if the update fails, we will update again in the next loop + return h.r.Update(ctx, appConfig) + } + // create the next version h.logger.Info("create a new appConfig", "application name", h.app.GetName(), "latest revision that does not match the appConfig", h.app.Status.LatestRevision.Name) @@ -293,7 +310,7 @@ func (h *appHandler) createOrUpdateAppConfig(ctx context.Context, appConfig *v1a // create a new appConfig given the latest revision in the application func (h *appHandler) createNewAppConfig(ctx context.Context, appConfig *v1alpha2.ApplicationConfiguration) error { - revisionName, nextRevision := utils.GetAppRevision(h.app) + revisionName, nextRevision := utils.GetAppNextRevision(h.app) // update the next revision in the application's status h.app.Status.LatestRevision = &v1alpha2.Revision{ Name: revisionName, diff --git a/pkg/controller/core.oam.dev/v1alpha2/application/apply_test.go b/pkg/controller/core.oam.dev/v1alpha2/application/apply_test.go index 76a1efb36..9dce7c59b 100644 --- a/pkg/controller/core.oam.dev/v1alpha2/application/apply_test.go +++ b/pkg/controller/core.oam.dev/v1alpha2/application/apply_test.go @@ -25,12 +25,7 @@ 
import ( var _ = Describe("Test Application apply", func() { var handler appHandler - app := &v1alpha2.Application{ - TypeMeta: metav1.TypeMeta{ - Kind: "Application", - APIVersion: "core.oam.dev/v1alpha2", - }, - } + var app *v1alpha2.Application var appConfig *v1alpha2.ApplicationConfiguration var namespaceName string var componentName string @@ -44,6 +39,12 @@ var _ = Describe("Test Application apply", func() { Name: namespaceName, }, } + app = &v1alpha2.Application{ + TypeMeta: metav1.TypeMeta{ + Kind: "Application", + APIVersion: "core.oam.dev/v1alpha2", + }, + } app.Namespace = namespaceName app.Spec = v1alpha2.ApplicationSpec{ Components: []v1alpha2.ApplicationComponent{{ @@ -91,11 +92,101 @@ var _ = Describe("Test Application apply", func() { Expect(k8sClient.Delete(context.TODO(), &ns)).Should(Succeed()) }) - It("Test creating applicationConfiguration revision", func() { + It("Test creating applicationConfiguration without revisions", func() { + ctx := context.TODO() + By("[TEST] Test application without AC revision") + app.Name = "test-revision" + annoKey1 := "testKey1" + annoKey2 := "testKey2" + Expect(handler.r.Create(ctx, app)).NotTo(HaveOccurred()) + // Test create or update + appConfig := appConfig.DeepCopy() + appConfig.SetAnnotations(map[string]string{annoKey1: strconv.FormatBool(true)}) + err := handler.createOrUpdateAppConfig(ctx, appConfig) + Expect(err).ToNot(HaveOccurred()) + // verify + curApp := &v1alpha2.Application{} + Eventually( + func() error { + return handler.r.Get(ctx, + types.NamespacedName{Namespace: ns.Name, Name: app.Name}, + curApp) + }, + time.Second*10, time.Millisecond*500).Should(BeNil()) + + By("Verify that the application status has the lastRevision name ") + Expect(curApp.Status.LatestRevision.Revision).Should(BeEquivalentTo(1)) + Expect(curApp.Status.LatestRevision.Name).Should(Equal(utils.ConstructRevisionName(app.Name, 1))) + curAC := &v1alpha2.ApplicationConfiguration{} + Expect(handler.r.Get(ctx, + 
types.NamespacedName{Namespace: ns.Name, Name: utils.ConstructRevisionName(app.Name, 1)}, + curAC)).NotTo(HaveOccurred()) + // check that the annotation/labels are correctly applied + Expect(curAC.GetAnnotations()[annoKey1]).ShouldNot(BeEmpty()) + Expect(curAC.GetLabels()[oam.LabelAppConfigHash]).ShouldNot(BeEmpty()) + hashValue := curAC.GetLabels()[oam.LabelAppConfigHash] + Expect(hashValue).ShouldNot(BeEmpty()) + Expect(curApp.Status.LatestRevision.RevisionHash).Should(Equal(hashValue)) + + By("[TEST] Modify the applicationConfiguration spec, should not lead to a new AC") + // update the spec of the AC which should lead to a new AC being created + appConfig.Spec.Components[0].Traits = []v1alpha2.ComponentTrait{ + { + Trait: runtime.RawExtension{ + Object: &v1alpha1.MetricsTrait{ + TypeMeta: metav1.TypeMeta{ + Kind: "MetricsTrait", + APIVersion: "standard.oam.dev/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: app.Name, + Namespace: namespaceName, + }, + }, + }, + }, + } + // this should not lead to a new AC but replace it with a completely different one + // the entire annotation should be changed too + appConfig.SetAnnotations(map[string]string{annoKey2: strconv.FormatBool(true)}) + + err = handler.createOrUpdateAppConfig(ctx, appConfig) + Expect(err).ToNot(HaveOccurred()) + // verify the app latest revision is not changed + Eventually( + func() error { + return handler.r.Get(ctx, + types.NamespacedName{Namespace: ns.Name, Name: app.Name}, + curApp) + }, + time.Second*10, time.Millisecond*500).Should(BeNil()) + + By("Verify that the lastest revision does not change, the hashvalue should though") + Expect(curApp.Status.LatestRevision.Revision).Should(BeEquivalentTo(1)) + Expect(curApp.Status.LatestRevision.Name).Should(Equal(utils.ConstructRevisionName(app.Name, 1))) + newHash := curApp.Status.LatestRevision.RevisionHash + Expect(newHash).ShouldNot(Equal(hashValue)) + // check that no new appConfig created + Expect(handler.r.Get(ctx, 
types.NamespacedName{Namespace: ns.Name, + Name: utils.ConstructRevisionName(app.Name, 2)}, curAC)).Should(&oamutil.NotFoundMatcher{}) + + // check that the new app annotation exist and the hash value has changed + updatedAC := v1alpha2.ApplicationConfiguration{} + Expect(handler.r.Get(ctx, types.NamespacedName{Namespace: ns.Name, Name: curApp.Status.LatestRevision.Name}, + &updatedAC)).Should(Succeed()) + Expect(updatedAC.GetAnnotations()[annoKey1]).Should(BeEmpty()) + Expect(updatedAC.GetAnnotations()[annoKey2]).ShouldNot(BeEmpty()) + Expect(updatedAC.GetLabels()[oam.LabelAppConfigHash]).ShouldNot(BeEmpty()) + Expect(updatedAC.GetLabels()[oam.LabelAppConfigHash]).Should(Equal(newHash)) + }) + + It("Test creating applicationConfiguration revisions", func() { ctx := context.TODO() By("[TEST] Test application without AC revision") app.Name = "test-revision" + // we want the app to generate new AC revision + app.SetAnnotations(map[string]string{oam.AnnotationAppRollout: strconv.FormatBool(true)}) Expect(handler.r.Create(ctx, app)).NotTo(HaveOccurred()) // Test create or update err := handler.createOrUpdateAppConfig(ctx, appConfig.DeepCopy()) diff --git a/pkg/controller/core.oam.dev/v1alpha2/applicationconfiguration/applicationconfiguration.go b/pkg/controller/core.oam.dev/v1alpha2/applicationconfiguration/applicationconfiguration.go index 558ccfcde..0dbfb61ef 100644 --- a/pkg/controller/core.oam.dev/v1alpha2/applicationconfiguration/applicationconfiguration.go +++ b/pkg/controller/core.oam.dev/v1alpha2/applicationconfiguration/applicationconfiguration.go @@ -69,6 +69,7 @@ const ( // Reconcile event reasons. 
const ( + reasonRevision = "ACRevision" reasonRenderComponents = "RenderedComponents" reasonExecutePrehook = "ExecutePrehook" reasonExecutePosthook = "ExecutePosthook" @@ -249,6 +250,8 @@ func (r *OAMApplicationReconciler) Reconcile(req reconcile.Request) (result reco // make sure this is the last functional defer function to be called defer func() { + // always update ac status and set the error + returnErr = errors.Wrap(r.UpdateStatus(ctx, ac), errUpdateAppConfigStatus) // Make sure if error occurs, reconcile will not happen too frequency if returnErr != nil { result.RequeueAfter = 0 @@ -270,7 +273,6 @@ func (r *OAMApplicationReconciler) Reconcile(req reconcile.Request) (result reco } r.record.Event(ac, event.Normal(reasonExecutePosthook, "Successfully executed a posthook", "posthook name", name)) } - returnErr = errors.Wrap(r.UpdateStatus(ctx, ac), errUpdateAppConfigStatus) }() // execute the prehooks @@ -280,19 +282,32 @@ func (r *OAMApplicationReconciler) Reconcile(req reconcile.Request) (result reco log.Debug("Failed to execute pre-hooks", "hook name", name, "error", err, "requeue-after", result.RequeueAfter) r.record.Event(ac, event.Warning(reasonCannotExecutePrehooks, err)) ac.SetConditions(v1alpha1.ReconcileError(errors.Wrap(err, errExecutePrehooks))) - return result, errors.Wrap(r.UpdateStatus(ctx, ac), errUpdateAppConfigStatus) + return result, nil } r.record.Event(ac, event.Normal(reasonExecutePrehook, "Successfully executed a prehook", "prehook name ", name)) } log = log.WithValues("uid", ac.GetUID(), "version", ac.GetResourceVersion()) + // we have special logics for application generated applicationConfiguration + if isControlledByApp(ac) { + if ac.GetAnnotations()[oam.AnnotationAppRevision] == strconv.FormatBool(true) { + msg := "Encounter an application revision, no need to reconcile" + log.Info(msg) + r.record.Event(ac, event.Normal(reasonRevision, msg)) + ac.SetConditions(v1alpha1.Unavailable()) + ac.Status.RollingStatus = 
v1alpha2.InactiveAfterRollingCompleted + // TODO: GC the traits/workloads + return reconcile.Result{}, nil + } + } + workloads, depStatus, err := r.components.Render(ctx, ac) if err != nil { log.Info("Cannot render components", "error", err) r.record.Event(ac, event.Warning(reasonCannotRenderComponents, err)) ac.SetConditions(v1alpha1.ReconcileError(errors.Wrap(err, errRenderComponents))) - return reconcile.Result{}, errors.Wrap(r.UpdateStatus(ctx, ac), errUpdateAppConfigStatus) + return reconcile.Result{}, nil } log.Debug("Successfully rendered components", "workloads", len(workloads)) r.record.Event(ac, event.Normal(reasonRenderComponents, "Successfully rendered components", "workloads", strconv.Itoa(len(workloads)))) @@ -302,7 +317,7 @@ func (r *OAMApplicationReconciler) Reconcile(req reconcile.Request) (result reco log.Debug("Cannot apply workload", "error", err) r.record.Event(ac, event.Warning(reasonCannotApplyComponents, err)) ac.SetConditions(v1alpha1.ReconcileError(errors.Wrap(err, errApplyComponents))) - return reconcile.Result{}, errors.Wrap(r.UpdateStatus(ctx, ac), errUpdateAppConfigStatus) + return reconcile.Result{}, nil } log.Debug("Successfully applied components", "workloads", len(workloads)) r.record.Event(ac, event.Normal(reasonApplyComponents, "Successfully applied components", "workloads", strconv.Itoa(len(workloads)))) @@ -323,13 +338,13 @@ func (r *OAMApplicationReconciler) Reconcile(req reconcile.Request) (result reco log.Debug("confirm component can't be garbage collected", "error", err) record.Event(ac, event.Warning(reasonCannotGGComponents, err)) ac.SetConditions(v1alpha1.ReconcileError(errors.Wrap(err, errGCComponent))) - return reconcile.Result{}, errors.Wrap(r.UpdateStatus(ctx, ac), errUpdateAppConfigStatus) + return reconcile.Result{}, nil } if err := r.client.Delete(ctx, &e); resource.IgnoreNotFound(err) != nil { log.Debug("Cannot garbage collect component", "error", err) record.Event(ac, event.Warning(reasonCannotGGComponents, 
err)) ac.SetConditions(v1alpha1.ReconcileError(errors.Wrap(err, errGCComponent))) - return reconcile.Result{}, errors.Wrap(r.UpdateStatus(ctx, ac), errUpdateAppConfigStatus) + return reconcile.Result{}, nil } log.Debug("Garbage collected resource") record.Event(ac, event.Normal(reasonGGComponent, "Successfully garbage collected component")) @@ -345,7 +360,7 @@ func (r *OAMApplicationReconciler) Reconcile(req reconcile.Request) (result reco ac.Status.Dependency = *depStatus } - // the posthook function will do the final status update + // the defer function will do the final status update return reconcile.Result{RequeueAfter: waitTime}, nil } diff --git a/pkg/controller/core.oam.dev/v1alpha2/applicationconfiguration/render.go b/pkg/controller/core.oam.dev/v1alpha2/applicationconfiguration/render.go index cb11a5025..8a9525fb7 100644 --- a/pkg/controller/core.oam.dev/v1alpha2/applicationconfiguration/render.go +++ b/pkg/controller/core.oam.dev/v1alpha2/applicationconfiguration/render.go @@ -35,6 +35,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/klog/v2" + "k8s.io/utils/pointer" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2" @@ -113,7 +114,8 @@ func (r *components) Render(ctx context.Context, ac *v1alpha2.ApplicationConfigu // we need to do a template roll out if it's not done yet needRolloutTemplate = ac.Status.RollingStatus != v1alpha2.RollingTemplated } else if ac.Status.RollingStatus == v1alpha2.RollingTemplated { - ac.Status.RollingStatus = v1alpha2.RollingComplete + klog.InfoS("mark the ac rolling status as completed", "appConfig", klog.KRef(ac.Namespace, ac.Name)) + ac.Status.RollingStatus = v1alpha2.RollingCompleted } for _, acc := range ac.Spec.Components { @@ -146,6 +148,7 @@ func (r *components) Render(ctx context.Context, ac *v1alpha2.ApplicationConfigu } // set the ac rollingStatus to be RollingTemplated if all workloads are going to be applied if 
workloadsAllClear && ac.Status.RollingStatus == v1alpha2.RollingTemplating { + klog.InfoS("mark the ac rolling status as templated", "appConfig", klog.KRef(ac.Namespace, ac.Name)) ac.Status.RollingStatus = v1alpha2.RollingTemplated } @@ -192,7 +195,6 @@ func (r *components) renderComponent(ctx context.Context, acc v1alpha2.Applicati // don't pass the following annotation as those are for appConfig only util.RemoveAnnotations(w, []string{oam.AnnotationAppRollout, oam.AnnotationRollingComponent}) ref := metav1.NewControllerRef(ac, v1alpha2.ApplicationConfigurationGroupVersionKind) - w.SetOwnerReferences([]metav1.OwnerReference{*ref}) w.SetNamespace(ac.GetNamespace()) traits := make([]*Trait, 0, len(acc.Traits)) @@ -232,12 +234,18 @@ func (r *components) renderComponent(ctx context.Context, acc v1alpha2.Applicati if isComponentRolling && needRolloutTemplate { // we have a special logic to emit the workload as a template so that the rollout // controller can take over. + // TODO: We might need to add the owner reference to the existing object in case the resource + // is going to be shared (ie. 
CloneSet) if err := prepWorkloadInstanceForRollout(w); err != nil { return nil, err } + // yield the controller to the rollout + ref.Controller = pointer.BoolPtr(false) klog.InfoS("Successfully rendered a workload instance for rollout", "workload", w.GetName()) } } + // set the owner reference after its ref is edited + w.SetOwnerReferences([]metav1.OwnerReference{*ref}) // create the ref after the workload name is set workloadRef := runtimev1alpha1.TypedReference{ diff --git a/pkg/controller/core.oam.dev/v1alpha2/applicationconfiguration/workloads.go b/pkg/controller/core.oam.dev/v1alpha2/applicationconfiguration/workloads.go index 7519a09c6..6436b3a61 100644 --- a/pkg/controller/core.oam.dev/v1alpha2/applicationconfiguration/workloads.go +++ b/pkg/controller/core.oam.dev/v1alpha2/applicationconfiguration/workloads.go @@ -43,6 +43,7 @@ func SetAppWorkloadInstanceName(componentName string, w *unstructured.Unstructur klog.InfoS("we encountered an unknown resources, assume that it does not support in-place upgrade", "GVK", w.GroupVersionKind(), "instance name", instanceName) w.SetName(instanceName) + } // prepWorkloadInstanceForRollout prepare the workload before it is emit to the k8s. 
The current approach is to mark it diff --git a/pkg/controller/core.oam.dev/v1alpha2/applicationdeployment/application-workload.go b/pkg/controller/core.oam.dev/v1alpha2/applicationdeployment/application-workload.go index d732ded43..fd6edec11 100644 --- a/pkg/controller/core.oam.dev/v1alpha2/applicationdeployment/application-workload.go +++ b/pkg/controller/core.oam.dev/v1alpha2/applicationdeployment/application-workload.go @@ -37,13 +37,14 @@ func (r *Reconciler) extractWorkloads(ctx context.Context, componentList []strin if err != nil { return nil, nil, err } - klog.InfoS("get the target workload we need to work on", "targetWorkload", klog.KObj(targetWorkload)) + klog.InfoS("successfully get the target workload we need to work on", "targetWorkload", klog.KObj(targetWorkload)) if sourceApp != nil { sourceWorkload, err := r.fetchWorkload(ctx, componentName, sourceApp) if err != nil { return nil, nil, err } - klog.InfoS("get the source workload we need to work on", "sourceWorkload", klog.KObj(sourceWorkload)) + klog.InfoS("successfully get the source workload we need to work on", "sourceWorkload", + klog.KObj(sourceWorkload)) return targetWorkload, sourceWorkload, nil } return targetWorkload, nil, nil @@ -84,14 +85,11 @@ func (r *Reconciler) fetchWorkload(ctx context.Context, componentName string, } // reuse the same appConfig controller logic that determines the workload name given an ACC applicationconfiguration.SetAppWorkloadInstanceName(componentName, w, revision) - klog.InfoS("get the workload we need to work on", "workload gvk", w.GroupVersionKind(), "workload name", w.GetName()) // get the real workload object from api-server given GVK and name workload, err := oamutil.GetObjectGivenGVKAndName(ctx, r, w.GroupVersionKind(), targetApp.GetNamespace(), w.GetName()) if err != nil { return nil, errors.Wrap(err, fmt.Sprintf("failed to get workload %s with gvk %+v ", w.GetName(), w.GroupVersionKind())) } - klog.InfoS("successfully get the workload we need to work 
on", "workload gvk", w.GroupVersionKind(), - "workload name", w.GetName()) return workload, nil } diff --git a/pkg/controller/core.oam.dev/v1alpha2/applicationdeployment/applicationdeployment_controller.go b/pkg/controller/core.oam.dev/v1alpha2/applicationdeployment/applicationdeployment_controller.go index 01fa6d460..c9ae22adf 100644 --- a/pkg/controller/core.oam.dev/v1alpha2/applicationdeployment/applicationdeployment_controller.go +++ b/pkg/controller/core.oam.dev/v1alpha2/applicationdeployment/applicationdeployment_controller.go @@ -3,6 +3,7 @@ package applicationdeployment import ( "context" "fmt" + "strconv" "time" "github.com/crossplane/crossplane-runtime/pkg/event" @@ -26,10 +27,10 @@ import ( oamutil "github.com/oam-dev/kubevela/pkg/oam/util" ) -const appDeployFinalizer = "finalizers.applicationdeployment.oam.dev" +const appRolloutFinalizer = "finalizers.approllout.oam.dev" const reconcileTimeOut = 60 * time.Second -// Reconciler reconciles an ApplicationDeployment object +// Reconciler reconciles an AppRollout object type Reconciler struct { client.Client dm discoverymapper.DiscoveryMapper @@ -42,7 +43,7 @@ type Reconciler struct { // +kubebuilder:rbac:groups=core.oam.dev,resources=applications,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=core.oam.dev,resources=applications/status,verbs=get;update;patch -// Reconcile is the main logic of applicationdeployment controller +// Reconcile is the main logic of appRollout controller func (r *Reconciler) Reconcile(req ctrl.Request) (res reconcile.Result, retErr error) { var appRollout oamv1alpha2.AppRollout ctx, cancel := context.WithTimeout(context.TODO(), reconcileTimeOut) @@ -52,23 +53,24 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (res reconcile.Result, retErr e defer func() { if retErr == nil { if res.Requeue || res.RequeueAfter > 0 { - klog.InfoS("Finished reconciling appDeployment", "deployment", req, "time spent", + klog.InfoS("Finished reconciling appRollout", 
"controller request", req, "time spent", time.Since(startTime), "result", res) } else { - klog.InfoS("Finished reconcile appDeployment", "deployment", req, "time spent", time.Since(startTime)) + klog.InfoS("Finished reconcile appRollout", "controller request", req, "time spent", + time.Since(startTime)) } } else { - klog.Errorf("Failed to reconcile appDeployment %s: %v", req, retErr) + klog.Errorf("Failed to reconcile appRollout %s: %v", req, retErr) } }() if err := r.Get(ctx, req.NamespacedName, &appRollout); err != nil { if apierrors.IsNotFound(err) { - klog.InfoS("application deployment does not exist", "appRollout", klog.KRef(req.Namespace, req.Name)) + klog.InfoS("appRollout does not exist", "appRollout", klog.KRef(req.Namespace, req.Name)) } return ctrl.Result{}, client.IgnoreNotFound(err) } - klog.InfoS("Start to reconcile ", "application deployment", klog.KObj(&appRollout)) + klog.InfoS("Start to reconcile ", "appRollout", klog.KObj(&appRollout)) // TODO: check if the target/source has changed r.handleFinalizer(&appRollout) @@ -87,7 +89,7 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (res reconcile.Result, retErr e } // Get the source application - sourceAppName := appRollout.Spec.SourceApplicationName + sourceAppName := appRollout.Spec.SourceAppRevisionName if sourceAppName == "" { klog.Info("source app fields not filled, we assume it is deployed for the first time") sourceApp = nil @@ -110,13 +112,38 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (res reconcile.Result, retErr e klog.InfoS("get the source workload we need to work on", "sourceWorkload", klog.KObj(sourceWorkload)) } + if appRollout.Status.RollingState == v1alpha1.RolloutSucceedState || + appRollout.Status.RollingState == v1alpha1.RolloutFailedState { + if appRollout.Status.LastUpgradedTargetAppRevision == appRollout.Spec.TargetAppRevisionName && + appRollout.Status.LastSourceAppRevision == appRollout.Spec.SourceAppRevisionName { + klog.InfoS("rollout terminated, no need to 
reconcile", "source", sourceAppName, + "target", targetAppName) + return ctrl.Result{}, nil + } + klog.InfoS("rollout target changed, restart the rollout", "source", sourceAppName, + "target", targetAppName) + appRollout.Status.StateTransition(v1alpha1.WorkloadModifiedEvent) + } + // reconcile the rollout part of the spec given the target and source workload rolloutPlanController := rollout.NewRolloutPlanController(r, &appRollout, r.record, &appRollout.Spec.RolloutPlan, &appRollout.Status.RolloutStatus, targetWorkload, sourceWorkload) result, rolloutStatus := rolloutPlanController.Reconcile(ctx) // make sure that the new status is copied back appRollout.Status.RolloutStatus = *rolloutStatus + appRollout.Status.LastUpgradedTargetAppRevision = targetAppName + appRollout.Status.LastSourceAppRevision = sourceAppName if rolloutStatus.RollingState == v1alpha1.RolloutSucceedState { + if sourceApp != nil { + // mark the source app as an application revision only so that it stop being reconciled + oamutil.RemoveAnnotations(sourceApp, []string{oam.AnnotationAppRollout}) + oamutil.AddAnnotations(sourceApp, map[string]string{oam.AnnotationAppRevision: strconv.FormatBool(true)}) + if err := r.Update(ctx, sourceApp); err != nil { + klog.ErrorS(err, "cannot add the app revision annotation", "source application", + klog.KRef(req.Namespace, sourceAppName)) + return ctrl.Result{}, err + } + } // remove the rollout annotation so that the target appConfig controller can take over the rest of the work oamutil.RemoveAnnotations(&targetApp, []string{oam.AnnotationAppRollout}) if err := r.Update(ctx, &targetApp); err != nil { @@ -124,6 +151,8 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (res reconcile.Result, retErr e klog.KRef(req.Namespace, targetAppName)) return ctrl.Result{}, err } + klog.InfoS("rollout succeeded, record the source and target app revision", "source", sourceAppName, + "target", targetAppName) } // update the appRollout status return result, r.updateStatus(ctx, 
&appRollout) @@ -143,11 +172,11 @@ func (r *Reconciler) updateStatus(ctx context.Context, appRollout *oamv1alpha2.A func (r *Reconciler) handleFinalizer(appRollout *oamv1alpha2.AppRollout) { if appRollout.DeletionTimestamp.IsZero() { - if !slice.ContainsString(appRollout.Finalizers, appDeployFinalizer, nil) { + if !slice.ContainsString(appRollout.Finalizers, appRolloutFinalizer, nil) { // TODO: add finalizer klog.Info("add finalizer") } - } else if slice.ContainsString(appRollout.Finalizers, appDeployFinalizer, nil) { + } else if slice.ContainsString(appRollout.Finalizers, appRolloutFinalizer, nil) { // TODO: perform finalize klog.Info("perform clean up") } @@ -155,15 +184,15 @@ func (r *Reconciler) handleFinalizer(appRollout *oamv1alpha2.AppRollout) { // SetupWithManager setup the controller with manager func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { - r.record = event.NewAPIRecorder(mgr.GetEventRecorderFor("ApplicationDeployment")). - WithAnnotations("controller", "ApplicationDeployment") + r.record = event.NewAPIRecorder(mgr.GetEventRecorderFor("AppRollout")). + WithAnnotations("controller", "AppRollout") return ctrl.NewControllerManagedBy(mgr). For(&oamv1alpha2.AppRollout{}). Owns(&oamv1alpha2.Application{}). Complete(r) } -// Setup adds a controller that reconciles ApplicationDeployment. +// Setup adds a controller that reconciles AppRollout. 
func Setup(mgr ctrl.Manager, _ controller.Args, _ logging.Logger) error { dm, err := discoverymapper.New(mgr.GetConfig()) if err != nil { diff --git a/pkg/controller/utils/utils.go b/pkg/controller/utils/utils.go index ac9011898..112bd4419 100644 --- a/pkg/controller/utils/utils.go +++ b/pkg/controller/utils/utils.go @@ -158,15 +158,20 @@ func StoreInSet(disableCaps string) mapset.Set { return mapset.NewSetFromSlice(disableSlice) } -// GetAppRevision will generate revision name and revision number for application -func GetAppRevision(app *v1alpha2.Application) (string, int64) { +// GetAppNextRevision will generate the next revision name and revision number for application +func GetAppNextRevision(app *v1alpha2.Application) (string, int64) { if app == nil { // should never happen return "", 0 } var nextRevision int64 = 1 if app.Status.LatestRevision != nil { - nextRevision = app.Status.LatestRevision.Revision + 1 + // we only bump the version when we are rolling + if _, exist := app.GetAnnotations()[oam.AnnotationAppRollout]; exist { + nextRevision = app.Status.LatestRevision.Revision + 1 + } else { + nextRevision = app.Status.LatestRevision.Revision + } } return ConstructRevisionName(app.Name, nextRevision), nextRevision } diff --git a/pkg/controller/utils/utils_test.go b/pkg/controller/utils/utils_test.go index d33b48797..c8134fcaf 100644 --- a/pkg/controller/utils/utils_test.go +++ b/pkg/controller/utils/utils_test.go @@ -3,6 +3,7 @@ package utils import ( "context" "fmt" + "strconv" "testing" . 
"github.com/onsi/ginkgo" @@ -18,6 +19,8 @@ import ( controllerruntime "sigs.k8s.io/controller-runtime" "github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2" + "github.com/oam-dev/kubevela/pkg/oam" + oamutil "github.com/oam-dev/kubevela/pkg/oam/util" ) var _ = Describe("utils", func() { @@ -187,19 +190,43 @@ func TestCompareWithRevision(t *testing.T) { } func TestGetAppRevison(t *testing.T) { - name, number := GetAppRevision(nil) - assert.Equal(t, name, "") - assert.Equal(t, number, int64(0)) + revisionName, latestRevision := GetAppNextRevision(nil) + assert.Equal(t, revisionName, "") + assert.Equal(t, latestRevision, int64(0)) + // the first is always 1 app := &v1alpha2.Application{} app.Name = "myapp" - name, number = GetAppRevision(app) - assert.Equal(t, name, "myapp-v1") - assert.Equal(t, number, int64(1)) + revisionName, latestRevision = GetAppNextRevision(app) + assert.Equal(t, revisionName, "myapp-v1") + assert.Equal(t, latestRevision, int64(1)) app.Status.LatestRevision = &v1alpha2.Revision{ Name: "myapp-v1", Revision: 1, } - name, number = GetAppRevision(app) - assert.Equal(t, name, "myapp-v2") - assert.Equal(t, number, int64(2)) + // we don't automatically advance the revision + revisionName, latestRevision = GetAppNextRevision(app) + assert.Equal(t, revisionName, "myapp-v1") + assert.Equal(t, latestRevision, int64(1)) + // we generate new revisions if the app is rolling + app.SetAnnotations(map[string]string{oam.AnnotationAppRollout: strconv.FormatBool(true)}) + revisionName, latestRevision = GetAppNextRevision(app) + assert.Equal(t, revisionName, "myapp-v2") + assert.Equal(t, latestRevision, int64(2)) + app.Status.LatestRevision = &v1alpha2.Revision{ + Name: revisionName, + Revision: latestRevision, + } + // try again + revisionName, latestRevision = GetAppNextRevision(app) + assert.Equal(t, revisionName, "myapp-v3") + assert.Equal(t, latestRevision, int64(3)) + app.Status.LatestRevision = &v1alpha2.Revision{ + Name: revisionName, + Revision: 
latestRevision, + } + // remove the annotation and it will stop + oamutil.RemoveAnnotations(app, []string{oam.AnnotationAppRollout}) + revisionName, latestRevision = GetAppNextRevision(app) + assert.Equal(t, revisionName, "myapp-v3") + assert.Equal(t, latestRevision, int64(3)) } diff --git a/pkg/oam/labels.go b/pkg/oam/labels.go index 2ac49f198..bd0c8338c 100644 --- a/pkg/oam/labels.go +++ b/pkg/oam/labels.go @@ -56,11 +56,15 @@ const ( AnnotationLastAppliedConfig = "app.oam.dev/last-applied-configuration" // AnnotationAppRollout indicates that the application is still rolling out - // the application controller should not reconcile it yet + // the application controller should treat it differently AnnotationAppRollout = "app.oam.dev/rollout-template" // AnnotationRollingComponent indicates that the component is rolling out // this is to enable any concerned controllers to handle the first component apply logic differently // the value of the annotation is a list of component name of all the new component AnnotationRollingComponent = "app.oam.dev/rolling-components" + + // AnnotationAppRevision indicates that the object is an application revision + // its controller should not try to reconcile it + AnnotationAppRevision = "app.oam.dev/app-revision" ) diff --git a/pkg/oam/util/helper_test.go b/pkg/oam/util/helper_test.go index ed5f506a7..5d7187aa7 100644 --- a/pkg/oam/util/helper_test.go +++ b/pkg/oam/util/helper_test.go @@ -1155,20 +1155,18 @@ func TestUnpackRevisionData(t *testing.T) { func TestPassThroughObjMeta(t *testing.T) { ac := &v1alpha2.ApplicationConfiguration{} - labels := map[string]string{ "core.oam.dev/ns": "oam-system", "core.oam.dev/controller": "oam-kubernetes-runtime", } - annotation := map[string]string{ "key1": "value1", "key2": "value2", } - ac.SetLabels(labels) ac.SetAnnotations(annotation) t.Log("workload and trait have no labels and annotation") + // test initial pass var u unstructured.Unstructured util.PassLabelAndAnnotation(ac, &u) got := 
u.GetLabels() @@ -1177,6 +1175,7 @@ func TestPassThroughObjMeta(t *testing.T) { gotAnnotation := u.GetAnnotations() wantAnnotation := annotation assert.Equal(t, wantAnnotation, gotAnnotation) + // test overlapping keys t.Log("workload and trait contains overlapping keys") existAnnotation := map[string]string{ "key1": "exist value1", @@ -1188,9 +1187,7 @@ func TestPassThroughObjMeta(t *testing.T) { } u.SetLabels(existLabels) u.SetAnnotations(existAnnotation) - util.PassLabelAndAnnotation(ac, &u) - gotAnnotation = u.GetAnnotations() wantAnnotation = map[string]string{ "key1": "value1", @@ -1198,7 +1195,6 @@ func TestPassThroughObjMeta(t *testing.T) { "key3": "value3", } assert.Equal(t, wantAnnotation, gotAnnotation) - gotLabels := u.GetLabels() wantLabels := map[string]string{ "core.oam.dev/ns": "oam-system", @@ -1206,6 +1202,16 @@ func TestPassThroughObjMeta(t *testing.T) { "core.oam.dev/controller": "oam-kubernetes-runtime", } assert.Equal(t, wantLabels, gotLabels) + + // test removing annotation + t.Log("removing parent key doesn't remove child's") + util.RemoveAnnotations(ac, []string{"key1", "key2"}) + assert.Equal(t, len(ac.GetAnnotations()), 0) + util.PassLabelAndAnnotation(ac, &u) + gotAnnotation = u.GetAnnotations() + assert.Equal(t, wantAnnotation, gotAnnotation) + gotLabels = u.GetLabels() + assert.Equal(t, wantLabels, gotLabels) } func TestAddLabels(t *testing.T) { diff --git a/pkg/utils/apply/apply.go b/pkg/utils/apply/apply.go index 789bc5cd8..fe38921f7 100644 --- a/pkg/utils/apply/apply.go +++ b/pkg/utils/apply/apply.go @@ -151,18 +151,19 @@ func executeApplyOptions(ctx context.Context, existing, desired runtime.Object, // MustBeControllableBy requires that the new object is controllable by an // object with the supplied UID. An object is controllable if its controller // reference includes the supplied UID. 
-// There can be multiple controllers and it's ligit as long as one of them matches the UID func MustBeControllableBy(u types.UID) ApplyOption { - return func(_ context.Context, _, newInstance runtime.Object) error { - if newInstance == nil { + return func(_ context.Context, existing, _ runtime.Object) error { + if existing == nil { return nil } - owners := newInstance.(metav1.Object).GetOwnerReferences() - for _, owner := range owners { - if owner.Controller != nil && *owner.Controller && owner.UID == u { - return nil - } + c := metav1.GetControllerOf(existing.(metav1.Object)) + if c == nil { + return nil } - return errors.Errorf("existing object is not controlled by UID %q", u) + + if c.UID != u { + return errors.Errorf("existing object is not controlled by UID %q", u) + } + return nil } } diff --git a/pkg/utils/apply/apply_test.go b/pkg/utils/apply/apply_test.go index cf545c712..4a50740b3 100644 --- a/pkg/utils/apply/apply_test.go +++ b/pkg/utils/apply/apply_test.go @@ -14,7 +14,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" - "k8s.io/utils/pointer" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -299,62 +298,46 @@ func TestCreator(t *testing.T) { func TestMustBeControllableBy(t *testing.T) { uid := types.UID("very-unique-string") + controller := true ctx := context.TODO() cases := map[string]struct { reason string - u types.UID current runtime.Object + u types.UID want error }{ "NoExistingObject": { reason: "No error should be returned if no existing object", }, "Adoptable": { - reason: "A current object with no controller reference is not controllable", + reason: "A current object with no controller reference may be adopted and controlled", u: uid, current: &testObject{}, - want: errors.Errorf("existing object is not controlled by UID %q", uid), }, "ControlledBySuppliedUID": { reason: "A current object that is already controlled by the supplied UID is controllable", u: uid, current: 
&testObject{ObjectMeta: metav1.ObjectMeta{OwnerReferences: []metav1.OwnerReference{{ UID: uid, - Controller: pointer.BoolPtr(true), + Controller: &controller, }}}}, }, "ControlledBySomeoneElse": { reason: "A current object that is already controlled by a different UID is not controllable", u: uid, - current: &testObject{ObjectMeta: metav1.ObjectMeta{OwnerReferences: []metav1.OwnerReference{ - { - UID: types.UID("some-other-uid"), - Controller: pointer.BoolPtr(true), - }, - }}}, + current: &testObject{ObjectMeta: metav1.ObjectMeta{OwnerReferences: []metav1.OwnerReference{{ + UID: types.UID("some-other-uid"), + Controller: &controller, + }}}}, want: errors.Errorf("existing object is not controlled by UID %q", uid), }, - "SharedControlledBySomeoneElse": { - reason: "An object that has a shared controlled by a different UID is controllable", - u: uid, - current: &testObject{ObjectMeta: metav1.ObjectMeta{OwnerReferences: []metav1.OwnerReference{ - { - UID: types.UID("some-other-uid"), - Controller: pointer.BoolPtr(true), - }, - { - UID: uid, - Controller: pointer.BoolPtr(true), - }, - }}}, - }, } for name, tc := range cases { t.Run(name, func(t *testing.T) { ao := MustBeControllableBy(tc.u) - err := ao(ctx, nil, tc.current) + err := ao(ctx, tc.current, nil) if diff := cmp.Diff(tc.want, err, test.EquateErrors()); diff != "" { t.Errorf("\n%s\nMustBeControllableBy(...)(...): -want error, +got error\n%s\n", tc.reason, diff) } diff --git a/pkg/webhook/common/rollout/rollout_plan.go b/pkg/webhook/common/rollout/rollout_plan.go index dd13d310a..0666a978f 100644 --- a/pkg/webhook/common/rollout/rollout_plan.go +++ b/pkg/webhook/common/rollout/rollout_plan.go @@ -83,10 +83,8 @@ func validateWebhook(rollout *v1alpha1.RolloutPlan, rootPath *field.Path) (allEr // ValidateUpdate validate if one can change the rollout plan from the previous psec func ValidateUpdate(new *v1alpha1.RolloutPlan, prev *v1alpha1.RolloutPlan, rootPath *field.Path) field.ErrorList { - // makes sure the new 
rollout alone is valid - allErrs := ValidateCreate(new, rootPath) - // TODO: Enforce that only a few fields can change after a rollout plan is set + var allErrs field.ErrorList return allErrs } diff --git a/pkg/webhook/core.oam.dev/register.go b/pkg/webhook/core.oam.dev/register.go index 6b5a42919..3c85fe0ca 100644 --- a/pkg/webhook/core.oam.dev/register.go +++ b/pkg/webhook/core.oam.dev/register.go @@ -23,6 +23,7 @@ func Register(mgr manager.Manager) error { } applicationconfiguration.RegisterMutatingHandler(mgr) applicationdeployment.RegisterMutatingHandler(mgr) + applicationdeployment.RegisterValidatingHandler(mgr) if err := component.RegisterMutatingHandler(mgr); err != nil { return err } diff --git a/pkg/webhook/core.oam.dev/v1alpha2/applicationdeployment/mutating_handler.go b/pkg/webhook/core.oam.dev/v1alpha2/applicationdeployment/mutating_handler.go index cb667babf..23eef1b32 100644 --- a/pkg/webhook/core.oam.dev/v1alpha2/applicationdeployment/mutating_handler.go +++ b/pkg/webhook/core.oam.dev/v1alpha2/applicationdeployment/mutating_handler.go @@ -19,7 +19,7 @@ import ( "github.com/oam-dev/kubevela/pkg/webhook/common/rollout" ) -// MutatingHandler handles ApplicationDeployment +// MutatingHandler handles AppRollout type MutatingHandler struct { Client client.Client @@ -37,7 +37,7 @@ func (h *MutatingHandler) Handle(ctx context.Context, req admission.Request) adm if err != nil { return admission.Errored(http.StatusBadRequest, err) } - DefaultApplicationDeployment(obj) + DefaultAppRollout(obj) marshalled, err := json.Marshal(obj) if err != nil { @@ -45,14 +45,14 @@ func (h *MutatingHandler) Handle(ctx context.Context, req admission.Request) adm } resp := admission.PatchResponseFromRaw(req.AdmissionRequest.Object.Raw, marshalled) if len(resp.Patches) > 0 { - klog.V(common.LogDebugWithContent).Infof("Admit ApplicationDeployment %s/%s patches: %v", obj.Namespace, obj.Name, + klog.V(common.LogDebugWithContent).Infof("Admit AppRollout %s/%s patches: %v", 
obj.Namespace, obj.Name, + util.DumpJSON(resp.Patches)) } return resp } -// DefaultApplicationDeployment will set the default value for the ApplicationDeployment -func DefaultApplicationDeployment(obj *v1alpha2.AppRollout) { +// DefaultAppRollout will set the default value for the AppRollout. +func DefaultAppRollout(obj *v1alpha2.AppRollout) { klog.InfoS("default", "name", obj.Name) if obj.Spec.RevertOnDelete == nil { klog.V(common.LogDebug).Info("default RevertOnDelete as false") @@ -82,6 +82,6 @@ func (h *MutatingHandler) InjectDecoder(d *admission.Decoder) error { // RegisterMutatingHandler will register component mutation handler to the webhook func RegisterMutatingHandler(mgr manager.Manager) { server := mgr.GetWebhookServer() - server.Register("/mutating-core-oam-dev-v1alpha2-applicationdeployments", + server.Register("/mutating-core-oam-dev-v1alpha2-approllout", &webhook.Admission{Handler: &MutatingHandler{}}) } diff --git a/pkg/webhook/core.oam.dev/v1alpha2/applicationdeployment/validating_handler.go b/pkg/webhook/core.oam.dev/v1alpha2/applicationdeployment/validating_handler.go index c3f021218..30403923e 100644 --- a/pkg/webhook/core.oam.dev/v1alpha2/applicationdeployment/validating_handler.go +++ b/pkg/webhook/core.oam.dev/v1alpha2/applicationdeployment/validating_handler.go @@ -15,7 +15,7 @@ import ( "github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2" ) -// ValidatingHandler handles ApplicationDeployment +// ValidatingHandler handles AppRollout type ValidatingHandler struct { client.Client @@ -76,6 +76,6 @@ func (h *ValidatingHandler) InjectDecoder(d *admission.Decoder) error { // RegisterValidatingHandler will register application configuration validation to webhook func RegisterValidatingHandler(mgr manager.Manager) { server := mgr.GetWebhookServer() - server.Register("/validating-core-oam-dev-v1alpha2-applicationdeployments", + server.Register("/validating-core-oam-dev-v1alpha2-approllout", &webhook.Admission{Handler: &ValidatingHandler{}}) } diff
--git a/pkg/webhook/core.oam.dev/v1alpha2/applicationdeployment/validation.go b/pkg/webhook/core.oam.dev/v1alpha2/applicationdeployment/validation.go index 26cf90bd2..fc983f2fb 100644 --- a/pkg/webhook/core.oam.dev/v1alpha2/applicationdeployment/validation.go +++ b/pkg/webhook/core.oam.dev/v1alpha2/applicationdeployment/validation.go @@ -3,6 +3,7 @@ package applicationdeployment import ( "context" + apiequality "k8s.io/apimachinery/pkg/api/equality" apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation" ktypes "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/validation/field" @@ -10,10 +11,11 @@ import ( "k8s.io/kubectl/pkg/util/slice" "github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2" + "github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1" "github.com/oam-dev/kubevela/pkg/webhook/common/rollout" ) -// ValidateCreate validates the ApplicationDeployment on creation +// ValidateCreate validates the AppRollout on creation func (h *ValidatingHandler) ValidateCreate(appRollout *v1alpha2.AppRollout) field.ErrorList { klog.InfoS("validate create", "name", appRollout.Name) allErrs := apimachineryvalidation.ValidateObjectMeta(&appRollout.ObjectMeta, true, @@ -38,7 +40,7 @@ func (h *ValidatingHandler) ValidateCreate(appRollout *v1alpha2.AppRollout) fiel // can't continue without target return allErrs } - sourceAppName := appRollout.Spec.SourceApplicationName + sourceAppName := appRollout.Spec.SourceAppRevisionName if sourceAppName != "" { if err := h.Get(context.Background(), ktypes.NamespacedName{Namespace: appRollout.Namespace, Name: sourceAppName}, &sourceApp); err != nil { @@ -65,7 +67,7 @@ func (h *ValidatingHandler) ValidateCreate(appRollout *v1alpha2.AppRollout) fiel func validateComponent(componentList []string, targetApp, sourceApp *v1alpha2.ApplicationConfiguration, fldPath *field.Path) field.ErrorList { var componentErrs field.ErrorList - var commmonComponentName string + var commonComponentName string if len(componentList) > 1 { 
componentErrs = append(componentErrs, field.TooLong(fldPath, componentList, 1)) return componentErrs @@ -79,7 +81,7 @@ func validateComponent(componentList []string, targetApp, sourceApp *v1alpha2.Ap componentErrs = append(componentErrs, field.TooMany(fldPath, len(commons), 1)) return componentErrs } - commmonComponentName = commons[0] + commonComponentName = commons[0] } else { // the component need to be one of the common components if !slice.ContainsString(commons, componentList[0], nil) { @@ -89,12 +91,12 @@ func validateComponent(componentList []string, targetApp, sourceApp *v1alpha2.Ap "it is not a common component in the application")) return componentErrs } - commmonComponentName = componentList[0] + commonComponentName = componentList[0] } // check if the workload type are the same in the source and target application - if len(commmonComponentName) == 0 { + if len(commonComponentName) == 0 { klog.Error("the common component have different types in the application", - "common component", commmonComponentName) + "common component", commonComponentName) componentErrs = append(componentErrs, field.Invalid(fldPath, componentList[0], "the common component have different types in the application")) } @@ -102,13 +104,27 @@ func validateComponent(componentList []string, targetApp, sourceApp *v1alpha2.Ap return componentErrs } -// ValidateUpdate validates the ApplicationDeployment on update +// ValidateUpdate validates the AppRollout on update func (h *ValidatingHandler) ValidateUpdate(new, old *v1alpha2.AppRollout) field.ErrorList { klog.InfoS("validate update", "name", new.Name) errList := h.ValidateCreate(new) + fldPath := field.NewPath("spec").Child("rolloutPlan") + if len(errList) > 0 { return errList } - fldPath := field.NewPath("spec").Child("rolloutPlan") + // we can only reuse the rollout after reaching terminating state if the target and source has changed + if old.Status.RollingState == v1alpha1.RolloutSucceedState || + old.Status.RollingState == 
v1alpha1.RolloutFailedState { + if old.Spec.SourceAppRevisionName == new.Spec.SourceAppRevisionName && + old.Spec.TargetAppRevisionName == new.Spec.TargetAppRevisionName { + if !apiequality.Semantic.DeepEqual(&old.Spec.RolloutPlan, &new.Spec.RolloutPlan) { + errList = append(errList, field.Invalid(fldPath, new.Spec, + "a successful or failed rollout cannot be modified without changing the target or the source")) + return errList + } + } + } + return rollout.ValidateUpdate(&new.Spec.RolloutPlan, &old.Spec.RolloutPlan, fldPath) } diff --git a/references/appfile/addon.go b/references/appfile/addon.go index ab5732d29..bff636a31 100644 --- a/references/appfile/addon.go +++ b/references/appfile/addon.go @@ -49,7 +49,7 @@ func ApplyTerraform(app *v1alpha2.Application, k8sClient client.Client, ioStream return nil, err } - revisionName, _ := utils.GetAppRevision(app) + revisionName, _ := utils.GetAppNextRevision(app) for i, wl := range appFile.Workloads { switch wl.CapabilityCategory { diff --git a/test/e2e-test/rollout_plan_test.go b/test/e2e-test/rollout_plan_test.go index 7f950708a..60316ec50 100644 --- a/test/e2e-test/rollout_plan_test.go +++ b/test/e2e-test/rollout_plan_test.go @@ -2,43 +2,49 @@ package controllers_test import ( "context" - "math/rand" + "fmt" "strconv" "time" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" - "github.com/crossplane/crossplane-runtime/apis/core/v1alpha1" + cpv1 "github.com/crossplane/crossplane-runtime/apis/core/v1alpha1" kruise "github.com/openkruise/kruise-api/apps/v1alpha1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/pointer" "sigs.k8s.io/controller-runtime/pkg/client" - logf "sigs.k8s.io/controller-runtime/pkg/log" "github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2" oamstd "github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1" "github.com/oam-dev/kubevela/pkg/controller/utils" + "github.com/oam-dev/kubevela/pkg/oam" "github.com/oam-dev/kubevela/pkg/oam/util" ) -var _ = Describe("Test Rolling out Application", func() { +var _ = Describe("Cloneset based rollout tests", func() { ctx := context.Background() - namespace := "rolling" + var namespace, clonesetName string var ns corev1.Namespace + var app v1alpha2.Application + var appConfig1, appConfig2 v1alpha2.ApplicationConfiguration + var kc kruise.CloneSet + var appRollout v1alpha2.AppRollout - BeforeEach(func() { - logf.Log.Info("Start to run a test, clean up previous resources") - namespace = string(strconv.AppendInt([]byte(namespace), rand.Int63(), 16)) + createNamespace := func(namespace string) { ns = corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: namespace, }, } // delete the namespace with all its resources - Expect(k8sClient.Delete(ctx, &ns, client.PropagationPolicy(metav1.DeletePropagationForeground))). 
- Should(SatisfyAny(BeNil(), &util.NotFoundMatcher{})) - logf.Log.Info("make sure all the resources are removed") + Eventually( + func() error { + return k8sClient.Delete(ctx, &ns, client.PropagationPolicy(metav1.DeletePropagationForeground)) + }, + time.Second*120, time.Millisecond*500).Should(SatisfyAny(BeNil(), &util.NotFoundMatcher{})) + By("make sure all the resources are removed") objectKey := client.ObjectKey{ Name: namespace, } @@ -53,155 +59,574 @@ var _ = Describe("Test Rolling out Application", func() { return k8sClient.Create(ctx, &ns) }, time.Second*3, time.Millisecond*300).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{})) + } + + CreateClonesetDef := func() { By("Install CloneSet based workloadDefinition") var cd v1alpha2.WorkloadDefinition - Expect(readYaml("testdata/rollout/clonesetDefinition.yaml", &cd)).Should(BeNil()) + Expect(readYaml("testdata/rollout/cloneset/clonesetDefinition.yaml", &cd)).Should(BeNil()) // create the workloadDefinition if not exist Eventually( func() error { return k8sClient.Create(ctx, &cd) }, time.Second*3, time.Millisecond*300).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{})) - }) + } - AfterEach(func() { - logf.Log.Info("Clean up resources") - // delete the namespace with all its resources - Expect(k8sClient.Delete(ctx, &ns, client.PropagationPolicy(metav1.DeletePropagationForeground))).Should(BeNil()) - }) - - It("Basic cloneset rollout", func() { - By("Apply an application") - var app v1alpha2.Application - Expect(readYaml("testdata/rollout/app-source.yaml", &app)).Should(BeNil()) - app.Namespace = namespace - Expect(k8sClient.Create(ctx, &app)).Should(Succeed()) - By("Get Application latest status after AppConfig created") - Eventually( - func() *v1alpha2.Revision { - k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: app.Name}, &app) - return app.Status.LatestRevision - }, - time.Second*30, time.Millisecond*500).ShouldNot(BeNil()) - By("Wait for AppConfig1 synced") - var appConfig1 
v1alpha2.ApplicationConfiguration - Eventually( - func() corev1.ConditionStatus { - k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: app.Status.LatestRevision.Name}, &appConfig1) - return appConfig1.Status.GetCondition(v1alpha1.TypeSynced).Status - }, - time.Second*30, time.Millisecond*500).Should(BeEquivalentTo(corev1.ConditionTrue)) - - By("Mark the application as rolling") - Expect(readYaml("testdata/rollout/app-source-prep.yaml", &app)).Should(BeNil()) - app.Namespace = namespace - Expect(k8sClient.Update(ctx, &app)).Should(Succeed()) - By("Wait for AppConfig1 to be templated") - Eventually( - func() v1alpha2.RollingStatus { - k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: app.Status.LatestRevision.Name}, &appConfig1) - return appConfig1.Status.RollingStatus - }, - time.Second*60, time.Millisecond*500).Should(BeEquivalentTo(v1alpha2.RollingTemplated)) - - By("Update the application during rolling") - Expect(readYaml("testdata/rollout/app-target.yaml", &app)).Should(BeNil()) - app.Namespace = namespace - Expect(k8sClient.Update(ctx, &app)).Should(Succeed()) + VerifyAppConfigTemplated := func(revision int64) { + var appConfigName string By("Get Application latest status after AppConfig created") Eventually( func() int64 { k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: app.Name}, &app) return app.Status.LatestRevision.Revision }, - time.Second*10, time.Millisecond*500).ShouldNot(BeEquivalentTo(1)) - By("Wait for AppConfig2 synced") - var appConfig2 v1alpha2.ApplicationConfiguration + time.Second*30, time.Millisecond*500).Should(BeEquivalentTo(revision)) + appConfigName = app.Status.LatestRevision.Name + By(fmt.Sprintf("Wait for AppConfig %s synced", appConfigName)) + var appConfig v1alpha2.ApplicationConfiguration + Eventually( + func() corev1.ConditionStatus { + k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appConfigName}, &appConfig) + return appConfig.Status.GetCondition(cpv1.TypeSynced).Status 
+ }, + time.Second*30, time.Millisecond*500).Should(BeEquivalentTo(corev1.ConditionTrue)) + + By(fmt.Sprintf("Wait for AppConfig %s to be templated", appConfigName)) + Eventually( + func() v1alpha2.RollingStatus { + k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appConfigName}, &appConfig) + return appConfig.Status.RollingStatus + }, + time.Second*60, time.Millisecond*500).Should(BeEquivalentTo(v1alpha2.RollingTemplated)) + } + + ApplySourceApp := func() { + By("Apply an application") + var newApp v1alpha2.Application + Expect(readYaml("testdata/rollout/cloneset/app-source.yaml", &newApp)).Should(BeNil()) + newApp.Namespace = namespace + Expect(k8sClient.Create(ctx, &newApp)).Should(Succeed()) + + By("Get Application latest status after AppConfig created") + Eventually( + func() *v1alpha2.Revision { + k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: newApp.Name}, &app) + return app.Status.LatestRevision + }, + time.Second*30, time.Millisecond*500).ShouldNot(BeNil()) + By("Wait for AppConfig1 synced") + Eventually( + func() corev1.ConditionStatus { + k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: app.Status.LatestRevision.Name}, &appConfig1) + return appConfig1.Status.GetCondition(cpv1.TypeSynced).Status + }, + time.Second*30, time.Millisecond*500).Should(BeEquivalentTo(corev1.ConditionTrue)) + } + + MarkSourceAppRolling := func() { + By("Mark the application as rolling") + Eventually( + func() error { + k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: app.Name}, &app) + app.SetAnnotations(util.MergeMapOverrideWithDst(app.GetAnnotations(), + map[string]string{oam.AnnotationRollingComponent: app.Spec.Components[0].Name, + oam.AnnotationAppRollout: strconv.FormatBool(true)})) + return k8sClient.Update(ctx, &app) + }, time.Second*5, time.Millisecond*500).Should(Succeed()) + + VerifyAppConfigTemplated(1) + } + + ApplyTargetApp := func() { + By("Update the application to target spec during rolling") + var 
targetApp v1alpha2.Application + Expect(readYaml("testdata/rollout/cloneset/app-target.yaml", &targetApp)).Should(BeNil()) + + Eventually( + func() error { + k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: app.Name}, &app) + app.Spec = targetApp.Spec + return k8sClient.Update(ctx, &app) + }, time.Second*5, time.Millisecond*500).Should(Succeed()) + + VerifyAppConfigTemplated(2) + + By("Remove the application rolling annotation") + Eventually( + func() error { + k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: app.Name}, &app) + util.RemoveAnnotations(&app, []string{oam.AnnotationAppRollout}) + return k8sClient.Update(ctx, &app) + }, time.Second*5, time.Millisecond*500).Should(Succeed()) + Eventually( func() corev1.ConditionStatus { k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: app.Status.LatestRevision.Name}, &appConfig2) - return appConfig2.Status.GetCondition(v1alpha1.TypeSynced).Status + return appConfig2.Status.GetCondition(cpv1.TypeSynced).Status }, - time.Second*60, time.Millisecond*500).Should(BeEquivalentTo(corev1.ConditionTrue)) + time.Second*30, time.Millisecond*500).Should(BeEquivalentTo(corev1.ConditionTrue)) + } - By("Wait for AppConfig2 to be templated") - Eventually( - func() v1alpha2.RollingStatus { - k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: app.Status.LatestRevision.Name}, &appConfig2) - return appConfig2.Status.RollingStatus - }, - time.Second*60, time.Millisecond*500).Should(BeEquivalentTo(v1alpha2.RollingTemplated)) - - By("Get the cloneset workload") - var kc kruise.CloneSet - workloadName := utils.ExtractComponentName(appConfig2.Spec.Components[0].RevisionName) - Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: workloadName}, + VerifyCloneSetPaused := func() { + By("Get the cloneset workload and make sure it's paused") + clonesetName = utils.ExtractComponentName(appConfig2.Spec.Components[0].RevisionName) + Expect(k8sClient.Get(ctx, 
client.ObjectKey{Namespace: namespace, Name: clonesetName}, &kc)).ShouldNot(HaveOccurred()) Expect(kc.Spec.UpdateStrategy.Paused).Should(BeTrue()) + } - By("Apply the application rollout that stops after two batches") - var appRollout v1alpha2.AppRollout - Expect(readYaml("testdata/rollout/app-deploy-pause.yaml", &appRollout)).Should(BeNil()) - appRollout.Namespace = namespace - Expect(k8sClient.Create(ctx, &appRollout)).Should(Succeed()) + VerifyRolloutOwnsCloneset := func() { + By("VerifySpec that rollout controller owns the cloneset") + Eventually( + func() string { + k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: clonesetName}, &kc) + clonesetOwner := metav1.GetControllerOf(&kc) + if clonesetOwner == nil { + return "" + } + return clonesetOwner.Kind + }, time.Second*10, time.Second).Should(BeEquivalentTo(v1alpha2.AppRolloutKind)) + clonesetOwner := metav1.GetControllerOf(&kc) + Expect(clonesetOwner.APIVersion).Should(BeEquivalentTo(v1alpha2.SchemeGroupVersion.String())) + } - By("Wait for the rollout phase change to rolling in batches") + VerifyRolloutSucceeded := func() { + By("Wait for the rollout phase change to succeed") Eventually( func() oamstd.RollingState { k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appRollout.Name}, &appRollout) return appRollout.Status.RollingState }, + time.Second*240, time.Second).Should(Equal(oamstd.RolloutSucceedState)) + } + + VerifyAppConfigRollingCompleted := func(appConfigName string) { + By("Wait for AppConfig2 to resume the control of cloneset") + var clonesetOwner *metav1.OwnerReference + Eventually( + func() string { + k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: clonesetName}, &kc) + clonesetOwner = metav1.GetControllerOf(&kc) + if clonesetOwner != nil { + return clonesetOwner.Kind + } + return "" + }, + time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(v1alpha2.ApplicationConfigurationKind)) + 
Expect(clonesetOwner.Name).Should(BeEquivalentTo(appConfig2.Name)) + Expect(kc.Status.UpdatedReplicas).Should(BeEquivalentTo(*kc.Spec.Replicas)) + Expect(kc.Status.UpdatedReadyReplicas).Should(BeEquivalentTo(*kc.Spec.Replicas)) + + By("VerifySpec AppConfig rolling status") + var appConfig v1alpha2.ApplicationConfiguration + + Eventually( + func() v1alpha2.RollingStatus { + k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appConfigName}, &appConfig) + return appConfig.Status.RollingStatus + }, + time.Second*30, time.Millisecond*500).Should(BeEquivalentTo(v1alpha2.RollingCompleted)) + } + + VerifyAppConfigInactive := func(appConfigName string) { + By("VerifySpec AppConfig is inactive") + var appConfig v1alpha2.ApplicationConfiguration + + Eventually( + func() v1alpha2.RollingStatus { + k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appConfigName}, &appConfig) + return appConfig.Status.RollingStatus + }, + time.Second*30, time.Millisecond*500).Should(BeEquivalentTo(v1alpha2.InactiveAfterRollingCompleted)) + } + + BeforeEach(func() { + By("Start to run a test, clean up previous resources") + namespace = "rolling-e2e-test" // + "-" + strconv.FormatInt(rand.Int63(), 16) + createNamespace(namespace) + }) + + AfterEach(func() { + By("Clean up resources after a test") + k8sClient.Delete(ctx, &appConfig2) + k8sClient.Delete(ctx, &appConfig1) + k8sClient.Delete(ctx, &app) + By(fmt.Sprintf("Delete the entire namespace %s", ns.Name)) + // delete the namespace with all its resources + Expect(k8sClient.Delete(ctx, &ns, client.PropagationPolicy(metav1.DeletePropagationForeground))).Should(BeNil()) + time.Sleep(15 * time.Second) + }) + + PIt("Test cloneset rollout first time (no source)", func() { + CreateClonesetDef() + ApplySourceApp() + MarkSourceAppRolling() + ApplyTargetApp() + VerifyCloneSetPaused() + By("Apply the application rollout go directly to the target") + var newAppRollout v1alpha2.AppRollout + 
Expect(readYaml("testdata/rollout/cloneset/app-rollout.yaml", &newAppRollout)).Should(BeNil()) + newAppRollout.Namespace = namespace + newAppRollout.Spec.SourceAppRevisionName = "" + newAppRollout.Spec.RolloutPlan.BatchPartition = pointer.Int32Ptr(int32(len(newAppRollout.Spec.RolloutPlan. + RolloutBatches) - 1)) + Expect(k8sClient.Create(ctx, &newAppRollout)).Should(Succeed()) + + By("Wait for the rollout phase change to rolling in batches") + Eventually( + func() oamstd.RollingState { + k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: newAppRollout.Name}, &appRollout) + return appRollout.Status.RollingState + }, time.Second*60, time.Millisecond*500).Should(BeEquivalentTo(oamstd.RollingInBatchesState)) - By("Wait for rollout to finish two batches") + VerifyRolloutOwnsCloneset() + + VerifyRolloutSucceeded() + + VerifyAppConfigRollingCompleted(appConfig2.Name) + // Clean up + k8sClient.Delete(ctx, &appRollout) + }) + + It("Test cloneset rollout with a manual check", func() { + CreateClonesetDef() + ApplySourceApp() + MarkSourceAppRolling() + ApplyTargetApp() + VerifyCloneSetPaused() + By("Apply the application rollout that stops after the first batche") + var newAppRollout v1alpha2.AppRollout + Expect(readYaml("testdata/rollout/cloneset/app-rollout.yaml", &newAppRollout)).Should(BeNil()) + newAppRollout.Namespace = namespace + batchPartition := 0 + newAppRollout.Spec.RolloutPlan.BatchPartition = pointer.Int32Ptr(int32(batchPartition)) + Expect(k8sClient.Create(ctx, &newAppRollout)).Should(Succeed()) + + By("Wait for the rollout phase change to rolling in batches") + Eventually( + func() oamstd.RollingState { + k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: newAppRollout.Name}, &appRollout) + return appRollout.Status.RollingState + }, + time.Second*60, time.Millisecond*500).Should(BeEquivalentTo(oamstd.RollingInBatchesState)) + + By("Wait for rollout to finish one batch") Eventually( func() int32 { k8sClient.Get(ctx, 
client.ObjectKey{Namespace: namespace, Name: appRollout.Name}, &appRollout) return appRollout.Status.CurrentBatch }, - time.Second*60, time.Millisecond*500).Should(BeEquivalentTo(1)) + time.Second*15, time.Millisecond*500).Should(BeEquivalentTo(batchPartition)) - By("Verify that the rollout stops at two batches") + By("VerifySpec that the rollout stops at the first batch") // wait for the batch to be ready Eventually( func() oamstd.BatchRollingState { k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appRollout.Name}, &appRollout) return appRollout.Status.BatchRollingState }, - time.Second*60, time.Millisecond*500).Should(Equal(oamstd.BatchReadyState)) - // wait for 30 seconds, it should still be at 1 - time.Sleep(30 * time.Second) + time.Second*30, time.Millisecond*500).Should(Equal(oamstd.BatchReadyState)) + // wait for 15 seconds, it should stop at 1 + time.Sleep(15 * time.Second) k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appRollout.Name}, &appRollout) - Expect(appRollout.Status.CurrentBatch).Should(BeEquivalentTo(1)) + Expect(appRollout.Status.RollingState).Should(BeEquivalentTo(oamstd.RollingInBatchesState)) Expect(appRollout.Status.BatchRollingState).Should(BeEquivalentTo(oamstd.BatchReadyState)) + Expect(appRollout.Status.CurrentBatch).Should(BeEquivalentTo(batchPartition)) - Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: workloadName}, - &kc)).ShouldNot(HaveOccurred()) - Expect(kc.Status.UpdatedReplicas).Should(BeEquivalentTo(3)) - Expect(kc.Status.UpdatedReadyReplicas).Should(BeEquivalentTo(3)) + VerifyRolloutOwnsCloneset() By("Finish the application rollout") - Expect(readYaml("testdata/rollout/app-deploy-finish.yaml", &appRollout)).Should(BeNil()) - appRollout.Namespace = namespace + // set the partition as the same size as the array + appRollout.Spec.RolloutPlan.BatchPartition = pointer.Int32Ptr(int32(len(appRollout.Spec.RolloutPlan. 
+ RolloutBatches) - 1)) Expect(k8sClient.Update(ctx, &appRollout)).Should(Succeed()) - By("Wait for the rollout phase change to succeeded") - Eventually( - func() oamstd.RollingState { - k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appRollout.Name}, &appRollout) - return appRollout.Status.RollingState - }, - time.Second*60, time.Millisecond*500).Should(Equal(oamstd.RolloutSucceedState)) + VerifyRolloutSucceeded() + + VerifyAppConfigRollingCompleted(appConfig2.Name) + + VerifyAppConfigInactive(appConfig1.Name) + + // Clean up + k8sClient.Delete(ctx, &appRollout) + }) + + PIt("Test pause and modify rollout plan after rolling succeeded", func() { + CreateClonesetDef() + ApplySourceApp() + MarkSourceAppRolling() + ApplyTargetApp() + VerifyCloneSetPaused() + By("Apply the application rollout that stops after two batches") + var newAppRollout v1alpha2.AppRollout + Expect(readYaml("testdata/rollout/cloneset/app-rollout.yaml", &newAppRollout)).Should(BeNil()) + newAppRollout.Namespace = namespace + batchPartition := 0 + newAppRollout.Spec.RolloutPlan.BatchPartition = pointer.Int32Ptr(int32(batchPartition)) + Expect(k8sClient.Create(ctx, &newAppRollout)).Should(Succeed()) + + By("Wait for the rollout phase change to rolling in batches") + Eventually( + func() oamstd.RollingState { + k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: newAppRollout.Name}, &appRollout) + return appRollout.Status.RollingState + }, + time.Second*60, time.Millisecond*500).Should(BeEquivalentTo(oamstd.RollingInBatchesState)) + + By("Pause the rollout") + Eventually( + func() error { + k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appRollout.Name}, &appRollout) + appRollout.Spec.RolloutPlan.Paused = true + err := k8sClient.Update(ctx, &appRollout) + return err + }, + time.Second*5, time.Millisecond*500).ShouldNot(HaveOccurred()) + Eventually( + func() int32 { + k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appRollout.Name}, 
&appRollout) + return appRollout.Status.CurrentBatch + }, + time.Second*15, time.Millisecond*500).Should(BeEquivalentTo(batchPartition)) + + By("VerifySpec that the rollout stops at the first batch") + // wait for the batch to be ready + Eventually( + func() corev1.ConditionStatus { + k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appRollout.Name}, &appRollout) + return appRollout.Status.GetCondition(oamstd.BatchPaused).Status + }, + time.Second*30, time.Millisecond*500).Should(Equal(corev1.ConditionTrue)) + // wait for 15 seconds, it should stop at 1 + time.Sleep(15 * time.Second) + k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appRollout.Name}, &appRollout) + Expect(appRollout.Status.RollingState).Should(BeEquivalentTo(oamstd.RollingInBatchesState)) + Expect(appRollout.Status.CurrentBatch).Should(BeEquivalentTo(batchPartition)) + k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appRollout.Name}, &appRollout) + lt := appRollout.Status.GetCondition(oamstd.BatchPaused).LastTransitionTime + beforeSleep := metav1.Time{ + Time: time.Now().Add(-15 * time.Second), + } + Expect((&lt).Before(&beforeSleep)).Should(BeTrue()) + + VerifyRolloutOwnsCloneset() + + By("Finish the application rollout") + // set the partition as the same size as the array + appRollout.Spec.RolloutPlan.Paused = false + appRollout.Spec.RolloutPlan.BatchPartition = pointer.Int32Ptr(int32(len(appRollout.Spec.RolloutPlan.
+ RolloutBatches) - 1)) + Expect(k8sClient.Update(ctx, &appRollout)).Should(Succeed()) + + VerifyRolloutSucceeded() + // record the transition time + k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appRollout.Name}, &appRollout) + lt = appRollout.Status.GetCondition(oamstd.RolloutSucceed).LastTransitionTime + + // move the batch partition back to 1 to see if it will roll again + appRollout.Spec.RolloutPlan.BatchPartition = pointer.Int32Ptr(0) + Expect(k8sClient.Update(ctx, &appRollout)).Should(Succeed()) + + // nothing should happen, the transition time should be the same + VerifyRolloutSucceeded() + k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appRollout.Name}, &appRollout) + Expect(appRollout.Status.RollingState).Should(BeEquivalentTo(oamstd.RolloutSucceedState)) + Expect(appRollout.Status.GetCondition(oamstd.RolloutSucceed).LastTransitionTime).Should(BeEquivalentTo(lt)) + + // Clean up + k8sClient.Delete(ctx, &appRollout) + }) + + PIt("Test rolling back after a successful rollout", func() { + CreateClonesetDef() + ApplySourceApp() + MarkSourceAppRolling() + ApplyTargetApp() + VerifyCloneSetPaused() + By("Apply the application rollout") + var newAppRollout v1alpha2.AppRollout + Expect(readYaml("testdata/rollout/cloneset/app-rollout.yaml", &newAppRollout)).Should(BeNil()) + newAppRollout.Namespace = namespace + newAppRollout.Spec.RolloutPlan.BatchPartition = pointer.Int32Ptr(int32(len(newAppRollout.Spec.RolloutPlan. 
+ RolloutBatches) - 1)) + Expect(k8sClient.Create(ctx, &newAppRollout)).Should(Succeed()) + By("Wait for the rollout phase change to rolling in batches") + Eventually( + func() oamstd.RollingState { + k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: newAppRollout.Name}, &appRollout) + return appRollout.Status.RollingState + }, + time.Second*60, time.Millisecond*500).Should(BeEquivalentTo(oamstd.RollingInBatchesState)) + VerifyRolloutOwnsCloneset() + + VerifyRolloutSucceeded() + + VerifyAppConfigRollingCompleted(appConfig2.Name) + + VerifyAppConfigInactive(appConfig1.Name) + + By("Revert the change by first marking the application as rolling") + var appConfig3 v1alpha2.ApplicationConfiguration + k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: app.Name}, &app) + app.SetAnnotations(util.MergeMapOverrideWithDst(app.GetAnnotations(), + map[string]string{oam.AnnotationRollingComponent: app.Spec.Components[0].Name, + oam.AnnotationAppRollout: strconv.FormatBool(true)})) + Expect(k8sClient.Update(ctx, &app)).Should(Succeed()) + By("Wait for AppConfig2 to be templated") + Eventually( + func() v1alpha2.RollingStatus { + k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: app.Status.LatestRevision.Name}, &appConfig2) + return appConfig2.Status.RollingStatus + }, + time.Second*30, time.Millisecond*500).Should(BeEquivalentTo(v1alpha2.RollingTemplated)) + By("Revert the application back to source") + var sourceApp v1alpha2.Application + Expect(readYaml("testdata/rollout/cloneset/app-source.yaml", &sourceApp)).Should(BeNil()) + sourceApp.SetAnnotations(util.MergeMapOverrideWithDst(app.GetAnnotations(), + map[string]string{oam.AnnotationRollingComponent: app.Spec.Components[0].Name, + oam.AnnotationAppRollout: strconv.FormatBool(true)})) + Eventually( + func() error { + k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: app.Name}, &app) + app.Spec = sourceApp.Spec + return k8sClient.Update(ctx, &app) + }, + time.Second*60, 
time.Millisecond*500).Should(Succeed()) + By("Wait for AppConfig3 to be templated") + Eventually( + func() v1alpha2.RollingStatus { + k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: app.Status.LatestRevision.Name}, &appConfig3) + return appConfig3.Status.RollingStatus + }, + time.Second*30, time.Millisecond*500).Should(BeEquivalentTo(v1alpha2.RollingTemplated)) + By("Modify the application rollout with new target and source") + Eventually( + func() error { + k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appRollout.Name}, &appRollout) + appRollout.Spec.SourceAppRevisionName = appConfig2.Name + appRollout.Spec.TargetAppRevisionName = appConfig3.Name + return k8sClient.Update(ctx, &appRollout) + }, + time.Second*5, time.Millisecond*500).Should(Succeed()) + + VerifyRolloutOwnsCloneset() + + VerifyRolloutSucceeded() + + By("VerifySpec AppConfig rolling status") + Eventually( + func() v1alpha2.RollingStatus { + k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appConfig2.Name}, &appConfig2) + return appConfig2.Status.RollingStatus + }, + time.Second*30, time.Millisecond*500).Should(BeEquivalentTo(v1alpha2.InactiveAfterRollingCompleted)) + + Eventually( + func() v1alpha2.RollingStatus { + k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appConfig3.Name}, &appConfig3) + return appConfig3.Status.RollingStatus + }, + time.Second*30, time.Millisecond*500).Should(BeEquivalentTo(v1alpha2.RollingCompleted)) + + // Clean up + k8sClient.Delete(ctx, &appRollout) + k8sClient.Delete(ctx, &appConfig3) + }) + + PIt("Test rolling back after a failed rollout", func() { + CreateClonesetDef() + ApplySourceApp() + MarkSourceAppRolling() + ApplyTargetApp() + VerifyCloneSetPaused() + By("Apply the application rollout that stops after the first batche") + var newAppRollout v1alpha2.AppRollout + Expect(readYaml("testdata/rollout/cloneset/app-rollout.yaml", &newAppRollout)).Should(BeNil()) + newAppRollout.Namespace = namespace +
batchPartition := 1 + newAppRollout.Spec.RolloutPlan.BatchPartition = pointer.Int32Ptr(int32(batchPartition)) + Expect(k8sClient.Create(ctx, &newAppRollout)).Should(Succeed()) + + By("Wait for the rollout phase change to rolling in batches") + Eventually( + func() oamstd.RollingState { + k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: newAppRollout.Name}, &appRollout) + return appRollout.Status.RollingState + }, + time.Second*60, time.Millisecond*500).Should(BeEquivalentTo(oamstd.RollingInBatchesState)) + + By("Wait for rollout to finish the batches") + Eventually( + func() int32 { + k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appRollout.Name}, &appRollout) + return appRollout.Status.CurrentBatch + }, + time.Second*15, time.Millisecond*500).Should(BeEquivalentTo(batchPartition)) + + By("VerifySpec that the rollout stops") + // wait for the batch to be ready + Eventually( + func() oamstd.BatchRollingState { + k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appRollout.Name}, &appRollout) + return appRollout.Status.BatchRollingState + }, + time.Second*30, time.Millisecond*500).Should(Equal(oamstd.BatchReadyState)) + + By("Move back the partition to cause the rollout to fail") + Eventually( + func() error { + k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appRollout.Name}, &appRollout) + appRollout.Spec.RolloutPlan.BatchPartition = pointer.Int32Ptr(0) + return k8sClient.Update(ctx, &appRollout) + }, + time.Second*3, time.Millisecond*500).Should(Succeed()) + + By("Wait for the rollout phase change to fail") + Eventually( + func() oamstd.RollingState { + k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: newAppRollout.Name}, &appRollout) + return appRollout.Status.RollingState + }, + time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(oamstd.RolloutFailedState)) + + // Clean up + k8sClient.Delete(ctx, &appRollout) + }) + + PIt("Test rolling by changing the definition", func() +
CreateClonesetDef() + ApplySourceApp() + MarkSourceAppRolling() + By("Apply the definition change") + var cd, newCD v1alpha2.WorkloadDefinition + Expect(readYaml("testdata/rollout/cloneset/clonesetDefinitionModified.yaml", &newCD)).Should(BeNil()) + Eventually( + func() error { + k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: newCD.Name}, &cd) + cd.Spec = newCD.Spec + return k8sClient.Update(ctx, &cd) + }, + time.Second*3, time.Millisecond*300).Should(Succeed()) + VerifyAppConfigTemplated(2) + By("Apply the application rollout") + var newAppRollout v1alpha2.AppRollout + Expect(readYaml("testdata/rollout/cloneset/app-rollout.yaml", &newAppRollout)).Should(BeNil()) + newAppRollout.Namespace = namespace + newAppRollout.Spec.RolloutPlan.BatchPartition = pointer.Int32Ptr(int32(len(newAppRollout.Spec.RolloutPlan. + RolloutBatches) - 1)) + Expect(k8sClient.Create(ctx, &newAppRollout)).Should(Succeed()) + + VerifyRolloutOwnsCloneset() + + VerifyRolloutSucceeded() + + VerifyAppConfigRollingCompleted(appConfig2.Name) + + VerifyAppConfigInactive(appConfig1.Name) - By("Wait for rollout to finish two batches") - Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: workloadName}, - &kc)).ShouldNot(HaveOccurred()) - Expect(kc.Status.UpdatedReplicas).Should(BeEquivalentTo(5)) - Expect(kc.Status.UpdatedReadyReplicas).Should(BeEquivalentTo(5)) // Clean up k8sClient.Delete(ctx, &appRollout) - k8sClient.Delete(ctx, &appConfig2) - k8sClient.Delete(ctx, &appConfig1) - k8sClient.Delete(ctx, &app) }) }) diff --git a/test/e2e-test/suite_test.go b/test/e2e-test/suite_test.go index d28b1231c..469ea4b8e 100644 --- a/test/e2e-test/suite_test.go +++ b/test/e2e-test/suite_test.go @@ -96,6 +96,16 @@ var _ = BeforeSuite(func(done Done) { logf.Log.Error(err, "failed to create k8sClient") Fail("setup failed") } + + // TODO: Remove this after we get rid of the integration test dir + By("Applying CRD of WorkloadDefinition and TraitDefinition") + var 
workloadDefinitionCRD crdv1.CustomResourceDefinition + Expect(readYaml("../../charts/vela-core/crds/core.oam.dev_workloaddefinitions.yaml", &workloadDefinitionCRD)).Should(BeNil()) + Expect(k8sClient.Create(context.Background(), &workloadDefinitionCRD)).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{})) + + var traitDefinitionCRD crdv1.CustomResourceDefinition + Expect(readYaml("../../charts/vela-core/crds/core.oam.dev_traitdefinitions.yaml", &traitDefinitionCRD)).Should(BeNil()) + Expect(k8sClient.Create(context.Background(), &traitDefinitionCRD)).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{})) By("Finished setting up test environment") // Create manual scaler trait definition @@ -217,7 +227,7 @@ var _ = BeforeSuite(func(done Done) { Name: "cluster-admin", }, } - Expect(k8sClient.Create(context.Background(), &adminRoleBinding)).Should(BeNil()) + Expect(k8sClient.Create(context.Background(), &adminRoleBinding)).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{})) By("Created cluster role binding for the test service account") crd = crdv1.CustomResourceDefinition{ @@ -291,6 +301,9 @@ var _ = AfterSuite(func() { Expect(k8sClient.Delete(context.Background(), &crd)).Should(BeNil()) By("Deleted the custom resource definition") + // TODO: Remove this after we get rid of the integration test dir + // Below is a CI hack so that the integration test can run. 
We need to migrate the integration test + // to this e2e dir and suite (https://github.com/oam-dev/kubevela/issues/1147) By("Deleting all the definitions by deleting the definition CRDs") crd = crdv1.CustomResourceDefinition{ ObjectMeta: metav1.ObjectMeta{ diff --git a/test/e2e-test/testdata/rollout/app-deploy-finish.yaml b/test/e2e-test/testdata/rollout/cloneset/app-rollout.yaml similarity index 74% rename from test/e2e-test/testdata/rollout/app-deploy-finish.yaml rename to test/e2e-test/testdata/rollout/cloneset/app-rollout.yaml index adcf0bd33..c5b23d089 100644 --- a/test/e2e-test/testdata/rollout/app-deploy-finish.yaml +++ b/test/e2e-test/testdata/rollout/cloneset/app-rollout.yaml @@ -1,11 +1,11 @@ apiVersion: core.oam.dev/v1alpha2 -kind: ApplicationDeployment +kind: AppRollout metadata: name: rolling-e2e-test spec: # application (revision) reference - targetApplicationName: test-e2e-rolling-v2 - sourceApplicationName: test-e2e-rolling-v1 + targetAppRevisionName: test-e2e-rolling-v2 + sourceAppRevisionName: test-e2e-rolling-v1 # HPA reference (optional) componentList: - metrics-provider diff --git a/test/e2e-test/testdata/rollout/app-source.yaml b/test/e2e-test/testdata/rollout/cloneset/app-source.yaml similarity index 87% rename from test/e2e-test/testdata/rollout/app-source.yaml rename to test/e2e-test/testdata/rollout/cloneset/app-source.yaml index 16c35f77a..c49cd01e8 100644 --- a/test/e2e-test/testdata/rollout/app-source.yaml +++ b/test/e2e-test/testdata/rollout/cloneset/app-source.yaml @@ -10,6 +10,6 @@ spec: cmd: - ./podinfo - stress-cpu=1 - image: stefanprodan/podinfo:4.0.6 + image: stefanprodan/podinfo:4.0.3 port: 8080 updateStrategyType: InPlaceIfPossible \ No newline at end of file diff --git a/test/e2e-test/testdata/rollout/app-source-prep.yaml b/test/e2e-test/testdata/rollout/cloneset/app-target.yaml similarity index 100% rename from test/e2e-test/testdata/rollout/app-source-prep.yaml rename to 
test/e2e-test/testdata/rollout/cloneset/app-target.yaml diff --git a/test/e2e-test/testdata/rollout/clonesetDefinition.yaml b/test/e2e-test/testdata/rollout/cloneset/clonesetDefinition.yaml similarity index 100% rename from test/e2e-test/testdata/rollout/clonesetDefinition.yaml rename to test/e2e-test/testdata/rollout/cloneset/clonesetDefinition.yaml diff --git a/test/e2e-test/testdata/rollout/cloneset/clonesetDefinitionModified.yaml b/test/e2e-test/testdata/rollout/cloneset/clonesetDefinitionModified.yaml new file mode 100644 index 000000000..a374b34c2 --- /dev/null +++ b/test/e2e-test/testdata/rollout/cloneset/clonesetDefinitionModified.yaml @@ -0,0 +1,106 @@ +# Code generated by KubeVela templates. DO NOT EDIT. +apiVersion: core.oam.dev/v1alpha2 +kind: WorkloadDefinition +metadata: + name: clonesetservice + namespace: vela-system + annotations: + definition.oam.dev/description: "Describes long-running, scalable, containerized services that have a stable network endpoint to receive external network traffic from customers. + If workload type is skipped for any service defined in Appfile, it will be defaulted to `webservice` type." 
+spec: + definitionRef: + name: clonesets.apps.kruise.io + schematic: + cue: + template: | + output: { + apiVersion: "apps.kruise.io/v1alpha1" + kind: "CloneSet" + metadata: labels: { + "app.oam.dev/component": context.name + } + spec: { + replicas: parameter.replicas + selector: matchLabels: { + "app.oam.dev/component": context.name + } + + template: { + metadata: labels: { + "app.oam.dev/component": context.name + } + + spec: { + containers: [{ + name: context.name + image: "stefanprodan/podinfo:5.0.2" + + if parameter["cmd"] != _|_ { + command: parameter.cmd + } + + if parameter["env"] != _|_ { + env: parameter.env + } + + if context["config"] != _|_ { + env: context.config + } + + ports: [{ + containerPort: parameter.port + }] + + if parameter["cpu"] != _|_ { + resources: { + limits: + cpu: parameter.cpu + requests: + cpu: parameter.cpu + } + } + }] + } + } + if parameter["updateStrategyType"] != _|_ { + updateStrategy: { + type: parameter.updateStrategyType + } + } + } + } + parameter: { + // +usage=Which image would you like to use for your service + // +short=i + image: string + + // +usage=Commands to run in the container + cmd?: [...string] + + // +usage=Which port do you want customer traffic sent to + // +short=p + port: *80 | int + // +usage=Define arguments by using environment variables + env?: [...{ + // +usage=Environment variable name + name: string + // +usage=The value of the environment variable + value?: string + // +usage=Specifies a source the value of this var should come from + valueFrom?: { + // +usage=Selects a key of a secret in the pod's namespace + secretKeyRef: { + // +usage=The name of the secret in the pod's namespace to select from + name: string + // +usage=The key of the secret to select from. 
Must be a valid secret key + key: string + } + } + }] + // +usage=Number of CPU units for the service, like `0.5` (0.5 CPU core), `1` (1 CPU core) + cpu?: string + // +usage=Cloneset updateStrategy, candidates are `ReCreate`/`InPlaceIfPossible`/`InPlaceOnly` + updateStrategyType?: string + // +usage=Number of pods in the cloneset + replicas: *5 | int + } \ No newline at end of file diff --git a/test/integration/appconfig_test.go b/test/integration/appconfig_test.go index e7c21b0af..b23c19dea 100644 --- a/test/integration/appconfig_test.go +++ b/test/integration/appconfig_test.go @@ -24,6 +24,8 @@ import ( "testing" "time" + "github.com/crossplane/crossplane-runtime/pkg/logging" + "github.com/crossplane/crossplane-runtime/pkg/test/integration" corev1 "k8s.io/api/core/v1" apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" kerrors "k8s.io/apimachinery/pkg/api/errors" @@ -34,9 +36,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log/zap" - "github.com/crossplane/crossplane-runtime/pkg/logging" - "github.com/crossplane/crossplane-runtime/pkg/test/integration" - coreoamdev "github.com/oam-dev/kubevela/apis/core.oam.dev" "github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2" controller "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev"