rename appRollout and add more fine-tune tests (#1166)

* rename appRollout and add more fine-tune tests

* improve rollout

* do not advance AC if we are not rolling

* fix e2e test bug

* fix the test
Ryan Zhang authored 2021-03-10 23:56:38 -08:00, committed by GitHub
parent 66c111be6c
commit 1048b399fd
55 changed files with 1368 additions and 1157 deletions

@@ -2,43 +2,49 @@ package controllers_test
import (
"context"
"math/rand"
"fmt"
"strconv"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/crossplane/crossplane-runtime/apis/core/v1alpha1"
cpv1 "github.com/crossplane/crossplane-runtime/apis/core/v1alpha1"
kruise "github.com/openkruise/kruise-api/apps/v1alpha1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
oamstd "github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/pkg/controller/utils"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/util"
)
var _ = Describe("Test Rolling out Application", func() {
var _ = Describe("Cloneset based rollout tests", func() {
ctx := context.Background()
namespace := "rolling"
var namespace, clonesetName string
var ns corev1.Namespace
var app v1alpha2.Application
var appConfig1, appConfig2 v1alpha2.ApplicationConfiguration
var kc kruise.CloneSet
var appRollout v1alpha2.AppRollout
BeforeEach(func() {
logf.Log.Info("Start to run a test, clean up previous resources")
namespace = string(strconv.AppendInt([]byte(namespace), rand.Int63(), 16))
createNamespace := func(namespace string) {
ns = corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: namespace,
},
}
// delete the namespace with all its resources
Expect(k8sClient.Delete(ctx, &ns, client.PropagationPolicy(metav1.DeletePropagationForeground))).
Should(SatisfyAny(BeNil(), &util.NotFoundMatcher{}))
logf.Log.Info("make sure all the resources are removed")
Eventually(
func() error {
return k8sClient.Delete(ctx, &ns, client.PropagationPolicy(metav1.DeletePropagationForeground))
},
time.Second*120, time.Millisecond*500).Should(SatisfyAny(BeNil(), &util.NotFoundMatcher{}))
By("make sure all the resources are removed")
objectKey := client.ObjectKey{
Name: namespace,
}
@@ -53,155 +59,574 @@ var _ = Describe("Test Rolling out Application", func() {
return k8sClient.Create(ctx, &ns)
},
time.Second*3, time.Millisecond*300).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
}
CreateClonesetDef := func() {
By("Install CloneSet based workloadDefinition")
var cd v1alpha2.WorkloadDefinition
Expect(readYaml("testdata/rollout/clonesetDefinition.yaml", &cd)).Should(BeNil())
Expect(readYaml("testdata/rollout/cloneset/clonesetDefinition.yaml", &cd)).Should(BeNil())
// create the workloadDefinition if not exist
Eventually(
func() error {
return k8sClient.Create(ctx, &cd)
},
time.Second*3, time.Millisecond*300).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
})
}
AfterEach(func() {
logf.Log.Info("Clean up resources")
// delete the namespace with all its resources
Expect(k8sClient.Delete(ctx, &ns, client.PropagationPolicy(metav1.DeletePropagationForeground))).Should(BeNil())
})
It("Basic cloneset rollout", func() {
By("Apply an application")
var app v1alpha2.Application
Expect(readYaml("testdata/rollout/app-source.yaml", &app)).Should(BeNil())
app.Namespace = namespace
Expect(k8sClient.Create(ctx, &app)).Should(Succeed())
By("Get Application latest status after AppConfig created")
Eventually(
func() *v1alpha2.Revision {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: app.Name}, &app)
return app.Status.LatestRevision
},
time.Second*30, time.Millisecond*500).ShouldNot(BeNil())
By("Wait for AppConfig1 synced")
var appConfig1 v1alpha2.ApplicationConfiguration
Eventually(
func() corev1.ConditionStatus {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: app.Status.LatestRevision.Name}, &appConfig1)
return appConfig1.Status.GetCondition(v1alpha1.TypeSynced).Status
},
time.Second*30, time.Millisecond*500).Should(BeEquivalentTo(corev1.ConditionTrue))
By("Mark the application as rolling")
Expect(readYaml("testdata/rollout/app-source-prep.yaml", &app)).Should(BeNil())
app.Namespace = namespace
Expect(k8sClient.Update(ctx, &app)).Should(Succeed())
By("Wait for AppConfig1 to be templated")
Eventually(
func() v1alpha2.RollingStatus {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: app.Status.LatestRevision.Name}, &appConfig1)
return appConfig1.Status.RollingStatus
},
time.Second*60, time.Millisecond*500).Should(BeEquivalentTo(v1alpha2.RollingTemplated))
By("Update the application during rolling")
Expect(readYaml("testdata/rollout/app-target.yaml", &app)).Should(BeNil())
app.Namespace = namespace
Expect(k8sClient.Update(ctx, &app)).Should(Succeed())
VerifyAppConfigTemplated := func(revision int64) {
var appConfigName string
By("Get Application latest status after AppConfig created")
Eventually(
func() int64 {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: app.Name}, &app)
return app.Status.LatestRevision.Revision
},
time.Second*10, time.Millisecond*500).ShouldNot(BeEquivalentTo(1))
By("Wait for AppConfig2 synced")
var appConfig2 v1alpha2.ApplicationConfiguration
time.Second*30, time.Millisecond*500).Should(BeEquivalentTo(revision))
appConfigName = app.Status.LatestRevision.Name
By(fmt.Sprintf("Wait for AppConfig %s synced", appConfigName))
var appConfig v1alpha2.ApplicationConfiguration
Eventually(
func() corev1.ConditionStatus {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appConfigName}, &appConfig)
return appConfig.Status.GetCondition(cpv1.TypeSynced).Status
},
time.Second*30, time.Millisecond*500).Should(BeEquivalentTo(corev1.ConditionTrue))
By(fmt.Sprintf("Wait for AppConfig %s to be templated", appConfigName))
Eventually(
func() v1alpha2.RollingStatus {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appConfigName}, &appConfig)
return appConfig.Status.RollingStatus
},
time.Second*60, time.Millisecond*500).Should(BeEquivalentTo(v1alpha2.RollingTemplated))
}
ApplySourceApp := func() {
By("Apply an application")
var newApp v1alpha2.Application
Expect(readYaml("testdata/rollout/cloneset/app-source.yaml", &newApp)).Should(BeNil())
newApp.Namespace = namespace
Expect(k8sClient.Create(ctx, &newApp)).Should(Succeed())
By("Get Application latest status after AppConfig created")
Eventually(
func() *v1alpha2.Revision {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: newApp.Name}, &app)
return app.Status.LatestRevision
},
time.Second*30, time.Millisecond*500).ShouldNot(BeNil())
By("Wait for AppConfig1 synced")
Eventually(
func() corev1.ConditionStatus {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: app.Status.LatestRevision.Name}, &appConfig1)
return appConfig1.Status.GetCondition(cpv1.TypeSynced).Status
},
time.Second*30, time.Millisecond*500).Should(BeEquivalentTo(corev1.ConditionTrue))
}
MarkSourceAppRolling := func() {
By("Mark the application as rolling")
Eventually(
func() error {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: app.Name}, &app)
app.SetAnnotations(util.MergeMapOverrideWithDst(app.GetAnnotations(),
map[string]string{oam.AnnotationRollingComponent: app.Spec.Components[0].Name,
oam.AnnotationAppRollout: strconv.FormatBool(true)}))
return k8sClient.Update(ctx, &app)
}, time.Second*5, time.Millisecond*500).Should(Succeed())
VerifyAppConfigTemplated(1)
}
ApplyTargetApp := func() {
By("Update the application to target spec during rolling")
var targetApp v1alpha2.Application
Expect(readYaml("testdata/rollout/cloneset/app-target.yaml", &targetApp)).Should(BeNil())
Eventually(
func() error {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: app.Name}, &app)
app.Spec = targetApp.Spec
return k8sClient.Update(ctx, &app)
}, time.Second*5, time.Millisecond*500).Should(Succeed())
VerifyAppConfigTemplated(2)
By("Remove the application rolling annotation")
Eventually(
func() error {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: app.Name}, &app)
util.RemoveAnnotations(&app, []string{oam.AnnotationAppRollout})
return k8sClient.Update(ctx, &app)
}, time.Second*5, time.Millisecond*500).Should(Succeed())
Eventually(
func() corev1.ConditionStatus {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: app.Status.LatestRevision.Name}, &appConfig2)
return appConfig2.Status.GetCondition(v1alpha1.TypeSynced).Status
return appConfig2.Status.GetCondition(cpv1.TypeSynced).Status
},
time.Second*60, time.Millisecond*500).Should(BeEquivalentTo(corev1.ConditionTrue))
time.Second*30, time.Millisecond*500).Should(BeEquivalentTo(corev1.ConditionTrue))
}
By("Wait for AppConfig2 to be templated")
Eventually(
func() v1alpha2.RollingStatus {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: app.Status.LatestRevision.Name}, &appConfig2)
return appConfig2.Status.RollingStatus
},
time.Second*60, time.Millisecond*500).Should(BeEquivalentTo(v1alpha2.RollingTemplated))
By("Get the cloneset workload")
var kc kruise.CloneSet
workloadName := utils.ExtractComponentName(appConfig2.Spec.Components[0].RevisionName)
Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: workloadName},
VerifyCloneSetPaused := func() {
By("Get the cloneset workload and make sure it's paused")
clonesetName = utils.ExtractComponentName(appConfig2.Spec.Components[0].RevisionName)
Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: clonesetName},
&kc)).ShouldNot(HaveOccurred())
Expect(kc.Spec.UpdateStrategy.Paused).Should(BeTrue())
}
By("Apply the application rollout that stops after two batches")
var appRollout v1alpha2.AppRollout
Expect(readYaml("testdata/rollout/app-deploy-pause.yaml", &appRollout)).Should(BeNil())
appRollout.Namespace = namespace
Expect(k8sClient.Create(ctx, &appRollout)).Should(Succeed())
VerifyRolloutOwnsCloneset := func() {
By("VerifySpec that rollout controller owns the cloneset")
Eventually(
func() string {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: clonesetName}, &kc)
clonesetOwner := metav1.GetControllerOf(&kc)
if clonesetOwner == nil {
return ""
}
return clonesetOwner.Kind
}, time.Second*10, time.Second).Should(BeEquivalentTo(v1alpha2.AppRolloutKind))
clonesetOwner := metav1.GetControllerOf(&kc)
Expect(clonesetOwner.APIVersion).Should(BeEquivalentTo(v1alpha2.SchemeGroupVersion.String()))
}
By("Wait for the rollout phase change to rolling in batches")
VerifyRolloutSucceeded := func() {
By("Wait for the rollout phase change to succeed")
Eventually(
func() oamstd.RollingState {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appRollout.Name}, &appRollout)
return appRollout.Status.RollingState
},
time.Second*240, time.Second).Should(Equal(oamstd.RolloutSucceedState))
}
VerifyAppConfigRollingCompleted := func(appConfigName string) {
By("Wait for AppConfig2 to resume the control of cloneset")
var clonesetOwner *metav1.OwnerReference
Eventually(
func() string {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: clonesetName}, &kc)
clonesetOwner = metav1.GetControllerOf(&kc)
if clonesetOwner != nil {
return clonesetOwner.Kind
}
return ""
},
time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(v1alpha2.ApplicationConfigurationKind))
Expect(clonesetOwner.Name).Should(BeEquivalentTo(appConfigName))
Expect(kc.Status.UpdatedReplicas).Should(BeEquivalentTo(*kc.Spec.Replicas))
Expect(kc.Status.UpdatedReadyReplicas).Should(BeEquivalentTo(*kc.Spec.Replicas))
By("VerifySpec AppConfig rolling status")
var appConfig v1alpha2.ApplicationConfiguration
Eventually(
func() v1alpha2.RollingStatus {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appConfigName}, &appConfig)
return appConfig.Status.RollingStatus
},
time.Second*30, time.Millisecond*500).Should(BeEquivalentTo(v1alpha2.RollingCompleted))
}
VerifyAppConfigInactive := func(appConfigName string) {
By("VerifySpec AppConfig is inactive")
var appConfig v1alpha2.ApplicationConfiguration
Eventually(
func() v1alpha2.RollingStatus {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appConfigName}, &appConfig)
return appConfig.Status.RollingStatus
},
time.Second*30, time.Millisecond*500).Should(BeEquivalentTo(v1alpha2.InactiveAfterRollingCompleted))
}
BeforeEach(func() {
By("Start to run a test, clean up previous resources")
namespace = "rolling-e2e-test" // + "-" + strconv.FormatInt(rand.Int63(), 16)
createNamespace(namespace)
})
AfterEach(func() {
By("Clean up resources after a test")
k8sClient.Delete(ctx, &appConfig2)
k8sClient.Delete(ctx, &appConfig1)
k8sClient.Delete(ctx, &app)
By(fmt.Sprintf("Delete the entire namespace %s", ns.Name))
// delete the namespace with all its resources
Expect(k8sClient.Delete(ctx, &ns, client.PropagationPolicy(metav1.DeletePropagationForeground))).Should(BeNil())
time.Sleep(15 * time.Second)
})
PIt("Test cloneset rollout first time (no source)", func() {
CreateClonesetDef()
ApplySourceApp()
MarkSourceAppRolling()
ApplyTargetApp()
VerifyCloneSetPaused()
By("Apply the application rollout go directly to the target")
var newAppRollout v1alpha2.AppRollout
Expect(readYaml("testdata/rollout/cloneset/app-rollout.yaml", &newAppRollout)).Should(BeNil())
newAppRollout.Namespace = namespace
newAppRollout.Spec.SourceAppRevisionName = ""
newAppRollout.Spec.RolloutPlan.BatchPartition = pointer.Int32Ptr(int32(len(newAppRollout.Spec.RolloutPlan.
RolloutBatches) - 1))
Expect(k8sClient.Create(ctx, &newAppRollout)).Should(Succeed())
By("Wait for the rollout phase change to rolling in batches")
Eventually(
func() oamstd.RollingState {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: newAppRollout.Name}, &appRollout)
return appRollout.Status.RollingState
},
time.Second*60, time.Millisecond*500).Should(BeEquivalentTo(oamstd.RollingInBatchesState))
By("Wait for rollout to finish two batches")
VerifyRolloutOwnsCloneset()
VerifyRolloutSucceeded()
VerifyAppConfigRollingCompleted(appConfig2.Name)
// Clean up
k8sClient.Delete(ctx, &appRollout)
})
It("Test cloneset rollout with a manual check", func() {
CreateClonesetDef()
ApplySourceApp()
MarkSourceAppRolling()
ApplyTargetApp()
VerifyCloneSetPaused()
By("Apply the application rollout that stops after the first batche")
var newAppRollout v1alpha2.AppRollout
Expect(readYaml("testdata/rollout/cloneset/app-rollout.yaml", &newAppRollout)).Should(BeNil())
newAppRollout.Namespace = namespace
batchPartition := 0
newAppRollout.Spec.RolloutPlan.BatchPartition = pointer.Int32Ptr(int32(batchPartition))
Expect(k8sClient.Create(ctx, &newAppRollout)).Should(Succeed())
By("Wait for the rollout phase change to rolling in batches")
Eventually(
func() oamstd.RollingState {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: newAppRollout.Name}, &appRollout)
return appRollout.Status.RollingState
},
time.Second*60, time.Millisecond*500).Should(BeEquivalentTo(oamstd.RollingInBatchesState))
By("Wait for rollout to finish one batch")
Eventually(
func() int32 {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appRollout.Name}, &appRollout)
return appRollout.Status.CurrentBatch
},
time.Second*60, time.Millisecond*500).Should(BeEquivalentTo(1))
time.Second*15, time.Millisecond*500).Should(BeEquivalentTo(batchPartition))
By("Verify that the rollout stops at two batches")
By("VerifySpec that the rollout stops at the first batch")
// wait for the batch to be ready
Eventually(
func() oamstd.BatchRollingState {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appRollout.Name}, &appRollout)
return appRollout.Status.BatchRollingState
},
time.Second*60, time.Millisecond*500).Should(Equal(oamstd.BatchReadyState))
// wait for 30 seconds, it should still be at 1
time.Sleep(30 * time.Second)
time.Second*30, time.Millisecond*500).Should(Equal(oamstd.BatchReadyState))
// wait for 15 seconds; the rollout should stay at the partitioned batch
time.Sleep(15 * time.Second)
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appRollout.Name}, &appRollout)
Expect(appRollout.Status.CurrentBatch).Should(BeEquivalentTo(1))
Expect(appRollout.Status.RollingState).Should(BeEquivalentTo(oamstd.RollingInBatchesState))
Expect(appRollout.Status.BatchRollingState).Should(BeEquivalentTo(oamstd.BatchReadyState))
Expect(appRollout.Status.CurrentBatch).Should(BeEquivalentTo(batchPartition))
Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: workloadName},
&kc)).ShouldNot(HaveOccurred())
Expect(kc.Status.UpdatedReplicas).Should(BeEquivalentTo(3))
Expect(kc.Status.UpdatedReadyReplicas).Should(BeEquivalentTo(3))
VerifyRolloutOwnsCloneset()
By("Finish the application rollout")
Expect(readYaml("testdata/rollout/app-deploy-finish.yaml", &appRollout)).Should(BeNil())
appRollout.Namespace = namespace
// set the partition to the last batch (len(RolloutBatches) - 1)
appRollout.Spec.RolloutPlan.BatchPartition = pointer.Int32Ptr(int32(len(appRollout.Spec.RolloutPlan.
RolloutBatches) - 1))
Expect(k8sClient.Update(ctx, &appRollout)).Should(Succeed())
By("Wait for the rollout phase change to succeeded")
Eventually(
func() oamstd.RollingState {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appRollout.Name}, &appRollout)
return appRollout.Status.RollingState
},
time.Second*60, time.Millisecond*500).Should(Equal(oamstd.RolloutSucceedState))
VerifyRolloutSucceeded()
VerifyAppConfigRollingCompleted(appConfig2.Name)
VerifyAppConfigInactive(appConfig1.Name)
// Clean up
k8sClient.Delete(ctx, &appRollout)
})
PIt("Test pause and modify rollout plan after rolling succeeded", func() {
CreateClonesetDef()
ApplySourceApp()
MarkSourceAppRolling()
ApplyTargetApp()
VerifyCloneSetPaused()
By("Apply the application rollout that stops after two batches")
var newAppRollout v1alpha2.AppRollout
Expect(readYaml("testdata/rollout/cloneset/app-rollout.yaml", &newAppRollout)).Should(BeNil())
newAppRollout.Namespace = namespace
batchPartition := 0
newAppRollout.Spec.RolloutPlan.BatchPartition = pointer.Int32Ptr(int32(batchPartition))
Expect(k8sClient.Create(ctx, &newAppRollout)).Should(Succeed())
By("Wait for the rollout phase change to rolling in batches")
Eventually(
func() oamstd.RollingState {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: newAppRollout.Name}, &appRollout)
return appRollout.Status.RollingState
},
time.Second*60, time.Millisecond*500).Should(BeEquivalentTo(oamstd.RollingInBatchesState))
By("Pause the rollout")
Eventually(
func() error {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appRollout.Name}, &appRollout)
appRollout.Spec.RolloutPlan.Paused = true
err := k8sClient.Update(ctx, &appRollout)
return err
},
time.Second*5, time.Millisecond*500).ShouldNot(HaveOccurred())
Eventually(
func() int32 {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appRollout.Name}, &appRollout)
return appRollout.Status.CurrentBatch
},
time.Second*15, time.Millisecond*500).Should(BeEquivalentTo(batchPartition))
By("VerifySpec that the rollout stops at the first batch")
// wait for the batch to be ready
Eventually(
func() corev1.ConditionStatus {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appRollout.Name}, &appRollout)
return appRollout.Status.GetCondition(oamstd.BatchPaused).Status
},
time.Second*30, time.Millisecond*500).Should(Equal(corev1.ConditionTrue))
// wait for 15 seconds; the rollout should stay at the partitioned batch
time.Sleep(15 * time.Second)
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appRollout.Name}, &appRollout)
Expect(appRollout.Status.RollingState).Should(BeEquivalentTo(oamstd.RollingInBatchesState))
Expect(appRollout.Status.CurrentBatch).Should(BeEquivalentTo(batchPartition))
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appRollout.Name}, &appRollout)
lt := appRollout.Status.GetCondition(oamstd.BatchPaused).LastTransitionTime
beforeSleep := metav1.Time{
Time: time.Now().Add(-15 * time.Second),
}
Expect((&lt).Before(&beforeSleep)).Should(BeTrue())
VerifyRolloutOwnsCloneset()
By("Finish the application rollout")
// set the partition to the last batch (len(RolloutBatches) - 1)
appRollout.Spec.RolloutPlan.Paused = false
appRollout.Spec.RolloutPlan.BatchPartition = pointer.Int32Ptr(int32(len(appRollout.Spec.RolloutPlan.
RolloutBatches) - 1))
Expect(k8sClient.Update(ctx, &appRollout)).Should(Succeed())
VerifyRolloutSucceeded()
// record the transition time
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appRollout.Name}, &appRollout)
lt = appRollout.Status.GetCondition(oamstd.RolloutSucceed).LastTransitionTime
// move the batch partition back to 0 to see if it will roll again
appRollout.Spec.RolloutPlan.BatchPartition = pointer.Int32Ptr(0)
Expect(k8sClient.Update(ctx, &appRollout)).Should(Succeed())
// nothing should happen, the transition time should be the same
VerifyRolloutSucceeded()
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appRollout.Name}, &appRollout)
Expect(appRollout.Status.RollingState).Should(BeEquivalentTo(oamstd.RolloutSucceedState))
Expect(appRollout.Status.GetCondition(oamstd.RolloutSucceed).LastTransitionTime).Should(BeEquivalentTo(lt))
// Clean up
k8sClient.Delete(ctx, &appRollout)
})
PIt("Test rolling back after a successful rollout", func() {
CreateClonesetDef()
ApplySourceApp()
MarkSourceAppRolling()
ApplyTargetApp()
VerifyCloneSetPaused()
By("Apply the application rollout")
var newAppRollout v1alpha2.AppRollout
Expect(readYaml("testdata/rollout/cloneset/app-rollout.yaml", &newAppRollout)).Should(BeNil())
newAppRollout.Namespace = namespace
newAppRollout.Spec.RolloutPlan.BatchPartition = pointer.Int32Ptr(int32(len(newAppRollout.Spec.RolloutPlan.
RolloutBatches) - 1))
Expect(k8sClient.Create(ctx, &newAppRollout)).Should(Succeed())
By("Wait for the rollout phase change to rolling in batches")
Eventually(
func() oamstd.RollingState {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: newAppRollout.Name}, &appRollout)
return appRollout.Status.RollingState
},
time.Second*60, time.Millisecond*500).Should(BeEquivalentTo(oamstd.RollingInBatchesState))
VerifyRolloutOwnsCloneset()
VerifyRolloutSucceeded()
VerifyAppConfigRollingCompleted(appConfig2.Name)
VerifyAppConfigInactive(appConfig1.Name)
By("Revert the change by first marking the application as rolling")
var appConfig3 v1alpha2.ApplicationConfiguration
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: app.Name}, &app)
app.SetAnnotations(util.MergeMapOverrideWithDst(app.GetAnnotations(),
map[string]string{oam.AnnotationRollingComponent: app.Spec.Components[0].Name,
oam.AnnotationAppRollout: strconv.FormatBool(true)}))
Expect(k8sClient.Update(ctx, &app)).Should(Succeed())
By("Wait for AppConfig2 to be templated")
Eventually(
func() v1alpha2.RollingStatus {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: app.Status.LatestRevision.Name}, &appConfig2)
return appConfig2.Status.RollingStatus
},
time.Second*30, time.Millisecond*500).Should(BeEquivalentTo(v1alpha2.RollingTemplated))
By("Revert the application back to source")
var sourceApp v1alpha2.Application
Expect(readYaml("testdata/rollout/cloneset/app-source.yaml", &sourceApp)).Should(BeNil())
sourceApp.SetAnnotations(util.MergeMapOverrideWithDst(app.GetAnnotations(),
map[string]string{oam.AnnotationRollingComponent: app.Spec.Components[0].Name,
oam.AnnotationAppRollout: strconv.FormatBool(true)}))
Eventually(
func() error {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: app.Name}, &app)
app.Spec = sourceApp.Spec
return k8sClient.Update(ctx, &app)
},
time.Second*60, time.Millisecond*500).Should(Succeed())
By("Wait for AppConfig3 to be templated")
Eventually(
func() v1alpha2.RollingStatus {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: app.Status.LatestRevision.Name}, &appConfig3)
return appConfig3.Status.RollingStatus
},
time.Second*30, time.Millisecond*500).Should(BeEquivalentTo(v1alpha2.RollingTemplated))
By("Modify the application rollout with new target and source")
Eventually(
func() error {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appRollout.Name}, &appRollout)
appRollout.Spec.SourceAppRevisionName = appConfig2.Name
appRollout.Spec.TargetAppRevisionName = appConfig3.Name
return k8sClient.Update(ctx, &appRollout)
},
time.Second*5, time.Millisecond*500).Should(Succeed())
VerifyRolloutOwnsCloneset()
VerifyRolloutSucceeded()
By("VerifySpec AppConfig rolling status")
Eventually(
func() v1alpha2.RollingStatus {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appConfig2.Name}, &appConfig2)
return appConfig2.Status.RollingStatus
},
time.Second*30, time.Millisecond*500).Should(BeEquivalentTo(v1alpha2.InactiveAfterRollingCompleted))
Eventually(
func() v1alpha2.RollingStatus {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appConfig2.Name}, &appConfig3)
return appConfig3.Status.RollingStatus
},
time.Second*30, time.Millisecond*500).Should(BeEquivalentTo(v1alpha2.RollingCompleted))
// Clean up
k8sClient.Delete(ctx, &appRollout)
k8sClient.Delete(ctx, &appConfig3)
})
PIt("Test rolling back after a failed rollout", func() {
CreateClonesetDef()
ApplySourceApp()
MarkSourceAppRolling()
ApplyTargetApp()
VerifyCloneSetPaused()
By("Apply the application rollout that stops after the first batche")
var newAppRollout v1alpha2.AppRollout
Expect(readYaml("testdata/rollout/cloneset/app-rollout.yaml", &newAppRollout)).Should(BeNil())
newAppRollout.Namespace = namespace
batchPartition := 1
newAppRollout.Spec.RolloutPlan.BatchPartition = pointer.Int32Ptr(int32(batchPartition))
Expect(k8sClient.Create(ctx, &newAppRollout)).Should(Succeed())
By("Wait for the rollout phase change to rolling in batches")
Eventually(
func() oamstd.RollingState {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: newAppRollout.Name}, &appRollout)
return appRollout.Status.RollingState
},
time.Second*60, time.Millisecond*500).Should(BeEquivalentTo(oamstd.RollingInBatchesState))
By("Wait for rollout to finish the batches")
Eventually(
func() int32 {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appRollout.Name}, &appRollout)
return appRollout.Status.CurrentBatch
},
time.Second*15, time.Millisecond*500).Should(BeEquivalentTo(batchPartition))
By("VerifySpec that the rollout stops")
// wait for the batch to be ready
Eventually(
func() oamstd.BatchRollingState {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appRollout.Name}, &appRollout)
return appRollout.Status.BatchRollingState
},
time.Second*30, time.Millisecond*500).Should(Equal(oamstd.BatchReadyState))
By("Move back the partition to cause the rollout to fail")
Eventually(
func() error {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appRollout.Name}, &appRollout)
appRollout.Spec.RolloutPlan.BatchPartition = pointer.Int32Ptr(0)
return k8sClient.Update(ctx, &newAppRollout)
},
time.Second*3, time.Millisecond*500).Should(Succeed())
By("Wait for the rollout phase change to fail")
Eventually(
func() oamstd.RollingState {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: newAppRollout.Name}, &appRollout)
return appRollout.Status.RollingState
},
time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(oamstd.RolloutFailedState))
// Clean up
k8sClient.Delete(ctx, &appRollout)
})
PIt("Test rolling by changing the definition", func() {
CreateClonesetDef()
ApplySourceApp()
MarkSourceAppRolling()
By("Apply the definition change")
var cd, newCD v1alpha2.WorkloadDefinition
Expect(readYaml("testdata/rollout/cloneset/clonesetDefinitionModified.yaml.yaml", &newCD)).Should(BeNil())
Eventually(
func() error {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: newCD.Name}, &cd)
cd.Spec = newCD.Spec
return k8sClient.Update(ctx, &cd)
},
time.Second*3, time.Millisecond*300).Should(Succeed())
VerifyAppConfigTemplated(2)
By("Apply the application rollout")
var newAppRollout v1alpha2.AppRollout
Expect(readYaml("testdata/rollout/cloneset/app-rollout.yaml", &newAppRollout)).Should(BeNil())
newAppRollout.Namespace = namespace
newAppRollout.Spec.RolloutPlan.BatchPartition = pointer.Int32Ptr(int32(len(newAppRollout.Spec.RolloutPlan.
RolloutBatches) - 1))
Expect(k8sClient.Create(ctx, &newAppRollout)).Should(Succeed())
VerifyRolloutOwnsCloneset()
VerifyRolloutSucceeded()
VerifyAppConfigRollingCompleted(appConfig2.Name)
VerifyAppConfigInactive(appConfig1.Name)
By("Wait for rollout to finish two batches")
Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: clonesetName},
&kc)).ShouldNot(HaveOccurred())
Expect(kc.Status.UpdatedReplicas).Should(BeEquivalentTo(5))
Expect(kc.Status.UpdatedReadyReplicas).Should(BeEquivalentTo(5))
// Clean up
k8sClient.Delete(ctx, &appRollout)
k8sClient.Delete(ctx, &appConfig2)
k8sClient.Delete(ctx, &appConfig1)
k8sClient.Delete(ctx, &app)
})
})

@@ -96,6 +96,16 @@ var _ = BeforeSuite(func(done Done) {
logf.Log.Error(err, "failed to create k8sClient")
Fail("setup failed")
}
// TODO: Remove this after we get rid of the integration test dir
By("Applying CRD of WorkloadDefinition and TraitDefinition")
var workloadDefinitionCRD crdv1.CustomResourceDefinition
Expect(readYaml("../../charts/vela-core/crds/core.oam.dev_workloaddefinitions.yaml", &workloadDefinitionCRD)).Should(BeNil())
Expect(k8sClient.Create(context.Background(), &workloadDefinitionCRD)).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
var traitDefinitionCRD crdv1.CustomResourceDefinition
Expect(readYaml("../../charts/vela-core/crds/core.oam.dev_traitdefinitions.yaml", &traitDefinitionCRD)).Should(BeNil())
Expect(k8sClient.Create(context.Background(), &traitDefinitionCRD)).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
By("Finished setting up test environment")
// Create manual scaler trait definition
@@ -217,7 +227,7 @@ var _ = BeforeSuite(func(done Done) {
Name: "cluster-admin",
},
}
Expect(k8sClient.Create(context.Background(), &adminRoleBinding)).Should(BeNil())
Expect(k8sClient.Create(context.Background(), &adminRoleBinding)).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
By("Created cluster role binding for the test service account")
crd = crdv1.CustomResourceDefinition{
@@ -291,6 +301,9 @@ var _ = AfterSuite(func() {
Expect(k8sClient.Delete(context.Background(), &crd)).Should(BeNil())
By("Deleted the custom resource definition")
// TODO: Remove this after we get rid of the integration test dir
// Below is a CI hack so that the integration test can run. We need to migrate the integration test
// to this e2e dir and suite (https://github.com/oam-dev/kubevela/issues/1147)
By("Deleting all the definitions by deleting the definition CRDs")
crd = crdv1.CustomResourceDefinition{
ObjectMeta: metav1.ObjectMeta{

@@ -1,18 +0,0 @@
apiVersion: core.oam.dev/v1alpha2
kind: ApplicationDeployment
metadata:
name: rolling-e2e-test
spec:
# application (revision) reference
targetApplicationName: test-e2e-rolling-v2
sourceApplicationName: test-e2e-rolling-v1
# HPA reference (optional)
componentList:
- metrics-provider
rolloutPlan:
rolloutStrategy: "IncreaseFirst"
rolloutBatches:
- replicas: 10%
- replicas: 2
- replicas: 2
batchPartition: 1

@@ -1,18 +0,0 @@
apiVersion: core.oam.dev/v1alpha2
kind: Application
metadata:
name: test-e2e-rolling
annotations:
"app.oam.dev/rolling-components": "metrics-provider"
"app.oam.dev/rollout-template": "true"
spec:
components:
- name: metrics-provider
type: clonesetservice
settings:
cmd:
- ./podinfo
- stress-cpu=1
image: stefanprodan/podinfo:5.0.2
port: 8080
updateStrategyType: InPlaceIfPossible

@@ -1,11 +1,11 @@
apiVersion: core.oam.dev/v1alpha2
kind: ApplicationDeployment
kind: AppRollout
metadata:
name: rolling-e2e-test
spec:
# application (revision) reference
targetApplicationName: test-e2e-rolling-v2
sourceApplicationName: test-e2e-rolling-v1
targetAppRevisionName: test-e2e-rolling-v2
sourceAppRevisionName: test-e2e-rolling-v1
# HPA reference (optional)
componentList:
- metrics-provider
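
For reference, a complete manifest under the new naming would look roughly like the sketch below. It is assembled from the renamed fields in this hunk plus the rollout plan of the deleted app-deploy-pause.yaml earlier in the diff; the batch values are carried over for illustration only.

apiVersion: core.oam.dev/v1alpha2
kind: AppRollout
metadata:
  name: rolling-e2e-test
spec:
  # application revision references (renamed from targetApplicationName/sourceApplicationName)
  targetAppRevisionName: test-e2e-rolling-v2
  sourceAppRevisionName: test-e2e-rolling-v1
  componentList:
    - metrics-provider
  rolloutPlan:
    rolloutStrategy: "IncreaseFirst"
    rolloutBatches:
      - replicas: 10%
      - replicas: 2
      - replicas: 2
    # stop after the second batch; the tests set this to len(rolloutBatches)-1 to run all batches
    batchPartition: 1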

@@ -10,6 +10,6 @@ spec:
cmd:
- ./podinfo
- stress-cpu=1
image: stefanprodan/podinfo:4.0.6
image: stefanprodan/podinfo:4.0.3
port: 8080
updateStrategyType: InPlaceIfPossible

@@ -0,0 +1,106 @@
# Code generated by KubeVela templates. DO NOT EDIT.
apiVersion: core.oam.dev/v1alpha2
kind: WorkloadDefinition
metadata:
name: clonesetservice
namespace: vela-system
annotations:
definition.oam.dev/description: "Describes long-running, scalable, containerized services that have a stable network endpoint to receive external network traffic from customers.
If workload type is skipped for any service defined in Appfile, it will be defaulted to `webservice` type."
spec:
definitionRef:
name: clonesets.apps.kruise.io
schematic:
cue:
template: |
output: {
apiVersion: "apps.kruise.io/v1alpha1"
kind: "CloneSet"
metadata: labels: {
"app.oam.dev/component": context.name
}
spec: {
replicas: parameter.replicas
selector: matchLabels: {
"app.oam.dev/component": context.name
}
template: {
metadata: labels: {
"app.oam.dev/component": context.name
}
spec: {
containers: [{
name: context.name
image: "stefanprodan/podinfo:5.0.2"
if parameter["cmd"] != _|_ {
command: parameter.cmd
}
if parameter["env"] != _|_ {
env: parameter.env
}
if context["config"] != _|_ {
env: context.config
}
ports: [{
containerPort: parameter.port
}]
if parameter["cpu"] != _|_ {
resources: {
limits:
cpu: parameter.cpu
requests:
cpu: parameter.cpu
}
}
}]
}
}
if parameter["updateStrategyType"] != _|_ {
updateStrategy: {
type: parameter.updateStrategyType
}
}
}
}
parameter: {
// +usage=Which image would you like to use for your service
// +short=i
image: string
// +usage=Commands to run in the container
cmd?: [...string]
// +usage=Which port do you want customer traffic sent to
// +short=p
port: *80 | int
// +usage=Define arguments by using environment variables
env?: [...{
// +usage=Environment variable name
name: string
// +usage=The value of the environment variable
value?: string
// +usage=Specifies a source the value of this var should come from
valueFrom?: {
// +usage=Selects a key of a secret in the pod's namespace
secretKeyRef: {
// +usage=The name of the secret in the pod's namespace to select from
name: string
// +usage=The key of the secret to select from. Must be a valid secret key
key: string
}
}
}]
// +usage=Number of CPU units for the service, like `0.5` (0.5 CPU core), `1` (1 CPU core)
cpu?: string
// +usage=Cloneset updateStrategy, candidates are `ReCreate`/`InPlaceIfPossible`/`InPlaceOnly`
updateStrategyType?: string
// +usage=Number of pods in the cloneset
replicas: *5 | int
}
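
As a rough, hand-derived sketch of what this CUE template renders (not actual controller output): assuming the component is named metrics-provider (as in the app-source.yaml testdata) and only updateStrategyType is supplied, so port (80) and replicas (5) take their defaults, the output would be approximately:

apiVersion: apps.kruise.io/v1alpha1
kind: CloneSet
metadata:
  # metadata.name is filled in by the OAM runtime from the component revision, not by the template
  labels:
    app.oam.dev/component: metrics-provider
spec:
  replicas: 5
  selector:
    matchLabels:
      app.oam.dev/component: metrics-provider
  template:
    metadata:
      labels:
        app.oam.dev/component: metrics-provider
    spec:
      containers:
        - name: metrics-provider
          image: stefanprodan/podinfo:5.0.2 # hardcoded in the template above, so definition changes can drive a rollout
          ports:
            - containerPort: 80
  updateStrategy:
    type: InPlaceIfPossible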

@@ -24,6 +24,8 @@ import (
"testing"
"time"
"github.com/crossplane/crossplane-runtime/pkg/logging"
"github.com/crossplane/crossplane-runtime/pkg/test/integration"
corev1 "k8s.io/api/core/v1"
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
@@ -34,9 +36,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"github.com/crossplane/crossplane-runtime/pkg/logging"
"github.com/crossplane/crossplane-runtime/pkg/test/integration"
coreoamdev "github.com/oam-dev/kubevela/apis/core.oam.dev"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
controller "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev"