remove appContext from app/appRollout controller (#1774)

* refine assemble and dispatch

Signed-off-by: roy wang <seiwy2010@gmail.com>

* remove app context in app controller

modify app revision clean-up

remove old resource tracker related logic

fix unit tests

Signed-off-by: roy wang <seiwy2010@gmail.com>

* fix e2e-test

- get rid of appCtx in test cases
- fix test cases according to other logic changes in the app controller

remove the whole appcontext_test.go file

disable rollout-related e2e tests provisionally

disable resource-tracker-related e2e tests provisionally

Signed-off-by: roy wang <seiwy2010@gmail.com>

* add finalizer logic for app controller

Signed-off-by: roywang <seiwy2010@gmail.com>

* add new apply option MustBeControllableByAny

make dispatch idempotent

Signed-off-by: roywang <seiwy2010@gmail.com>
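
A rough sketch of the idea behind this option, for illustration only; the ApplyOption shape and every name below are assumptions, not the committed code:

	// Sketch only, assuming an option pattern like the one in pkg/utils/apply;
	// the real MustBeControllableByAny may differ.
	package apply

	import (
		"context"

		"github.com/pkg/errors"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/apimachinery/pkg/types"
		"sigs.k8s.io/controller-runtime/pkg/client"
	)

	// ApplyOption mirrors the assumed signature of the options in pkg/utils/apply.
	type ApplyOption func(ctx context.Context, existing, desired client.Object) error

	// MustBeControllableByAny lets apply adopt an existing object if it is
	// controlled by any of the given owner UIDs (e.g. any historical resource
	// tracker); together with deterministic names this keeps dispatch idempotent.
	func MustBeControllableByAny(uids []types.UID) ApplyOption {
		return func(_ context.Context, existing, _ client.Object) error {
			if existing == nil {
				return nil // object does not exist yet, nothing to adopt
			}
			c := metav1.GetControllerOf(existing)
			if c == nil {
				return nil // uncontrolled objects are safe to take over
			}
			for _, uid := range uids {
				if c.UID == uid {
					return nil // controlled by one of the allowed owners
				}
			}
			return errors.New("existing object is not controllable by any of the expected owners")
		}
	}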

* refactor rollout

* fix rollout finalization to succeed

Signed-off-by: roywang <seiwy2010@gmail.com>

* add tests for trait update and GC

fix lint

* fix flaky e2e test

Signed-off-by: roywang <seiwy2010@gmail.com>

* fix comment

* fix comments and add sourceRevision dispatch

delete useless code

Signed-off-by: Yue Wang <seiwy2010@gmail.com>

* make app finalizer backward compatible

Signed-off-by: roywang <seiwy2010@gmail.com>

* fix backward compatibility for deprecation of appContext

add unit test for apply option

add e2e test

Signed-off-by: Yue Wang <seiwy2010@gmail.com>

* fix app controller unit test

Signed-off-by: Yue Wang <seiwy2010@gmail.com>

* refine app controller apply logic

Signed-off-by: Yue Wang <seiwy2010@gmail.com>

* fix e2e test of resource tracker

fix e2e test of rollout plan

fix flaky e2e tests

Signed-off-by: Yue Wang <seiwy2010@gmail.com>

* refine comments and remove useless code

Signed-off-by: Yue Wang <seiwy2010@gmail.com>

* disable appCtx controller

add Component handler into app controller

Signed-off-by: Yue Wang <seiwy2010@gmail.com>

Co-authored-by: wangyike <wangyike.wyk@alibaba-inc.com>
Author: Yue Wang
Date: 2021-06-12 15:46:32 +09:00
Committed by: GitHub
Parent: 9de6aea5ab
Commit: 889e38e984
41 changed files with 1983 additions and 2098 deletions

View File

@@ -86,15 +86,6 @@ var ApplicationStatusDeeplyContext = func(context string, applicationName, workl
return app.Status.LatestRevision != nil
}, 180*time.Second, 1*time.Second).Should(gomega.BeTrue())
ginkgo.By("check AppContext reconciled ready")
gomega.Eventually(func() int {
appContext := &v1alpha2.ApplicationContext{}
_ = k8sclient.Get(context2.Background(), client.ObjectKey{
Name: applicationName,
Namespace: "default"}, appContext)
return len(appContext.Status.Workloads)
}, 180*time.Second, 1*time.Second).ShouldNot(gomega.Equal(0))
cli := fmt.Sprintf("vela status %s", applicationName)
output, err := e2e.LongTimeExec(cli, 120*time.Second)
gomega.Expect(err).NotTo(gomega.HaveOccurred())

View File

@@ -18,6 +18,7 @@ package application
import (
"context"
"fmt"
"time"
"github.com/crossplane/crossplane-runtime/apis/core/v1alpha1"
@@ -33,27 +34,38 @@ import (
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
velatypes "github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/appfile"
core "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/application/dispatch"
ac "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/applicationconfiguration"
"github.com/oam-dev/kubevela/pkg/cue/packages"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/discoverymapper"
oamutil "github.com/oam-dev/kubevela/pkg/oam/util"
"github.com/oam-dev/kubevela/pkg/utils/apply"
"github.com/oam-dev/kubevela/version"
)
// RolloutReconcileWaitTime is the time to wait before reconciling an application that is still in the rollout phase
const (
RolloutReconcileWaitTime = time.Second * 3
resourceTrackerFinalizer = "resourceTracker.finalizer.core.oam.dev"
errUpdateApplicationStatus = "cannot update application status"
errUpdateApplicationFinalizer = "cannot update application finalizer"
)
const (
legacyResourceTrackerFinalizer = "resourceTracker.finalizer.core.oam.dev"
// resourceTrackerFinalizer is to delete the resource tracker of the latest app revision.
resourceTrackerFinalizer = "app.oam.dev/resource-tracker-finalizer"
// onlyRevisionFinalizer is to delete all resource trackers of app revisions which may be used
// outside the domain of the app controller, e.g., by the AppRollout controller.
onlyRevisionFinalizer = "app.oam.dev/only-revision-finalizer"
)
// Reconciler reconciles a Application object
type Reconciler struct {
client.Client
@@ -84,100 +96,80 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
}
return ctrl.Result{}, err
}
ctx = oamutil.SetNamespaceInCtx(ctx, app.Namespace)
// this annotation will be propagated to all resources created by the application
if len(app.GetAnnotations()[oam.AnnotationKubeVelaVersion]) == 0 {
oamutil.AddAnnotations(app, map[string]string{
oam.AnnotationKubeVelaVersion: version.VelaVersion,
})
}
if endReconcile, err := r.handleFinalizers(ctx, app); endReconcile {
return ctrl.Result{}, err
}
handler := &appHandler{
r: r,
app: app,
}
if app.ObjectMeta.DeletionTimestamp.IsZero() {
if registerFinalizers(app) {
klog.InfoS("Register new finalizer for application", "application", klog.KObj(app), "finalizers", app.ObjectMeta.Finalizers)
return reconcile.Result{}, errors.Wrap(r.Client.Update(ctx, app), errUpdateApplicationFinalizer)
}
} else {
needUpdate, err := handler.removeResourceTracker(ctx)
if err != nil {
klog.InfoS("Failed to remove application resourceTracker", "err", err)
app.Status.SetConditions(v1alpha1.ReconcileError(errors.Wrap(err, "error to remove finalizer")))
return reconcile.Result{}, errors.Wrap(r.UpdateStatus(ctx, app), errUpdateApplicationStatus)
}
if needUpdate {
klog.InfoS("Remove finalizer of application", "application", app.Namespace+"/"+app.Name, "finalizers", app.ObjectMeta.Finalizers)
return ctrl.Result{}, errors.Wrap(r.Update(ctx, app), errUpdateApplicationFinalizer)
}
// deleting and no need to handle finalizer
return reconcile.Result{}, nil
if app.Status.LatestRevision != nil {
// record previous app revision name
handler.previousRevisionName = app.Status.LatestRevision.Name
}
klog.Info("Start Rendering")
app.Status.Phase = common.ApplicationRendering
klog.Info("Parse template")
// parse template
appParser := appfile.NewApplicationParser(r.Client, r.dm, r.pd)
ctx = oamutil.SetNamespaceInCtx(ctx, app.Namespace)
generatedAppfile, err := appParser.GenerateAppFile(ctx, app)
if err != nil {
klog.InfoS("Failed to parse application", "err", err)
klog.ErrorS(err, "Failed to parse application", "application", klog.KObj(app))
app.Status.SetConditions(errorCondition("Parsed", err))
r.Recorder.Event(app, event.Warning(velatypes.ReasonFailedParse, err))
return handler.handleErr(err)
}
app.Status.SetConditions(readyCondition("Parsed"))
handler.appfile = generatedAppfile
r.Recorder.Event(app, event.Normal(velatypes.ReasonParsed, velatypes.MessageParsed))
handler.appfile = generatedAppfile
appRev, err := handler.GenerateAppRevision(ctx)
if err != nil {
klog.InfoS("Failed to calculate appRevision", "err", err)
klog.ErrorS(err, "Failed to calculate appRevision", "application", klog.KObj(app))
app.Status.SetConditions(errorCondition("Parsed", err))
r.Recorder.Event(app, event.Warning(velatypes.ReasonFailedParse, err))
return handler.handleErr(err)
}
r.Recorder.Event(app, event.Normal(velatypes.ReasonParsed, velatypes.MessageParsed))
// Record the revision so it can be used to render data in context.appRevision
generatedAppfile.RevisionName = appRev.Name
klog.Info("Successfully calculate appRevision", "revisionName", appRev.Name,
"revisionHash", handler.revisionHash, "isNewRevision", handler.isNewRevision)
klog.Info("Build template")
// pass appRevision to appfile, so it can be used to render data in context.appRevision
generatedAppfile.RevisionName = appRev.Name
// build template to applicationconfig & component
ac, comps, err := generatedAppfile.GenerateApplicationConfiguration()
if err != nil {
klog.InfoS("Failed to generate applicationConfiguration", "err", err)
klog.ErrorS(err, "Failed to generate applicationConfiguration", "application", klog.KObj(app))
app.Status.SetConditions(errorCondition("Built", err))
r.Recorder.Event(app, event.Warning(velatypes.ReasonFailedRender, err))
return handler.handleErr(err)
}
err = handler.handleResourceTracker(ctx, comps, ac)
if err != nil {
klog.InfoS("Failed to handle resourceTracker", "err", err)
app.Status.SetConditions(errorCondition("Handle resourceTracker", err))
r.Recorder.Event(app, event.Warning(velatypes.ReasonFailedRender, err))
return handler.handleErr(err)
}
// pass the App label and annotation to ac except some app specific ones
oamutil.PassLabelAndAnnotation(app, ac)
app.Status.SetConditions(readyCondition("Built"))
r.Recorder.Event(app, event.Normal(velatypes.ReasonRendered, velatypes.MessageRendered))
klog.Info("Apply application revision & component to the cluster")
// apply application revision & component to the cluster
klog.Info("Successfully render application resources", "application", klog.KObj(app))
// pass application's labels and annotations to ac
oamutil.PassLabelAndAnnotation(app, ac)
// apply application resources' manifests to the cluster
if err := handler.apply(ctx, appRev, ac, comps); err != nil {
klog.InfoS("Failed to apply application revision & component to the cluster", "err", err)
klog.ErrorS(err, "Failed to apply application resources' manifests",
"application", klog.KObj(app))
app.Status.SetConditions(errorCondition("Applied", err))
r.Recorder.Event(app, event.Warning(velatypes.ReasonFailedApply, err))
return handler.handleErr(err)
}
klog.Info("Successfully apply application resources' manifests", "application", klog.KObj(app))
// if inplace is false and rolloutPlan is nil, it means the user will use an outer AppRollout object to roll out the application
if handler.app.Spec.RolloutPlan != nil {
res, err := handler.handleRollout(ctx)
if err != nil {
klog.InfoS("Failed to handle rollout", "err", err)
klog.ErrorS(err, "Failed to handle rollout", "application", klog.KObj(app))
app.Status.SetConditions(errorCondition("Rollout", err))
r.Recorder.Event(app, event.Warning(velatypes.ReasonFailedRollout, err))
return handler.handleErr(err)
@@ -203,7 +195,7 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
// check application health status
appCompStatus, healthy, err := handler.statusAggregate(generatedAppfile)
if err != nil {
klog.InfoS("Failed to aggregate status", "err", err)
klog.ErrorS(err, "Failed to aggregate status", "application", klog.KObj(app))
app.Status.SetConditions(errorCondition("HealthCheck", err))
r.Recorder.Event(app, event.Warning(velatypes.ReasonFailedHealthCheck, err))
return handler.handleErr(err)
@@ -220,11 +212,12 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
r.Recorder.Event(app, event.Normal(velatypes.ReasonHealthCheck, velatypes.MessageHealthCheck))
app.Status.Phase = common.ApplicationRunning
err = garbageCollection(ctx, handler)
if err != nil {
klog.InfoS("Failed to run Garbage collection", "err", err)
if err := garbageCollection(ctx, handler); err != nil {
klog.ErrorS(err, "Failed to run Garbage collection")
r.Recorder.Event(app, event.Warning(velatypes.ReasonFailedGC, err))
return handler.handleErr(err)
}
klog.Info("Successfully garbage collect", "application", klog.KObj(app))
// Gather status of components
var refComps []v1alpha1.TypedReference
@@ -241,23 +234,93 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
return ctrl.Result{}, r.UpdateStatus(ctx, app)
}
// returns true if any finalizer is newly registered
func registerFinalizers(app *v1beta1.Application) bool {
if !meta.FinalizerExists(&app.ObjectMeta, resourceTrackerFinalizer) && app.Status.ResourceTracker != nil {
meta.AddFinalizer(&app.ObjectMeta, resourceTrackerFinalizer)
return true
// NOTE Because resource trackers are cluster-scoped resources, we cannot garbage collect them
// by setting the (namespace-scoped) application as their owner.
// We delete all resource trackers related to an application through the finalizer logic below.
func (r *Reconciler) handleFinalizers(ctx context.Context, app *v1beta1.Application) (bool, error) {
if app.ObjectMeta.DeletionTimestamp.IsZero() {
if !meta.FinalizerExists(app, resourceTrackerFinalizer) {
meta.AddFinalizer(app, resourceTrackerFinalizer)
klog.InfoS("Register new finalizer for application", "application", klog.KObj(app), "finalizer", resourceTrackerFinalizer)
return true, errors.Wrap(r.Client.Update(ctx, app), errUpdateApplicationFinalizer)
}
if appWillReleaseByRollout(app) {
klog.InfoS("Found an application which will be released by rollout", "application", klog.KObj(app))
if !meta.FinalizerExists(app, onlyRevisionFinalizer) {
meta.AddFinalizer(app, onlyRevisionFinalizer)
klog.InfoS("Register new finalizer for application", "application", klog.KObj(app), "finalizer", onlyRevisionFinalizer)
return true, errors.Wrap(r.Client.Update(ctx, app), errUpdateApplicationFinalizer)
}
}
} else {
if meta.FinalizerExists(app, legacyResourceTrackerFinalizer) {
// TODO(roywang) legacyResourceTrackerFinalizer will be deprecated in the future
// this is for backward compatibility
rt := &v1beta1.ResourceTracker{}
rt.SetName(fmt.Sprintf("%s-%s", app.Namespace, app.Name))
if err := r.Client.Delete(ctx, rt); err != nil && !kerrors.IsNotFound(err) {
klog.ErrorS(err, "Failed to delete legacy resource tracker", "name", rt.Name)
app.Status.SetConditions(v1alpha1.ReconcileError(errors.Wrap(err, "error to remove finalizer")))
return true, errors.Wrap(r.UpdateStatus(ctx, app), errUpdateApplicationStatus)
}
meta.RemoveFinalizer(app, legacyResourceTrackerFinalizer)
return true, errors.Wrap(r.Client.Update(ctx, app), errUpdateApplicationFinalizer)
}
if meta.FinalizerExists(app, resourceTrackerFinalizer) {
if app.Status.LatestRevision != nil && len(app.Status.LatestRevision.Name) != 0 {
latestTracker := &v1beta1.ResourceTracker{}
latestTracker.SetName(dispatch.ConstructResourceTrackerName(app.Status.LatestRevision.Name, app.Namespace))
if err := r.Client.Delete(ctx, latestTracker); err != nil && !kerrors.IsNotFound(err) {
klog.ErrorS(err, "Failed to delete latest resource tracker", "name", latestTracker.Name)
app.Status.SetConditions(v1alpha1.ReconcileError(errors.Wrap(err, "error to remove finalizer")))
return true, errors.Wrap(r.UpdateStatus(ctx, app), errUpdateApplicationStatus)
}
}
meta.RemoveFinalizer(app, resourceTrackerFinalizer)
return true, errors.Wrap(r.Client.Update(ctx, app), errUpdateApplicationFinalizer)
}
if meta.FinalizerExists(app, onlyRevisionFinalizer) {
listOpts := []client.ListOption{
client.MatchingLabels{
oam.LabelAppName: app.Name,
oam.LabelAppNamespace: app.Namespace,
}}
rtList := &v1beta1.ResourceTrackerList{}
if err := r.Client.List(ctx, rtList, listOpts...); err != nil {
klog.ErrorS(err, "Failed to list resource tracker of app", "name", app.Name)
app.Status.SetConditions(v1alpha1.ReconcileError(errors.Wrap(err, "error to remove finalizer")))
return true, errors.Wrap(r.UpdateStatus(ctx, app), errUpdateApplicationStatus)
}
for _, rt := range rtList.Items {
if err := r.Client.Delete(ctx, rt.DeepCopy()); err != nil && !kerrors.IsNotFound(err) {
klog.ErrorS(err, "Failed to delete resource tracker", "name", rt.Name)
app.Status.SetConditions(v1alpha1.ReconcileError(errors.Wrap(err, "error to remove finalizer")))
return true, errors.Wrap(r.UpdateStatus(ctx, app), errUpdateApplicationStatus)
}
}
meta.RemoveFinalizer(app, onlyRevisionFinalizer)
return true, errors.Wrap(r.Client.Update(ctx, app), errUpdateApplicationFinalizer)
}
}
return false
return false, nil
}
// appWillReleaseByRollout judges whether the application will be released by rollout.
// If true, the application controller only creates or updates the application revision but does not emit any other K8s
// resources into the cluster. The rollout controller does the real release work.
func appWillReleaseByRollout(app *v1beta1.Application) bool {
return len(app.GetAnnotations()[oam.AnnotationAppRollout]) != 0 || app.Spec.RolloutPlan != nil
}
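// For illustration only, a hypothetical snippet (not part of this change)
// showing both triggers; the RolloutPlan type is assumed from the standard
// rollout API:
//
//	annotated := &v1beta1.Application{}
//	annotated.SetAnnotations(map[string]string{oam.AnnotationAppRollout: "true"})
//	appWillReleaseByRollout(annotated) // true: an outer AppRollout drives the release
//
//	withPlan := &v1beta1.Application{}
//	withPlan.Spec.RolloutPlan = &v1alpha1.RolloutPlan{}
//	appWillReleaseByRollout(withPlan) // true: in-spec rollout plan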
// SetupWithManager installs the controller with the manager
func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
func (r *Reconciler) SetupWithManager(mgr ctrl.Manager, compHandler *ac.ComponentHandler) error {
// If Application owned these two child objects, an AC status change would notify the application controller and recursively update the AC again, triggering application events again...
return ctrl.NewControllerManagedBy(mgr).
WithOptions(controller.Options{
MaxConcurrentReconciles: r.concurrentReconciles,
}).
For(&v1beta1.Application{}).
Watches(&source.Kind{Type: &v1alpha2.Component{}}, compHandler).
Complete(r)
}
@@ -285,5 +348,10 @@ func Setup(mgr ctrl.Manager, args core.Args) error {
appRevisionLimit: args.AppRevisionLimit,
concurrentReconciles: args.ConcurrentReconciles,
}
return reconciler.SetupWithManager(mgr)
compHandler := &ac.ComponentHandler{
Client: mgr.GetClient(),
RevisionLimit: args.RevisionLimit,
CustomRevisionHookURL: args.CustomRevisionHookURL,
}
return reconciler.SetupWithManager(mgr, compHandler)
}

View File

@@ -295,6 +295,7 @@ var _ = Describe("Test Application Controller", func() {
Expect(k8sClient.Create(ctx, appFailParse.DeepCopyObject())).Should(BeNil())
reconcileOnce(reconciler, reconcile.Request{NamespacedName: appFailParseKey})
reconcileOnce(reconciler, reconcile.Request{NamespacedName: appFailParseKey})
parseEvents, err := recorder.GetEventsWithName(appFailParse.Name)
Expect(err).Should(BeNil())
@@ -312,6 +313,7 @@ var _ = Describe("Test Application Controller", func() {
}
Expect(k8sClient.Create(ctx, appFailRender.DeepCopyObject())).Should(BeNil())
reconcileOnce(reconciler, reconcile.Request{NamespacedName: appFailRenderKey})
reconcileOnce(reconciler, reconcile.Request{NamespacedName: appFailRenderKey})
renderEvents, err := recorder.GetEventsWithName(appFailRender.Name)
Expect(err).Should(BeNil())
@@ -393,10 +395,6 @@ spec:
err = k8sClient.Get(ctx, appKey, &a)
Expect(err).Should(BeNil())
By("Check ApplicationContext Created")
var appContext v1alpha2.ApplicationContext
Expect(k8sClient.Get(ctx, appKey, &appContext)).Should(BeNil())
By("Check Component Created with the expected workload spec")
var component v1alpha2.Component
Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: ns, Name: componentName}, &component)).Should(BeNil())
@@ -431,16 +429,11 @@ spec:
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
Expect(checkApp.Status.Phase).Should(Equal(common.ApplicationRunning))
By("Check ApplicationContext Created")
appContext := &v1alpha2.ApplicationContext{}
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: appwithNoTrait.Namespace,
Name: appwithNoTrait.Name,
}, appContext)).Should(BeNil())
// check that the new appContext has the correct annotation and labels
Expect(appContext.GetAnnotations()[oam.AnnotationAppRollout]).Should(BeEmpty())
Expect(appContext.GetLabels()[oam.LabelAppRevisionHash]).ShouldNot(BeEmpty())
Expect(appContext.Spec.ApplicationRevisionName).ShouldNot(BeEmpty())
By("Check affiliated resource tracker is created")
expectRTName := fmt.Sprintf("%s-%s", checkApp.Status.LatestRevision.Name, checkApp.GetNamespace())
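// resource trackers are cluster-scoped, hence the lookup by name only (no namespace)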
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: expectRTName}, &v1beta1.ResourceTracker{})
}, 10*time.Second, 500*time.Millisecond).Should(Succeed())
By("Check Component Created with the expected workload spec")
var component v1alpha2.Component
@@ -488,12 +481,11 @@ spec:
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
Expect(checkApp.Status.Phase).Should(Equal(common.ApplicationRunning))
By("Check ApplicationContext Created")
appContext := &v1alpha2.ApplicationContext{}
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: app.Namespace,
Name: app.Name,
}, appContext)).Should(BeNil())
By("Check affiliated resource tracker is created")
expectRTName := fmt.Sprintf("%s-%s", checkApp.Status.LatestRevision.Name, checkApp.GetNamespace())
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: expectRTName}, &v1beta1.ResourceTracker{})
}, 10*time.Second, 500*time.Millisecond).Should(Succeed())
By("Check Component Created with the expected workload spec")
component := &v1alpha2.Component{}
@@ -534,18 +526,18 @@ spec:
Expect(k8sClient.Get(ctx, appKey, curApp)).Should(BeNil())
Expect(curApp.Status.Phase).Should(Equal(common.ApplicationRunning))
By("Check ApplicationContext and trait created as expected")
appContext := &v1alpha2.ApplicationContext{}
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: app.Namespace,
Name: app.Name,
}, appContext)).Should(BeNil())
appRevision := &v1beta1.ApplicationRevision{}
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: app.Namespace,
Name: curApp.Status.LatestRevision.Name,
}, appRevision)).Should(BeNil())
By("Check affiliated resource tracker is created")
expectRTName := fmt.Sprintf("%s-%s", appRevision.GetName(), appRevision.GetNamespace())
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: expectRTName}, &v1beta1.ResourceTracker{})
}, 10*time.Second, 500*time.Millisecond).Should(Succeed())
gotTrait := unstructured.Unstructured{}
ac, err := util.RawExtension2AppConfig(appRevision.Spec.ApplicationConfiguration)
@@ -608,19 +600,17 @@ spec:
Expect(k8sClient.Get(ctx, appKey, curApp)).Should(BeNil())
Expect(curApp.Status.Phase).Should(Equal(common.ApplicationRunning))
By("Check AppConfig and trait created as expected")
appContext := &v1alpha2.ApplicationContext{}
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: app.Namespace,
Name: app.Name,
}, appContext)).Should(BeNil())
appRevision := &v1beta1.ApplicationRevision{}
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: app.Namespace,
Name: curApp.Status.LatestRevision.Name,
}, appRevision)).Should(BeNil())
Expect(appContext.Spec.ApplicationRevisionName).Should(Equal(appRevision.Name))
By("Check affiliated resource tracker is created")
expectRTName := fmt.Sprintf("%s-%s", appRevision.GetName(), appRevision.GetNamespace())
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: expectRTName}, &v1beta1.ResourceTracker{})
}, 10*time.Second, 500*time.Millisecond).Should(Succeed())
ac, err := util.RawExtension2AppConfig(appRevision.Spec.ApplicationConfiguration)
Expect(err).Should(BeNil())
@@ -717,19 +707,17 @@ spec:
Expect(scopes[0].Kind).Should(BeEquivalentTo("HealthScope"))
Expect(scopes[0].Name).Should(BeEquivalentTo("appWithTraitAndScope-default-health"))
By("Check AppConfig and trait created as expected")
appContext := &v1alpha2.ApplicationContext{}
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: app.Namespace,
Name: app.Name,
}, appContext)).Should(BeNil())
appRevision := &v1beta1.ApplicationRevision{}
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: app.Namespace,
Name: curApp.Status.LatestRevision.Name,
}, appRevision)).Should(BeNil())
Expect(appContext.Spec.ApplicationRevisionName).Should(Equal(appRevision.Name))
Expect(appContext.GetAnnotations()[oam.AnnotationInplaceUpgrade]).Should(Equal("true"))
By("Check affiliated resource tracker is created")
expectRTName := fmt.Sprintf("%s-%s", appRevision.GetName(), appRevision.GetNamespace())
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: expectRTName}, &v1beta1.ResourceTracker{})
}, 10*time.Second, 500*time.Millisecond).Should(Succeed())
gotTrait := unstructured.Unstructured{}
ac, err := util.RawExtension2AppConfig(appRevision.Spec.ApplicationConfiguration)
@@ -788,19 +776,17 @@ spec:
Expect(k8sClient.Get(ctx, appKey, curApp)).Should(BeNil())
Expect(curApp.Status.Phase).Should(Equal(common.ApplicationRunning))
By("Check AppConfig and trait created as expected")
appContext := &v1alpha2.ApplicationContext{}
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: app.Namespace,
Name: app.Name,
}, appContext)).Should(BeNil())
appRevision := &v1beta1.ApplicationRevision{}
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: app.Namespace,
Name: curApp.Status.LatestRevision.Name,
}, appRevision)).Should(BeNil())
Expect(appContext.Spec.ApplicationRevisionName).Should(Equal(appRevision.Name))
Expect(appContext.GetAnnotations()[oam.AnnotationInplaceUpgrade]).Should(Equal("true"))
By("Check affiliated resource tracker is created")
expectRTName := fmt.Sprintf("%s-%s", appRevision.GetName(), appRevision.GetNamespace())
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: expectRTName}, &v1beta1.ResourceTracker{})
}, 10*time.Second, 500*time.Millisecond).Should(Succeed())
gotTrait := unstructured.Unstructured{}
ac, err := util.RawExtension2AppConfig(appRevision.Spec.ApplicationConfiguration)
@@ -866,21 +852,19 @@ spec:
Expect(k8sClient.Get(ctx, appKey, curApp)).Should(BeNil())
Expect(curApp.Status.Phase).Should(Equal(common.ApplicationRunning))
By("check AC and Component updated")
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: app.Namespace,
Name: app.Name,
}, appContext)).Should(BeNil())
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: app.Namespace,
Name: curApp.Status.LatestRevision.Name,
}, appRevision)).Should(BeNil())
Expect(appContext.Spec.ApplicationRevisionName).Should(Equal(appRevision.Name))
Expect(appContext.GetAnnotations()[oam.AnnotationInplaceUpgrade]).Should(Equal("true"))
Expect(json.Unmarshal(ac.Spec.Components[0].Traits[0].Trait.Raw, &gotTrait)).Should(BeNil())
Expect(gotTrait).Should(BeEquivalentTo(expectScalerTrait("myweb5", app.Name)))
By("Check affiliated resource tracker is upgraded")
expectRTName = fmt.Sprintf("%s-%s", appRevision.GetName(), appRevision.GetNamespace())
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: expectRTName}, &v1beta1.ResourceTracker{})
}, 10*time.Second, 500*time.Millisecond).Should(Succeed())
Expect(ac.Spec.Components[0].Scopes[0].ScopeReference).Should(BeEquivalentTo(v1alpha1.TypedReference{
APIVersion: "core.oam.dev/v1alpha2",
Kind: "HealthScope",
@@ -952,18 +936,11 @@ spec:
Expect(k8sClient.Get(ctx, appKey, curApp)).Should(BeNil())
Expect(curApp.Status.Phase).Should(Equal(common.ApplicationRunning))
By("Check AppConfig and trait created as expected")
appContext := &v1alpha2.ApplicationContext{}
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: app.Namespace,
Name: app.Name,
}, appContext)).Should(BeNil())
appRevision := &v1beta1.ApplicationRevision{}
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: app.Namespace,
Name: curApp.Status.LatestRevision.Name,
}, appRevision)).Should(BeNil())
Expect(appContext.Spec.ApplicationRevisionName).Should(Equal(appRevision.Name))
gotTrait := unstructured.Unstructured{}
ac, err := util.RawExtension2AppConfig(appRevision.Spec.ApplicationConfiguration)
@@ -1058,12 +1035,12 @@ spec:
By("Check App running successfully")
checkApp := &v1beta1.Application{}
Eventually(func() string {
_, err := reconciler.Reconcile(reconcile.Request{NamespacedName: appKey})
if err != nil {
return err.Error()
}
checkApp := &v1beta1.Application{}
err = k8sClient.Get(ctx, appKey, checkApp)
if err != nil {
return err.Error()
@@ -1074,6 +1051,12 @@ spec:
return string(checkApp.Status.Phase)
}(), 5*time.Second, time.Second).Should(BeEquivalentTo(common.ApplicationRunning))
By("Check affiliated resource tracker is created")
expectRTName := fmt.Sprintf("%s-%s", checkApp.Status.LatestRevision.Name, checkApp.GetNamespace())
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: expectRTName}, &v1beta1.ResourceTracker{})
}, 10*time.Second, 500*time.Millisecond).Should(Succeed())
Expect(k8sClient.Delete(ctx, app)).Should(BeNil())
})
@@ -1109,15 +1092,11 @@ spec:
Name: utils.ConstructRevisionName(rolloutApp.Name, 1),
}, appRevision)).Should(BeNil())
By("Check ApplicationContext not created")
appContext := &v1alpha2.ApplicationContext{}
// no appContext with the same name as the app exists
Expect(k8sClient.Get(ctx, appKey, appContext)).ShouldNot(Succeed())
// no appContext with the same name as the apprevision exists
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: rolloutApp.Namespace,
Name: utils.ConstructRevisionName(rolloutApp.Name, 1),
}, appContext)).ShouldNot(Succeed())
By("Check affiliated resource tracker is not created")
expectRTName := fmt.Sprintf("%s-%s", appRevision.GetName(), appRevision.GetNamespace())
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: expectRTName}, &v1beta1.ResourceTracker{})
}, 10*time.Second, 500*time.Millisecond).ShouldNot(Succeed())
By("Check Component Created with the expected workload spec")
var component v1alpha2.Component
@@ -1135,20 +1114,19 @@ spec:
Expect(ac.Spec.Components[0].ComponentName).Should(BeEmpty())
Expect(ac.Spec.Components[0].RevisionName).Should(Equal(component.Status.LatestRevision.Name))
By("Reconcile again to make sure we are not creating more appConfigs")
By("Reconcile again to make sure we are not creating more resource trackers")
reconcileRetry(reconciler, reconcile.Request{NamespacedName: appKey})
By("Verify that no new AppRevision created")
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: rolloutApp.Namespace,
Name: utils.ConstructRevisionName(rolloutApp.Name, 2),
}, appRevision)).ShouldNot(Succeed())
// no appContext with the same name as the app exists
Expect(k8sClient.Get(ctx, appKey, appContext)).ShouldNot(Succeed())
// no appContext with the same name as the apprevision exists
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: rolloutApp.Namespace,
Name: utils.ConstructRevisionName(rolloutApp.Name, 1),
}, appContext)).ShouldNot(Succeed())
By("Check no new affiliated resource tracker is created")
expectRTName = fmt.Sprintf("%s-%s", utils.ConstructRevisionName(rolloutApp.Name, 2), rolloutApp.GetNamespace())
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: expectRTName}, &v1beta1.ResourceTracker{})
}, 10*time.Second, 500*time.Millisecond).ShouldNot(Succeed())
By("Check no new Component created")
Expect(k8sClient.Get(ctx, client.ObjectKey{
@@ -1159,26 +1137,26 @@ spec:
Expect(component.Status.LatestRevision.Revision).ShouldNot(BeNil())
Expect(component.Status.LatestRevision.Revision).Should(BeEquivalentTo(1))
By("Remove rollout annotation should lead to new appContext created")
By("Remove rollout annotation should lead to new resource tracker created")
Expect(k8sClient.Get(ctx, appKey, rolloutApp)).Should(Succeed())
rolloutApp.SetAnnotations(map[string]string{
"keep": "true",
})
Expect(k8sClient.Update(ctx, rolloutApp)).Should(BeNil())
reconcileRetry(reconciler, reconcile.Request{NamespacedName: appKey})
// app should create an appContext
Expect(k8sClient.Get(ctx, appKey, appContext)).Should(Succeed())
Expect(appContext.Spec.ApplicationRevisionName).Should(Equal(utils.ConstructRevisionName(rolloutApp.Name, 1)))
By("Verify that no new AppRevision created")
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: rolloutApp.Namespace,
Name: utils.ConstructRevisionName(rolloutApp.Name, 2),
}, appRevision)).ShouldNot(Succeed())
// no appContext with the same name as the apprevision exists
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: rolloutApp.Namespace,
Name: utils.ConstructRevisionName(rolloutApp.Name, 1),
}, appContext)).ShouldNot(Succeed())
By("Check no new affiliated resource tracker is created")
expectRTName = fmt.Sprintf("%s-%s", utils.ConstructRevisionName(rolloutApp.Name, 2), rolloutApp.GetNamespace())
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: expectRTName}, &v1beta1.ResourceTracker{})
}, 10*time.Second, 500*time.Millisecond).ShouldNot(Succeed())
By("Delete Application, clean the resource")
Expect(k8sClient.Delete(ctx, rolloutApp)).Should(BeNil())
})
@@ -1210,7 +1188,7 @@ spec:
app := appWithTraitHealthStatus.DeepCopy()
app.Spec.Components[0].Name = compName
app.Spec.Components[0].Type = "nworker"
app.Spec.Components[0].Properties = runtime.RawExtension{Raw: []byte(`{"cmd":["sleep","1000"],"image":"busybox3","lives":"3","enemies":"alain"}`)}
app.Spec.Components[0].Properties = runtime.RawExtension{Raw: []byte(`{"cmd":["sleep","1000"],"image":"busybox3","lives":"3","enemies":"alien"}`)}
app.Spec.Components[0].Traits[0].Type = "ingress"
app.Spec.Components[0].Traits[0].Properties = runtime.RawExtension{Raw: []byte(`{"domain":"example.com","http":{"/":80}}`)}
@@ -1385,12 +1363,11 @@ spec:
Name: curApp.Status.LatestRevision.Name,
}, appRevision)).Should(BeNil())
By("Check ApplicationContext created")
appContext := &v1alpha2.ApplicationContext{}
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: curApp.Namespace,
Name: curApp.Name,
}, appContext)).Should(BeNil())
By("Check affiliated resource tracker is created")
expectRTName := fmt.Sprintf("%s-%s", appRevision.GetName(), appRevision.GetNamespace())
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: expectRTName}, &v1beta1.ResourceTracker{})
}, 10*time.Second, 500*time.Millisecond).Should(Succeed())
})
It("app with two components and one component refer to an existing WorkloadDefinition", func() {
@@ -1434,13 +1411,6 @@ spec:
Namespace: curApp.Namespace,
Name: curApp.Status.LatestRevision.Name,
}, appRevision)).Should(BeNil())
By("Check ApplicationContext created")
appContext := &v1alpha2.ApplicationContext{}
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: curApp.Namespace,
Name: curApp.Name,
}, appContext)).Should(BeNil())
})
It("app-import-pkg will create workload by imported kube package", func() {
@@ -1511,15 +1481,11 @@ spec:
Backend: v1beta12.IngressBackend{ServiceName: "myweb", ServicePort: intstr.FromInt(80)}}}}}}},
}})).Should(BeEquivalentTo(""))
By("Check ApplicationContext created")
appContext := &v1alpha2.ApplicationContext{}
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: curApp.Namespace,
Name: curApp.Name,
}, appContext)).Should(BeNil())
// check that the new appContext has the correct annotation and labels
Expect(appContext.GetAnnotations()[oam.AnnotationAppRollout]).Should(BeEmpty())
Expect(appContext.GetLabels()[oam.LabelAppRevisionHash]).ShouldNot(BeEmpty())
By("Check affiliated resource tracker is created")
expectRTName := fmt.Sprintf("%s-%s", appRevision.GetName(), appRevision.GetNamespace())
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: expectRTName}, &v1beta1.ResourceTracker{})
}, 10*time.Second, 500*time.Millisecond).Should(Succeed())
By("Check Component Created with the expected workload spec")
var component v1alpha2.Component
@@ -1597,6 +1563,28 @@ spec:
})
func reconcileRetry(r reconcile.Reconciler, req reconcile.Request) {
// 1st and 2nd reconcile to add finalizers
Eventually(func() error {
result, err := r.Reconcile(req)
if err != nil {
By(fmt.Sprintf("reconcile err: %+v ", err))
} else if result.Requeue || result.RequeueAfter > 0 {
By("reconcile timeout as it still needs to requeue")
return fmt.Errorf("reconcile timeout as it still needs to requeue")
}
return err
}, 3*time.Second, time.Second).Should(BeNil())
Eventually(func() error {
result, err := r.Reconcile(req)
if err != nil {
By(fmt.Sprintf("reconcile err: %+v ", err))
} else if result.Requeue || result.RequeueAfter > 0 {
By("reconcile timeout as it still needs to requeue")
return fmt.Errorf("reconcile timeout as it still needs to requeue")
}
return err
}, 3*time.Second, time.Second).Should(BeNil())
// 3rd reconcile to process the main logic of the app controller
Eventually(func() error {
result, err := r.Reconcile(req)
if err != nil {
@@ -1607,7 +1595,7 @@ func reconcileRetry(r reconcile.Reconciler, req reconcile.Request) {
return fmt.Errorf("reconcile timeout as it still needs to requeue")
}
return err
}, 30*time.Second, time.Second).Should(BeNil())
}, 5*time.Second, time.Second).Should(BeNil())
}
func reconcileOnce(r reconcile.Reconciler, req reconcile.Request) {

View File

@@ -20,30 +20,29 @@ import (
"context"
"encoding/json"
"fmt"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
runtimev1alpha1 "github.com/crossplane/crossplane-runtime/apis/core/v1alpha1"
"github.com/crossplane/crossplane-runtime/pkg/meta"
"github.com/ghodss/yaml"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/oam/util"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
)
var _ = Describe("Test application controller finalizer logic", func() {
ctx := context.TODO()
namespace := "cross-ns-namespace"
namespace := "cross-namespace"
cd := &v1beta1.ComponentDefinition{}
cDDefJson, _ := yaml.YAMLToJSON([]byte(crossCompDefYaml))
@@ -74,6 +73,8 @@ var _ = Describe("Test application controller finalizer logic", func() {
AfterEach(func() {
By("[TEST] Clean up resources after an integration test")
Expect(k8sClient.DeleteAllOf(ctx, &appsv1.Deployment{}, client.InNamespace(namespace)))
Expect(k8sClient.DeleteAllOf(ctx, &v1alpha2.ManualScalerTrait{}, client.InNamespace(namespace)))
})
It("Test component have normal workload", func() {
@@ -84,14 +85,13 @@ var _ = Describe("Test application controller finalizer logic", func() {
By("Create a normal workload app")
checkApp := &v1beta1.Application{}
_, err := reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
Expect(err).Should(BeNil())
reconcileRetry(reconciler, ctrl.Request{NamespacedName: appKey})
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
Expect(checkApp.Status.Phase).Should(Equal(common.ApplicationRunning))
Expect(len(checkApp.Finalizers)).Should(BeEquivalentTo(0))
Expect(len(checkApp.Finalizers)).Should(BeEquivalentTo(1))
rt := &v1beta1.ResourceTracker{}
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name), rt)).Should(util.NotFoundMatcher{})
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name, "v1"), rt)).Should(Succeed())
By("add a cross namespace trait for application")
updateApp := checkApp.DeepCopy()
@@ -102,21 +102,16 @@ var _ = Describe("Test application controller finalizer logic", func() {
},
}
Expect(k8sClient.Update(ctx, updateApp)).Should(BeNil())
// first reconcile will create resourceTracker and set resourceTracker for app status
_, err = reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
Expect(err).Should(BeNil())
reconcileRetry(reconciler, ctrl.Request{NamespacedName: appKey})
checkApp = new(v1beta1.Application)
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name), rt)).Should(BeNil())
Expect(checkApp.Status.ResourceTracker.UID).Should(BeEquivalentTo(rt.UID))
Expect(len(checkApp.Finalizers)).Should(BeEquivalentTo(0))
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name, "v2"), rt)).Should(BeNil())
Expect(len(checkApp.Finalizers)).Should(BeEquivalentTo(1))
// second reconcile will set finalizer for app
_, err = reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
reconcileRetry(reconciler, ctrl.Request{NamespacedName: appKey})
checkApp = new(v1beta1.Application)
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name), rt)).Should(BeNil())
Expect(err).Should(BeNil())
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name, "v2"), rt)).Should(BeNil())
Expect(len(checkApp.Finalizers)).Should(BeEquivalentTo(1))
Expect(checkApp.Finalizers[0]).Should(BeEquivalentTo(resourceTrackerFinalizer))
@@ -126,11 +121,10 @@ var _ = Describe("Test application controller finalizer logic", func() {
updateApp = checkApp.DeepCopy()
updateApp.Spec.Components[0].Traits = nil
Expect(k8sClient.Update(ctx, updateApp)).Should(BeNil())
_, err = reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
Expect(err).Should(BeNil())
reconcileRetry(reconciler, ctrl.Request{NamespacedName: appKey})
checkApp = new(v1beta1.Application)
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name), rt)).Should(util.NotFoundMatcher{})
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name, "v3"), rt)).Should(Succeed())
Expect(checkApp.Status.ResourceTracker).Should(BeNil())
})
@@ -141,16 +135,14 @@ var _ = Describe("Test application controller finalizer logic", func() {
Expect(k8sClient.Create(ctx, app)).Should(BeNil())
By("Create a cross workload app")
_, err := reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
Expect(err).Should(BeNil())
reconcileRetry(reconciler, ctrl.Request{NamespacedName: appKey})
checkApp := &v1beta1.Application{}
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
Expect(checkApp.Status.Phase).Should(Equal(common.ApplicationRunning))
Expect(len(checkApp.Finalizers)).Should(BeEquivalentTo(0))
Expect(len(checkApp.Finalizers)).Should(BeEquivalentTo(1))
rt := &v1beta1.ResourceTracker{}
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name), rt)).Should(BeNil())
_, err = reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
Expect(err).Should(BeNil())
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name, "v1"), rt)).Should(BeNil())
reconcileRetry(reconciler, ctrl.Request{NamespacedName: appKey})
checkApp = new(v1beta1.Application)
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
Expect(len(checkApp.Finalizers)).Should(BeEquivalentTo(1))
@@ -159,12 +151,11 @@ var _ = Describe("Test application controller finalizer logic", func() {
Expect(k8sClient.Delete(ctx, checkApp)).Should(BeNil())
By("delete app will delete resourceTracker")
// reconcile will delete resourceTracker and unset app's finalizer
_, err = reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
Expect(err).Should(BeNil())
reconcileRetry(reconciler, ctrl.Request{NamespacedName: appKey})
checkApp = new(v1beta1.Application)
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(util.NotFoundMatcher{})
checkRt := new(v1beta1.ResourceTracker)
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name), checkRt)).Should(util.NotFoundMatcher{})
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name, "v1"), checkRt)).Should(util.NotFoundMatcher{})
})
It("Test cross namespace workload, then update the app to change the namespace", func() {
@@ -174,34 +165,29 @@ var _ = Describe("Test application controller finalizer logic", func() {
Expect(k8sClient.Create(ctx, app)).Should(BeNil())
By("Create a cross workload app")
_, err := reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
Expect(err).Should(BeNil())
reconcileRetry(reconciler, ctrl.Request{NamespacedName: appKey})
checkApp := &v1beta1.Application{}
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
Expect(checkApp.Status.Phase).Should(Equal(common.ApplicationRunning))
Expect(len(checkApp.Finalizers)).Should(BeEquivalentTo(0))
Expect(len(checkApp.Finalizers)).Should(BeEquivalentTo(1))
rt := &v1beta1.ResourceTracker{}
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name), rt)).Should(BeNil())
_, err = reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
Expect(err).Should(BeNil())
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name, "v1"), rt)).Should(BeNil())
reconcileRetry(reconciler, ctrl.Request{NamespacedName: appKey})
checkApp = new(v1beta1.Application)
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
Expect(len(checkApp.Finalizers)).Should(BeEquivalentTo(1))
Expect(checkApp.Finalizers[0]).Should(BeEquivalentTo(resourceTrackerFinalizer))
Expect(checkApp.Status.ResourceTracker.UID).Should(BeEquivalentTo(rt.UID))
Expect(len(rt.Status.TrackedResources)).Should(BeEquivalentTo(1))
By("Update the app, set type to normal-worker")
checkApp.Spec.Components[0].Type = "normal-worker"
Expect(k8sClient.Update(ctx, checkApp)).Should(BeNil())
_, err = reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
Expect(err).Should(BeNil())
reconcileRetry(reconciler, ctrl.Request{NamespacedName: appKey})
checkApp = new(v1beta1.Application)
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
Expect(checkApp.Status.ResourceTracker).Should(BeNil())
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name), rt)).Should(util.NotFoundMatcher{})
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name, "v2"), rt)).Should(Succeed())
Expect(k8sClient.Delete(ctx, checkApp)).Should(BeNil())
_, err = reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
Expect(err).Should(BeNil())
reconcileRetry(reconciler, ctrl.Request{NamespacedName: appKey})
})
It("Test cross namespace workload and trait, then update the app to delete trait ", func() {
@@ -216,99 +202,31 @@ var _ = Describe("Test application controller finalizer logic", func() {
}
Expect(k8sClient.Create(ctx, app)).Should(BeNil())
By("Create a cross workload trait app")
_, err := reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
Expect(err).Should(BeNil())
reconcileRetry(reconciler, ctrl.Request{NamespacedName: appKey})
checkApp := &v1beta1.Application{}
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
Expect(checkApp.Status.Phase).Should(Equal(common.ApplicationRunning))
Expect(len(checkApp.Finalizers)).Should(BeEquivalentTo(0))
Expect(len(checkApp.Finalizers)).Should(BeEquivalentTo(1))
rt := &v1beta1.ResourceTracker{}
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name), rt)).Should(BeNil())
_, err = reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
Expect(err).Should(BeNil())
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name, "v1"), rt)).Should(BeNil())
reconcileRetry(reconciler, ctrl.Request{NamespacedName: appKey})
checkApp = new(v1beta1.Application)
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
Expect(len(checkApp.Finalizers)).Should(BeEquivalentTo(1))
Expect(checkApp.Finalizers[0]).Should(BeEquivalentTo(resourceTrackerFinalizer))
Expect(checkApp.Status.ResourceTracker.UID).Should(BeEquivalentTo(rt.UID))
Expect(len(rt.Status.TrackedResources)).Should(BeEquivalentTo(2))
By("Update the app, set type to normal-worker")
checkApp.Spec.Components[0].Traits = nil
Expect(k8sClient.Update(ctx, checkApp)).Should(BeNil())
_, err = reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
Expect(err).Should(BeNil())
reconcileRetry(reconciler, ctrl.Request{NamespacedName: appKey})
rt = &v1beta1.ResourceTracker{}
checkApp = new(v1beta1.Application)
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name), rt)).Should(BeNil())
Expect(checkApp.Status.ResourceTracker.UID).Should(BeEquivalentTo(rt.UID))
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name, "v2"), rt)).Should(BeNil())
Expect(len(rt.Status.TrackedResources)).Should(BeEquivalentTo(1))
Expect(k8sClient.Delete(ctx, checkApp)).Should(BeNil())
_, err = reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
Expect(err).Should(BeNil())
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name), rt)).Should(util.NotFoundMatcher{})
})
})
var _ = Describe("Test finalizer related func", func() {
ctx := context.TODO()
namespace := "cross-ns-namespace"
var handler appHandler
BeforeEach(func() {
ns := v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: namespace,
},
}
Expect(k8sClient.Create(ctx, &ns)).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
})
AfterEach(func() {
By("[TEST] Clean up resources after an integration test")
})
It("Test finalizeResourceTracker func with need update ", func() {
app := getApp("app-3", namespace, "worker")
rt := &v1beta1.ResourceTracker{
ObjectMeta: metav1.ObjectMeta{
Name: namespace + "-" + app.GetName(),
},
}
Expect(k8sClient.Create(ctx, rt)).Should(BeNil())
app.Status.ResourceTracker = &runtimev1alpha1.TypedReference{
Name: rt.Name,
Kind: v1beta1.ResourceTrackerGroupKind,
APIVersion: v1beta1.ResourceTrackerKindAPIVersion,
UID: rt.UID}
meta.AddFinalizer(&app.ObjectMeta, resourceTrackerFinalizer)
handler = appHandler{
r: reconciler,
app: app,
}
need, err := handler.removeResourceTracker(ctx)
Expect(err).Should(BeNil())
Expect(need).Should(BeEquivalentTo(true))
Eventually(func() error {
err := k8sClient.Get(ctx, getTrackerKey(namespace, app.Name), rt)
if err == nil || !apierrors.IsNotFound(err) {
return fmt.Errorf("resourceTracker still exsit")
}
return nil
}, time.Second*60, time.Microsecond*300).Should(BeNil())
Expect(app.Status.ResourceTracker).Should(BeNil())
Expect(meta.FinalizerExists(app, resourceTrackerFinalizer)).Should(BeEquivalentTo(false))
})
It("Test finalizeResourceTracker func without need ", func() {
app := getApp("app-4", namespace, "worker")
handler = appHandler{
r: reconciler,
app: app,
}
need, err := handler.removeResourceTracker(ctx)
Expect(err).Should(BeNil())
Expect(need).Should(BeEquivalentTo(false))
reconcileRetry(reconciler, ctrl.Request{NamespacedName: appKey})
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name, "v2"), rt)).Should(util.NotFoundMatcher{})
})
})
@@ -334,8 +252,8 @@ func getApp(appName, namespace, comptype string) *v1beta1.Application {
}
}
func getTrackerKey(namespace, name string) types.NamespacedName {
return types.NamespacedName{Name: fmt.Sprintf("%s-%s", namespace, name)}
func getTrackerKey(namespace, name, revision string) types.NamespacedName {
return types.NamespacedName{Name: fmt.Sprintf("%s-%s-%s", name, revision, namespace)}
}
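// For illustration only (hypothetical values): an application "myapp" in
// namespace "default" gets the first app revision "myapp-v1"
// (utils.ConstructRevisionName("myapp", 1)), so its tracker key is
//
//	getTrackerKey("default", "myapp", "v1") // => {Name: "myapp-v1-default"}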
const (

View File

@@ -19,20 +19,15 @@ package application
import (
"context"
"fmt"
"strconv"
"strings"
"time"
runtimev1alpha1 "github.com/crossplane/crossplane-runtime/apis/core/v1alpha1"
"github.com/crossplane/crossplane-runtime/pkg/meta"
terraformtypes "github.com/oam-dev/terraform-controller/api/types"
terraformapi "github.com/oam-dev/terraform-controller/api/v1beta1"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
ctypes "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/v2"
@@ -46,12 +41,12 @@ import (
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/appfile"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/applicationconfiguration"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/application/assemble"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/application/dispatch"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/applicationrollout"
"github.com/oam-dev/kubevela/pkg/controller/utils"
"github.com/oam-dev/kubevela/pkg/cue/process"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/discoverymapper"
oamutil "github.com/oam-dev/kubevela/pkg/oam/util"
)
@@ -75,20 +70,13 @@ func readyCondition(tpy string) runtimev1alpha1.Condition {
}
type appHandler struct {
r *Reconciler
app *v1beta1.Application
appfile *appfile.Appfile
inplace bool
isNewRevision bool
revisionHash string
acrossNamespaceResources []v1beta1.TypedReference
resourceTracker *v1beta1.ResourceTracker
autodetect bool
}
// setInplace will mark if the application should upgrade the workload within the same instance (name never changed)
func (h *appHandler) setInplace(isInplace bool) {
h.inplace = isInplace
r *Reconciler
app *v1beta1.Application
appfile *appfile.Appfile
previousRevisionName string
isNewRevision bool
revisionHash string
autodetect bool
}
func (h *appHandler) handleErr(err error) (ctrl.Result, error) {
@@ -104,12 +92,13 @@ func (h *appHandler) handleErr(err error) (ctrl.Result, error) {
}, nil
}
// apply will
// 1. set ownerReference for ApplicationConfiguration and Components
// 2. update AC's components using the component revision name
// 3. update or create the AC with new revision and remember it in the application status
// 4. garbage collect unused components
func (h *appHandler) apply(ctx context.Context, appRev *v1beta1.ApplicationRevision, ac *v1alpha2.ApplicationConfiguration, comps []*v1alpha2.Component) error {
// don't create components if revision-only annotation is set
if ac.Annotations[oam.AnnotationAppRevisionOnly] == "true" {
h.FinalizeAppRevision(appRev, ac, comps)
return h.createOrUpdateAppRevision(ctx, appRev)
}
owners := []metav1.OwnerReference{{
APIVersion: v1beta1.SchemeGroupVersion.String(),
Kind: v1beta1.ApplicationKind,
@@ -117,36 +106,17 @@ func (h *appHandler) apply(ctx context.Context, appRev *v1beta1.ApplicationRevis
UID: h.app.UID,
Controller: pointer.BoolPtr(true),
}}
if _, exist := h.app.GetAnnotations()[oam.AnnotationAppRollout]; !exist && h.app.Spec.RolloutPlan == nil {
h.setInplace(true)
} else {
h.setInplace(false)
}
// don't create components and AC if revision-only annotation is set
if ac.Annotations[oam.AnnotationAppRevisionOnly] == "true" {
h.FinalizeAppRevision(appRev, ac, comps)
return h.createOrUpdateAppRevision(ctx, appRev)
}
var needTracker bool
ac.SetOwnerReferences(owners)
var err error
for _, comp := range comps {
comp.SetOwnerReferences(owners)
// If the helm mode component doesn't specify the workload,
// we just install the helm chart resources
if h.checkAutoDetect(comp) {
if h.isNewRevision && h.checkAutoDetect(comp) {
if err = h.applyHelmModuleResources(ctx, comp, owners); err != nil {
return errors.Wrap(err, "cannot apply Helm module resources")
}
continue
}
needTracker, err = h.checkAndSetResourceTracker(&comp.Spec.Workload)
if err != nil {
return err
}
newComp := comp.DeepCopy()
// newComp will be updated, and the revision name is returned instead of the component name
@@ -154,21 +124,12 @@ func (h *appHandler) apply(ctx context.Context, appRev *v1beta1.ApplicationRevis
if err != nil {
return err
}
if needTracker {
if err := h.recodeTrackedWorkload(comp, revisionName); err != nil {
return err
}
}
// find the ACC that contains this component
for i := 0; i < len(ac.Spec.Components); i++ {
// update the AC using the component revision instead of the component name
// we have to keep the AC immutable, including the component it points to
if ac.Spec.Components[i].ComponentName == newComp.Name {
ac.Spec.Components[i].RevisionName = revisionName
ac.Spec.Components[i].ComponentName = ""
if err := h.checkResourceTrackerForTrait(ctx, ac.Spec.Components[i], newComp.Name); err != nil {
return err
}
}
}
// isNewRevision indicates the app is newly created or its spec has changed
@@ -179,7 +140,6 @@ func (h *appHandler) apply(ctx context.Context, appRev *v1beta1.ApplicationRevis
}
}
}
ac.SetOwnerReferences(owners)
h.FinalizeAppRevision(appRev, ac, comps)
if h.autodetect {
@@ -191,14 +151,21 @@ func (h *appHandler) apply(ctx context.Context, appRev *v1beta1.ApplicationRevis
return err
}
// `h.inplace`: the rollout will create an AppContext which will launch the real K8s resources.
// Otherwise, we should create/update the appContext here when there is no rollout controller to take care of new versions.
// In this case, the workload should be updated with the annotation `app.oam.dev/inplace-upgrade=true`.
// `!h.autodetect`: if the workload type of the helm mode component is not clear, an autodetect type workload will be specified by default.
// In this case, the traits attached to the helm mode component will fail to generate,
// so we only call applyHelmModuleResources to create the helm resource and don't generate an ApplicationContext.
if h.inplace && !h.autodetect {
return h.createOrUpdateAppContext(ctx, owners)
if !appWillReleaseByRollout(h.app) && !h.autodetect {
a := assemble.NewAppManifests(appRev).WithWorkloadOption(assemble.DiscoveryHelmBasedWorkload(ctx, h.r.Client))
manifests, err := a.AssembledManifests()
if err != nil {
return errors.WithMessage(err, "cannot assemble resources' manifests")
}
d := dispatch.NewAppManifestsDispatcher(h.r.Client, appRev)
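// hand the previous revision's tracker to the dispatcher so it upgrades from
// that revision and garbage collects resources absent from the new manifests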
if len(h.previousRevisionName) != 0 {
latestTracker := &v1beta1.ResourceTracker{}
latestTracker.SetName(dispatch.ConstructResourceTrackerName(h.previousRevisionName, h.app.Namespace))
d = d.EnableUpgradeAndGC(latestTracker)
}
if _, err := d.Dispatch(ctx, manifests); err != nil {
return errors.WithMessage(err, "cannot dispatch resources' manifests")
}
}
return nil
}
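// A worked sketch of the dispatch branch above, assuming a hypothetical app
// upgraded from revision "demo-v1" to "demo-v2" in namespace "default": the
// previous tracker "demo-v1-default" is handed to EnableUpgradeAndGC,
// unchanged resources get re-owned by "demo-v2-default", and resources only
// tracked by the old revision are garbage collected.
func upgradeDispatchSketch(ctx context.Context, c client.Client, rev2 *v1beta1.ApplicationRevision, manifests []*unstructured.Unstructured) error {
	prevRT := &v1beta1.ResourceTracker{}
	prevRT.SetName(dispatch.ConstructResourceTrackerName("demo-v1", "default"))
	_, err := dispatch.NewAppManifestsDispatcher(c, rev2).
		EnableUpgradeAndGC(prevRT).
		Dispatch(ctx, manifests)
	return err
}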
@@ -397,55 +364,6 @@ func (h *appHandler) createOrUpdateComponent(ctx context.Context, comp *v1alpha2
return curRevisionName, nil
}
// createOrUpdateAppContext will make sure the appContext points to the latest application revision
// this will only be called in the case of no rollout,
func (h *appHandler) createOrUpdateAppContext(ctx context.Context, owners []metav1.OwnerReference) error {
var curAppContext v1alpha2.ApplicationContext
// AC name is the same as the app name if there is no rollout
appContext := v1alpha2.ApplicationContext{
ObjectMeta: metav1.ObjectMeta{
Name: h.app.Name,
Namespace: h.app.Namespace,
},
Spec: v1alpha2.ApplicationContextSpec{
// new AC always point to the latest app revision
ApplicationRevisionName: h.app.Status.LatestRevision.Name,
},
}
appContext.SetOwnerReferences(owners)
// set the AC label and annotation
appLabel := h.app.GetLabels()
if appLabel == nil {
appLabel = make(map[string]string)
}
appLabel[oam.LabelAppRevisionHash] = h.app.Status.LatestRevision.RevisionHash
appContext.SetLabels(appLabel)
appAnnotation := h.app.GetAnnotations()
if appAnnotation == nil {
appAnnotation = make(map[string]string)
}
appAnnotation[oam.AnnotationInplaceUpgrade] = strconv.FormatBool(h.inplace)
appContext.SetAnnotations(appAnnotation)
key := ctypes.NamespacedName{Name: appContext.Name, Namespace: appContext.Namespace}
if err := h.r.Get(ctx, key, &curAppContext); err != nil {
if !apierrors.IsNotFound(err) {
return err
}
klog.InfoS("Create a new appContext", "application name",
appContext.GetName(), "revision it points to", appContext.Spec.ApplicationRevisionName)
return h.r.Create(ctx, &appContext)
}
// we don't need to create another appContext
klog.InfoS("Replace the existing appContext", "appContext", klog.KObj(&appContext),
"revision it points to", appContext.Spec.ApplicationRevisionName)
appContext.ResourceVersion = curAppContext.ResourceVersion
return h.r.Update(ctx, &appContext)
}
func (h *appHandler) applyHelmModuleResources(ctx context.Context, comp *v1alpha2.Component, owners []metav1.OwnerReference) error {
klog.Info("Process a Helm module component")
repo, err := oamutil.RawExtension2Unstructured(&comp.Spec.Helm.Repository)
@@ -471,30 +389,9 @@ func (h *appHandler) applyHelmModuleResources(ctx context.Context, comp *v1alpha
return nil
}
// checkAndSetResourceTracker checks if the resource's namespace differs from the application's; if so, it sets the resourceTracker as
// the resource's ownerReference
func (h *appHandler) checkAndSetResourceTracker(resource *runtime.RawExtension) (bool, error) {
needTracker := false
u, err := oamutil.RawExtension2Unstructured(resource)
if err != nil {
return false, err
}
inDiffNamespace, err := h.checkCrossNamespace(u)
if err != nil {
return false, err
}
if inDiffNamespace {
needTracker = true
ref := h.genResourceTrackerOwnerReference()
// set resourceTracker as the ownerReference of workload/trait
u.SetOwnerReferences([]metav1.OwnerReference{*ref})
raw := oamutil.Object2RawExtension(u)
*resource = raw
return needTracker, nil
}
return needTracker, nil
}
// checkAutoDetect judges whether the workload type of a helm mode component is unclear; in that case an autodetect type workload
// will be specified by default. The traits attached to the helm mode component will then fail to generate, so
// we only call applyHelmModuleResources to create the helm resource and don't generate other K8s resources.
func (h *appHandler) checkAutoDetect(component *v1alpha2.Component) bool {
if len(component.Spec.Workload.Raw) == 0 && component.Spec.Workload.Object == nil && component.Spec.Helm != nil {
h.autodetect = true
@@ -503,166 +400,12 @@ func (h *appHandler) checkAutoDetect(component *v1alpha2.Component) bool {
return false
}
// genResourceTrackerOwnerReference returns an ownerReference pointing to the handler's resourceTracker,
// which is assumed to have been created already.
func (h *appHandler) genResourceTrackerOwnerReference() *metav1.OwnerReference {
return metav1.NewControllerRef(h.resourceTracker, v1beta1.ResourceTrackerKindVersionKind)
}
func (h *appHandler) generateResourceTrackerName() string {
return fmt.Sprintf("%s-%s", h.app.Namespace, h.app.Name)
}
// return true if the resource is cluster-scoped or is not in the same namespace
// as the application
func (h *appHandler) checkCrossNamespace(u *unstructured.Unstructured) (bool, error) {
gk := u.GetObjectKind().GroupVersionKind().GroupKind()
isNamespacedScope, err := discoverymapper.IsNamespacedScope(h.r.dm, gk)
if err != nil {
return false, err
}
if !isNamespacedScope {
// it's cluster-scoped resource
return true, nil
}
// for a namespace-scoped resource, if its namespace is empty,
// we will set the application's namespace on it later,
// so only check non-empty namespaces here
return len(u.GetNamespace()) != 0 && u.GetNamespace() != h.app.Namespace, nil
}
// removeResourceTracker returns whether the application needs to be updated
func (h *appHandler) removeResourceTracker(ctx context.Context) (bool, error) {
client := h.r.Client
rt := new(v1beta1.ResourceTracker)
trackerName := h.generateResourceTrackerName()
key := ctypes.NamespacedName{Name: trackerName}
err := client.Get(ctx, key, rt)
if err != nil {
if apierrors.IsNotFound(err) {
// in some cases the resourceTracker has been deleted but the finalizer still exists
if meta.FinalizerExists(h.app, resourceTrackerFinalizer) {
meta.RemoveFinalizer(h.app, resourceTrackerFinalizer)
return true, nil
}
// in some cases the informer cache hasn't synced the resourceTracker from k8s yet; return an error to trigger another reconcile
if h.app.Status.ResourceTracker != nil {
return false, fmt.Errorf("application status has resourceTracker but cannot get it from k8s")
}
return false, nil
}
return false, err
}
rt = &v1beta1.ResourceTracker{
ObjectMeta: metav1.ObjectMeta{
Name: trackerName,
},
}
err = h.r.Client.Delete(ctx, rt)
if err != nil {
return false, err
}
klog.Info("Delete application resourceTracker")
meta.RemoveFinalizer(h.app, resourceTrackerFinalizer)
h.app.Status.ResourceTracker = nil
return true, nil
}
func (h *appHandler) recodeTrackedWorkload(comp *v1alpha2.Component, compRevisionName string) error {
workloadName, err := h.getWorkloadName(comp.Spec.Workload, comp.Name, compRevisionName)
if err != nil {
return err
}
if err = h.recodeTrackedResource(workloadName, comp.Spec.Workload); err != nil {
return err
}
return nil
}
// checkResourceTrackerForTrait checks each trait's namespace; if it differs from the application's, it sets the resourceTracker as the trait's ownerReference
// and records the trait in the handler's acrossNamespaceResources field
func (h *appHandler) checkResourceTrackerForTrait(ctx context.Context, comp v1alpha2.ApplicationConfigurationComponent, compName string) error {
for i, ct := range comp.Traits {
needTracker, err := h.checkAndSetResourceTracker(&comp.Traits[i].Trait)
if err != nil {
return err
}
if needTracker {
traitName, err := h.getTraitName(ctx, compName, comp.Traits[i].DeepCopy(), &ct.Trait)
if err != nil {
return err
}
if err = h.recodeTrackedResource(traitName, ct.Trait); err != nil {
return err
}
}
}
return nil
}
// getWorkloadName generates the workload name. By default the workload name is generated by the applicationContext; this func lets the application controller
// get the name of a cross-namespace workload. Its logic is the same as the way appConfig generates the workload name.
func (h *appHandler) getWorkloadName(w runtime.RawExtension, componentName string, revisionName string) (string, error) {
workload, err := oamutil.RawExtension2Unstructured(&w)
if err != nil {
return "", err
}
var revision = 0
if len(revisionName) != 0 {
r, err := utils.ExtractRevision(revisionName)
if err != nil {
return "", err
}
revision = r
}
applicationconfiguration.SetAppWorkloadInstanceName(componentName, workload, revision, strconv.FormatBool(h.inplace))
return workload.GetName(), nil
}
// getTraitName generates the trait name. By default the trait name is generated by the applicationContext; this func lets the application controller
// get the name of a cross-namespace trait. Its logic is the same as the way appConfig generates the trait name.
func (h *appHandler) getTraitName(ctx context.Context, componentName string, ct *v1alpha2.ComponentTrait, t *runtime.RawExtension) (string, error) {
trait, err := oamutil.RawExtension2Unstructured(t)
if err != nil {
return "", err
}
traitDef, err := oamutil.FetchTraitDefinition(ctx, h.r, h.r.dm, trait)
if err != nil {
if !apierrors.IsNotFound(err) {
return "", errors.Wrapf(err, "cannot find trait definition %q %q %q", trait.GetAPIVersion(), trait.GetKind(), trait.GetName())
}
traitDef = oamutil.GetDummyTraitDefinition(trait)
}
traitType := traitDef.Name
if strings.Contains(traitType, ".") {
traitType = strings.Split(traitType, ".")[0]
}
traitName := oamutil.GenTraitName(componentName, ct, traitType)
return traitName, nil
}
// recodeTrackedResource appends a cross-namespace resource to the appHandler's acrossNamespaceResources field
func (h *appHandler) recodeTrackedResource(resourceName string, resource runtime.RawExtension) error {
u, err := oamutil.RawExtension2Unstructured(&resource)
if err != nil {
return err
}
tr := new(v1beta1.TypedReference)
tr.Name = resourceName
tr.Namespace = u.GetNamespace()
tr.APIVersion = u.GetAPIVersion()
tr.Kind = u.GetKind()
h.acrossNamespaceResources = append(h.acrossNamespaceResources, *tr)
return nil
}
type garbageCollectFunc func(ctx context.Context, h *appHandler) error
// 1. collect useless across-namespace resources
// 2. collect appRevisions
// execute garbage collection functions, including:
// - clean up legacy app revisions
func garbageCollection(ctx context.Context, h *appHandler) error {
collectFuncs := []garbageCollectFunc{
garbageCollectFunc(gcAcrossNamespaceResource),
garbageCollectFunc(cleanUpApplicationRevision),
}
for _, collectFunc := range collectFuncs {
@@ -673,122 +416,6 @@ func garbageCollection(ctx context.Context, h *appHandler) error {
return nil
}
// Now if workloads or traits are in the same namespace as the application, the applicationContext takes over GC of workloads and traits.
// Here we cover the case in which a cross-namespace component, or one of its cross-namespace traits, is removed from an application.
func gcAcrossNamespaceResource(ctx context.Context, h *appHandler) error {
rt := new(v1beta1.ResourceTracker)
err := h.r.Get(ctx, ctypes.NamespacedName{Name: h.generateResourceTrackerName()}, rt)
if err != nil {
if apierrors.IsNotFound(err) {
// guarantee app status right
h.app.Status.ResourceTracker = nil
return nil
}
return err
}
applied := map[v1beta1.TypedReference]bool{}
if len(h.acrossNamespaceResources) == 0 {
h.app.Status.ResourceTracker = nil
if err := h.r.Delete(ctx, rt); err != nil {
return client.IgnoreNotFound(err)
}
return nil
}
for _, resource := range h.acrossNamespaceResources {
applied[resource] = true
}
for _, ref := range rt.Status.TrackedResources {
if !applied[ref] {
resource := new(unstructured.Unstructured)
resource.SetAPIVersion(ref.APIVersion)
resource.SetKind(ref.Kind)
resource.SetNamespace(ref.Namespace)
resource.SetName(ref.Name)
err := h.r.Delete(ctx, resource)
if err != nil {
if apierrors.IsNotFound(err) {
continue
}
return err
}
}
}
// update resourceTracker status, record the applied across-namespace resources
rt.Status.TrackedResources = h.acrossNamespaceResources
if err := h.r.Status().Update(ctx, rt); err != nil {
return err
}
h.app.Status.ResourceTracker = &runtimev1alpha1.TypedReference{
Name: rt.Name,
Kind: v1beta1.ResourceTrackerGroupKind,
APIVersion: v1beta1.ResourceTrackerKindAPIVersion,
UID: rt.UID}
return nil
}
// handleResourceTracker checks the namespace of all workloads and traits;
// if any resource is across-namespace, it creates a resourceTracker and sets it in the appHandler field
func (h *appHandler) handleResourceTracker(ctx context.Context, components []*v1alpha2.Component, ac *v1alpha2.ApplicationConfiguration) error {
resourceTracker := new(v1beta1.ResourceTracker)
needTracker := false
for _, c := range components {
if h.checkAutoDetect(c) {
continue
}
u, err := oamutil.RawExtension2Unstructured(&c.Spec.Workload)
if err != nil {
return err
}
inDiffNamespace, err := h.checkCrossNamespace(u)
if err != nil {
return err
}
if inDiffNamespace {
needTracker = true
break
}
}
outLoop:
for _, acComponent := range ac.Spec.Components {
for _, t := range acComponent.Traits {
u, err := oamutil.RawExtension2Unstructured(&t.Trait)
if err != nil {
return err
}
inDiffNamespace, err := h.checkCrossNamespace(u)
if err != nil {
return err
}
if inDiffNamespace {
needTracker = true
break outLoop
}
}
}
if needTracker {
// check whether the related resourceTracker exists; if not, create it
err := h.r.Get(ctx, ctypes.NamespacedName{Name: h.generateResourceTrackerName()}, resourceTracker)
if err == nil {
h.resourceTracker = resourceTracker
return nil
}
if apierrors.IsNotFound(err) {
resourceTracker = &v1beta1.ResourceTracker{
ObjectMeta: metav1.ObjectMeta{
Name: h.generateResourceTrackerName(),
},
}
if err = h.r.Client.Create(ctx, resourceTracker); err != nil {
return err
}
h.resourceTracker = resourceTracker
return nil
}
return err
}
return nil
}
func (h *appHandler) handleRollout(ctx context.Context) (reconcile.Result, error) {
var comps []string
for _, component := range h.app.Spec.Components {


@@ -94,6 +94,10 @@ func (am *AppManifests) WithWorkloadOption(wo WorkloadOption) *AppManifests {
}
// AssembledManifests does assemble and merges all assembled resources (except referenced scopes) into one array.
// The result guarantees the order of resources as originally defined in the application.
// If it contains more than one component, the resources are well-ordered and also grouped.
// For example, if app = comp1 (wl1 + trait1 + trait2) + comp2 (wl2 + trait3 + trait4),
// the result is [wl1, trait1, trait2, wl2, trait3, trait4]
func (am *AppManifests) AssembledManifests() ([]*unstructured.Unstructured, error) {
if !am.finalized {
am.assemble()
@@ -102,10 +106,9 @@ func (am *AppManifests) AssembledManifests() ([]*unstructured.Unstructured, erro
return nil, am.err
}
r := make([]*unstructured.Unstructured, 0)
for _, wl := range am.assembledWorkloads {
for compName, wl := range am.assembledWorkloads {
r = append(r, wl.DeepCopy())
}
for _, ts := range am.assembledTraits {
ts := am.assembledTraits[compName]
for _, t := range ts {
r = append(r, t.DeepCopy())
}
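// Ordering sketch for the grouped iteration above, using the hypothetical
// app = comp1 (wl1 + trait1 + trait2) + comp2 (wl2 + trait3 + trait4):
//
//	manifests, _ := NewAppManifests(appRev).AssembledManifests()
//	for _, m := range manifests {
//		fmt.Println(m.GetKind(), m.GetName())
//	}
//	// prints wl1, trait1, trait2, wl2, trait3, trait4, in that order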
@@ -250,11 +253,12 @@ func (am *AppManifests) complete() {
func (am *AppManifests) finalizeAssemble(err error) {
am.finalized = true
if err != nil {
klog.ErrorS(err, "Failed assembling manifests for application", "name", am.appName, "revision", am.AppRevision.GetName())
am.err = errors.WithMessagef(err, "cannot assemble resources' manifests for application %q", am.appName)
if err == nil {
klog.InfoS("Successfully assemble manifests for application", "name", am.appName, "revision", am.AppRevision.GetName(), "namespace", am.appNamespace)
return
}
klog.InfoS("Successfully assemble manifests for application", "name", am.appName, "revision", am.AppRevision.GetName(), "namespace", am.appNamespace)
klog.ErrorS(err, "Failed assembling manifests for application", "name", am.appName, "revision", am.AppRevision.GetName())
am.err = errors.WithMessagef(err, "cannot assemble resources' manifests for application %q", am.appName)
}
// AssembleOptions is highly coupled with AppRevision; should check that the AppRevision provides all info
@@ -302,10 +306,6 @@ func (am *AppManifests) setNamespace(obj *unstructured.Unstructured) {
}
}
func (am *AppManifests) setOwnerReference(obj *unstructured.Unstructured) {
obj.SetOwnerReferences([]metav1.OwnerReference{*am.appOwnerRef})
}
func (am *AppManifests) assembleWorkload(comp *v1alpha2.Component, labels map[string]string) (*unstructured.Unstructured, error) {
compName := comp.Name
wl, err := util.RawExtension2Unstructured(&comp.Spec.Workload)
@@ -318,7 +318,6 @@ func (am *AppManifests) assembleWorkload(comp *v1alpha2.Component, labels map[st
am.setWorkloadLabels(wl, labels)
am.setAnnotations(wl)
am.setNamespace(wl)
am.setOwnerReference(wl)
workloadType := wl.GetLabels()[oam.WorkloadTypeLabel]
compDefinition := am.AppRevision.Spec.ComponentDefinitions[workloadType]
@@ -364,7 +363,6 @@ func (am *AppManifests) assembleTrait(compTrait v1alpha2.ComponentTrait, compNam
am.setTraitLabels(trait, labels)
am.setAnnotations(trait)
am.setNamespace(trait)
am.setOwnerReference(trait)
klog.InfoS("Successfully assemble a trait", "trait", klog.KObj(trait), "APIVersion", trait.GetAPIVersion(), "Kind", trait.GetKind())
return trait, nil
}


@@ -22,7 +22,6 @@ import (
runtimev1alpha1 "github.com/crossplane/crossplane-runtime/apis/core/v1alpha1"
"github.com/ghodss/yaml"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
@@ -103,8 +102,6 @@ var _ = Describe("Test Assemble Options", func() {
oam.WorkloadTypeLabel,
oam.LabelOAMResourceType))
Expect(len(wl.GetAnnotations())).Should(Equal(1))
ownerRef := metav1.GetControllerOf(wl)
Expect(ownerRef.Kind).Should(Equal("Application"))
By("Verify trait metadata (name, namespace, labels, annotations, ownerRef)")
trait := traits[compName][0]
@@ -124,8 +121,6 @@ var _ = Describe("Test Assemble Options", func() {
oam.TraitTypeLabel,
oam.LabelOAMResourceType))
Expect(len(wl.GetAnnotations())).Should(Equal(1))
ownerRef = metav1.GetControllerOf(trait)
Expect(ownerRef.Kind).Should(Equal("Application"))
By("Verify set workload reference to trait")
scaler := traits[compName][2]


@@ -26,10 +26,8 @@ import (
kruisev1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1"
"github.com/pkg/errors"
appsv1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/klog/v2"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
@@ -85,7 +83,7 @@ func discoverHelmModuleWorkload(ctx context.Context, c client.Reader, assembledW
}
}
workloadByHelm := &unstructured.Unstructured{}
workloadByHelm := assembledWorkload.DeepCopy()
if err := c.Get(ctx, client.ObjectKey{Namespace: ns, Name: qualifiedWorkloadName}, workloadByHelm); err != nil {
return err
}
@@ -102,7 +100,7 @@ func discoverHelmModuleWorkload(ctx context.Context, c client.Reader, assembledW
"annotations", annots, "labels", labels)
return err
}
*assembledWorkload = *workloadByHelm
assembledWorkload.SetName(qualifiedWorkloadName)
return nil
}
@@ -126,12 +124,7 @@ func PrepareWorkloadForRollout() WorkloadOption {
advancedStatefulSetDisablePath = "spec.updateStrategy.rollingUpdate.paused"
deploymentDisablePath = "spec.paused"
)
// change the ownerReference and rollout controller will take it over
ownerRef := metav1.GetControllerOf(assembledWorkload)
ownerRef.Controller = pointer.BoolPtr(false)
pv := fieldpath.Pave(assembledWorkload.UnstructuredContent())
// TODO: we can get the workloadDefinition name from workload.GetLabels()["oam.WorkloadTypeLabel"]
// and use a special field like "disablePath" in the definition to allow configurable behavior


@@ -256,8 +256,13 @@ var _ = Describe("Test WorkloadOption", func() {
helm: &common.Helm{
Release: runtime.RawExtension{Raw: releaseRaw},
},
wantWorkload: wl.DeepCopy(),
wantErr: nil,
wantWorkload: func() *unstructured.Unstructured {
r := &unstructured.Unstructured{}
r.SetNamespace(ns)
r.SetName("test-rls-test-chart")
return r
}(),
wantErr: nil,
}),
)
})


@@ -24,12 +24,14 @@ import (
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/retry"
"k8s.io/klog/v2"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/utils/apply"
)
@@ -51,32 +53,33 @@ type AppManifestsDispatcher struct {
applicator apply.Applicator
gcHandler GarbageCollector
appRev *v1beta1.ApplicationRevision
oldRT *v1beta1.ResourceTracker
skipGC bool
appRev *v1beta1.ApplicationRevision
previousRT *v1beta1.ResourceTracker
skipGC bool
appRevName string
namespace string
newRTName string
newRT *v1beta1.ResourceTracker
appRevName string
namespace string
currentRTName string
currentRT *v1beta1.ResourceTracker
}
// EnableGC returns an AppManifestsDispatcher that always does GC after dispatching resources.
// GC will calculate the diff between the dispatched resources and the ones recorded in the given resource tracker.
func (a *AppManifestsDispatcher) EnableGC(rt *v1beta1.ResourceTracker) *AppManifestsDispatcher {
// EnableUpgradeAndGC returns an AppManifestsDispatcher that always does GC after dispatching resources.
// For resources that exist in both revisions, the dispatcher will update their owner to the new resource tracker.
// GC will calculate the diff between the dispatched resources and the ones recorded in the given resource tracker.
func (a *AppManifestsDispatcher) EnableUpgradeAndGC(rt *v1beta1.ResourceTracker) *AppManifestsDispatcher {
if rt != nil {
a.oldRT = rt.DeepCopy()
a.previousRT = rt.DeepCopy()
}
return a
}
// EnableUpgradeAndSkipGC returns an AppManifestsDispatcher that skips GC after dispatching resources.
// For the unchanged resources, the dispatcher will update their owner to the newly created resource tracker.
// For resources that exist in both revisions, the dispatcher will update their owner to the new resource tracker.
// It's helpful in a rollout scenario where the new revision is going to create a new workload while the old one should not
// be deleted before the rollout terminates.
func (a *AppManifestsDispatcher) EnableUpgradeAndSkipGC(rt *v1beta1.ResourceTracker) *AppManifestsDispatcher {
if rt != nil {
a.oldRT = rt.DeepCopy()
a.previousRT = rt.DeepCopy()
a.skipGC = true
}
return a
@@ -86,7 +89,7 @@ func (a *AppManifestsDispatcher) EnableUpgradeAndSkipGC(rt *v1beta1.ResourceTrac
// If GC is enabled, it will do GC after applying.
// If 'UpgradeAndSkipGC' is enabled, it will:
// - create new resources if not exist before
// - update unchanged resources' owner from the old resource tracker to the new one
// - update unchanged resources' owner from the previous resource tracker to the new one
// - skip deleting(GC) any resources
func (a *AppManifestsDispatcher) Dispatch(ctx context.Context, manifests []*unstructured.Unstructured) (*v1beta1.ResourceTracker, error) {
if err := a.validateAndComplete(ctx); err != nil {
@@ -98,12 +101,12 @@ func (a *AppManifestsDispatcher) Dispatch(ctx context.Context, manifests []*unst
if err := a.applyAndRecordManifests(ctx, manifests); err != nil {
return nil, err
}
if !a.skipGC && a.oldRT != nil && a.oldRT.Name != a.newRTName {
if err := a.gcHandler.GarbageCollect(ctx, a.oldRT, a.newRT); err != nil {
return nil, errors.WithMessagef(err, "cannot do GC based on resource trackers %q and %q", a.oldRT.Name, a.newRTName)
if !a.skipGC && a.previousRT != nil && a.previousRT.Name != a.currentRTName {
if err := a.gcHandler.GarbageCollect(ctx, a.previousRT, a.currentRT); err != nil {
return nil, errors.WithMessagef(err, "cannot do GC based on resource trackers %q and %q", a.previousRT.Name, a.currentRTName)
}
}
return a.newRT.DeepCopy(), nil
return a.currentRT.DeepCopy(), nil
}
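// Sketch contrasting the two upgrade modes (illustrative call only): a plain
// upgrade re-owns unchanged resources and GCs what the new revision dropped,
// while a rollout re-owns resources but defers deletion until it terminates.
func dispatchModeSketch(ctx context.Context, c client.Client, rev *v1beta1.ApplicationRevision,
	prevRT *v1beta1.ResourceTracker, manifests []*unstructured.Unstructured, rolling bool) error {
	d := NewAppManifestsDispatcher(c, rev)
	if rolling {
		// rollout scenario: the old revision's resources must stay alive for now
		d = d.EnableUpgradeAndSkipGC(prevRT)
	} else {
		// normal upgrade: stale resources tracked only by prevRT are deleted
		d = d.EnableUpgradeAndGC(prevRT)
	}
	_, err := d.Dispatch(ctx, manifests)
	return err
}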
// ReferenceScopes add workload reference to scopes' workloadRefPath
@@ -127,61 +130,65 @@ func (a *AppManifestsDispatcher) validateAndComplete(ctx context.Context) error
}
a.appRevName = a.appRev.Name
a.namespace = a.appRev.Namespace
a.newRTName = ConstructResourceTrackerName(a.appRevName, a.namespace)
a.currentRTName = ConstructResourceTrackerName(a.appRevName, a.namespace)
// no matter GC or UpgradeAndSkipGC, it requires a valid and existing resource tracker
if a.oldRT != nil {
existingOldRT := &v1beta1.ResourceTracker{}
if err := a.c.Get(ctx, client.ObjectKey{Name: a.oldRT.Name}, existingOldRT); err != nil {
return errors.Errorf("given resource tracker %q doesn't exist", a.oldRT.Name)
// if upgrade is enabled (no matter GC or skip GC), it requires a valid existing resource tracker
if a.previousRT != nil && a.previousRT.Name != a.currentRTName {
klog.InfoS("Validate previous resource tracker exists", "previous", klog.KObj(a.previousRT))
gotPreviousRT := &v1beta1.ResourceTracker{}
if err := a.c.Get(ctx, client.ObjectKey{Name: a.previousRT.Name}, gotPreviousRT); err != nil {
return errors.Errorf("given resource tracker %q doesn't exist", a.previousRT.Name)
}
a.oldRT = existingOldRT
a.previousRT = gotPreviousRT
}
klog.InfoS("Given old resource tracker is nil, so skip GC", "appRevision", klog.KObj(a.appRev))
klog.InfoS("Given previous resource tracker is nil or same as current one, so skip GC", "appRevision", klog.KObj(a.appRev))
return nil
}
func (a *AppManifestsDispatcher) createOrGetResourceTracker(ctx context.Context) error {
rt := &v1beta1.ResourceTracker{}
err := a.c.Get(ctx, client.ObjectKey{Name: a.newRTName}, rt)
err := a.c.Get(ctx, client.ObjectKey{Name: a.currentRTName}, rt)
if err == nil {
klog.InfoS("Found a resource tracker matching current app revision", "resourceTracker", rt.Name)
// already exists, no need to update
// because we assume the manifests' references from a specific application revision never change
a.newRT = rt
a.currentRT = rt
return nil
}
if !kerrors.IsNotFound(err) {
return errors.Wrap(err, "cannot get resource tracker")
}
klog.InfoS("Going to create a resource tracker", "resourceTracker", a.newRTName)
rt.SetName(a.newRTName)
klog.InfoS("Going to create a resource tracker", "resourceTracker", a.currentRTName)
rt.SetName(a.currentRTName)
// these labels can help to list resource trackers of a specific application
rt.SetLabels(map[string]string{
oam.LabelAppName: ExtractAppName(a.currentRTName, a.namespace),
oam.LabelAppNamespace: a.namespace,
})
if err := a.c.Create(ctx, rt); err != nil {
klog.ErrorS(err, "Failed to create a resource tracker", "resourceTracker", a.newRTName)
klog.ErrorS(err, "Failed to create a resource tracker", "resourceTracker", a.currentRTName)
return errors.Wrap(err, "cannot create resource tracker")
}
a.newRT = rt
a.currentRT = rt
return nil
}
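// Sketch of how the labels set above can be used: list all resource trackers
// that belong to one application without parsing tracker names (assumed usage):
func listAppResourceTrackers(ctx context.Context, c client.Client, appName, appNs string) (*v1beta1.ResourceTrackerList, error) {
	rtList := &v1beta1.ResourceTrackerList{}
	err := c.List(ctx, rtList, client.MatchingLabels{
		oam.LabelAppName:      appName,
		oam.LabelAppNamespace: appNs,
	})
	return rtList, err
}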
func (a *AppManifestsDispatcher) applyAndRecordManifests(ctx context.Context, manifests []*unstructured.Unstructured) error {
applyOpts := []apply.ApplyOption{}
if a.oldRT != nil && a.oldRT.Name != a.newRTName {
klog.InfoS("Going to apply and upgrade resources", "from", a.oldRT.Name, "to", a.newRTName)
ctrlUIDs := []types.UID{a.currentRT.UID}
if a.previousRT != nil && a.previousRT.Name != a.currentRTName {
klog.InfoS("Going to apply or upgrade resources", "from", a.previousRT.Name, "to", a.currentRTName)
// if the two RTs' names differ, the dispatching operation happens in an upgrade or rollout scenario
// in those two scenarios, for the unchanged manifests, we will
// - check that existing resources are controlled by the old resource tracker
// - make sure existing resources are controlled by either of these two resource trackers
// - set the new resource tracker as their controller owner
applyOpts = append(applyOpts, apply.MustBeControllableBy(a.oldRT.UID))
} else {
applyOpts = append(applyOpts, apply.MustBeControllableBy(a.newRT.UID))
ctrlUIDs = append(ctrlUIDs, a.previousRT.UID)
}
applyOpts := []apply.ApplyOption{apply.MustBeControllableByAny(ctrlUIDs)}
ownerRef := metav1.OwnerReference{
APIVersion: v1beta1.SchemeGroupVersion.String(),
Kind: reflect.TypeOf(v1beta1.ResourceTracker{}).Name(),
Name: a.newRT.Name,
UID: a.newRT.UID,
Name: a.currentRT.Name,
UID: a.currentRT.UID,
Controller: pointer.BoolPtr(true),
BlockOwnerDeletion: pointer.BoolPtr(true),
}
@@ -201,45 +208,45 @@ func (a *AppManifestsDispatcher) applyAndRecordManifests(ctx context.Context, ma
}
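// A rough sketch of the check an option like MustBeControllableByAny is assumed
// to perform (illustration only, not the actual implementation in pkg/utils/apply):
// an existing object may only be adopted if its current controller owner's UID
// matches one of the given resource tracker UIDs.
func mustBeControllableByAnySketch(uids []types.UID, existing *unstructured.Unstructured) error {
	if existing == nil {
		// the object doesn't exist yet, so creating it is always allowed
		return nil
	}
	c := metav1.GetControllerOf(existing)
	if c == nil {
		return fmt.Errorf("existing object has no controller owner")
	}
	for _, uid := range uids {
		if c.UID == uid {
			return nil
		}
	}
	return fmt.Errorf("existing object is controlled by UID %q, not by any expected resource tracker", c.UID)
}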
func (a *AppManifestsDispatcher) updateResourceTrackerStatus(ctx context.Context, appliedManifests []*unstructured.Unstructured) error {
// merge applied resources and already recorded ones
trackedResources := []v1beta1.TypedReference{}
// merge applied resources and already tracked ones
if a.currentRT.Status.TrackedResources == nil {
a.currentRT.Status.TrackedResources = make([]v1beta1.TypedReference, 0)
}
for _, rsc := range appliedManifests {
ref := v1beta1.TypedReference{
appliedRef := v1beta1.TypedReference{
APIVersion: rsc.GetAPIVersion(),
Kind: rsc.GetKind(),
Name: rsc.GetName(),
Namespace: rsc.GetNamespace(),
}
alreadyTracked := false
for _, existing := range a.newRT.Status.TrackedResources {
if existing.APIVersion == ref.APIVersion && existing.Kind == ref.Kind &&
existing.Name == ref.Name && existing.Namespace == ref.Namespace {
for _, tracked := range a.currentRT.Status.TrackedResources {
if tracked.APIVersion == appliedRef.APIVersion && tracked.Kind == appliedRef.Kind &&
tracked.Name == appliedRef.Name && tracked.Namespace == appliedRef.Namespace {
alreadyTracked = true
break
}
}
if alreadyTracked {
continue
if !alreadyTracked {
a.currentRT.Status.TrackedResources = append(a.currentRT.Status.TrackedResources, appliedRef)
}
trackedResources = append(trackedResources, ref)
}
a.newRT.Status.TrackedResources = trackedResources
// TODO move TrackedResources from status to spec
// update status with retry
copyRT := a.newRT.DeepCopy()
copyRT := a.currentRT.DeepCopy()
sts := copyRT.Status
if err := retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
if err = a.c.Get(ctx, client.ObjectKey{Name: a.newRTName}, copyRT); err != nil {
if err = a.c.Get(ctx, client.ObjectKey{Name: a.currentRTName}, copyRT); err != nil {
return
}
copyRT.Status = sts
return a.c.Status().Update(ctx, copyRT)
}); err != nil {
klog.ErrorS(err, "Failed to update resource tracker status", "resourceTracker", a.newRTName)
klog.ErrorS(err, "Failed to update resource tracker status", "resourceTracker", a.currentRTName)
return errors.Wrap(err, "cannot update resource tracker status")
}
klog.InfoS("Successfully update resource tracker status", "resourceTracker", a.newRTName)
klog.InfoS("Successfully update resource tracker status", "resourceTracker", a.currentRTName)
return nil
}


@@ -229,7 +229,7 @@ var _ = Describe("Test AppManifestsDispatcher", func() {
Expect(k8sClient.Get(ctx, client.ObjectKey{Name: pvName1}, &corev1.PersistentVolume{})).Should(Succeed())
By("Dispatch application revision 1 again with v1 as latest RT")
dp = NewAppManifestsDispatcher(k8sClient, appRev1).EnableGC(rtForAppV1)
dp = NewAppManifestsDispatcher(k8sClient, appRev1).EnableUpgradeAndGC(rtForAppV1)
_, err = dp.Dispatch(ctx, []*unstructured.Unstructured{deploy1, svc1, pv1})
Expect(err).Should(BeNil())
By("Verify resources still exist")
@@ -250,7 +250,7 @@ var _ = Describe("Test AppManifestsDispatcher", func() {
Expect(k8sClient.Get(ctx, client.ObjectKey{Name: pvName1}, &corev1.PersistentVolume{})).Should(Succeed())
By("Dispatch application revision 2 with v1 as latest RT")
dp2 := NewAppManifestsDispatcher(k8sClient, appRev2).EnableGC(rtForAppV1)
dp2 := NewAppManifestsDispatcher(k8sClient, appRev2).EnableUpgradeAndGC(rtForAppV1)
_, err = dp2.Dispatch(ctx, []*unstructured.Unstructured{deploy2, svc2, pv2})
Expect(err).Should(BeNil())
By("Verify v2 resources are applied successfully")
@@ -284,7 +284,7 @@ var _ = Describe("Test AppManifestsDispatcher", func() {
Expect(owner.Name).Should(Equal(rtForAppV1.Name))
By("Dispatch application revision 2 with v1 as latest RT")
dp2 := NewAppManifestsDispatcher(k8sClient, appRev2).EnableGC(rtForAppV1)
dp2 := NewAppManifestsDispatcher(k8sClient, appRev2).EnableUpgradeAndGC(rtForAppV1)
rtForAppV2, err := dp2.Dispatch(ctx, []*unstructured.Unstructured{deploy2, svc1, pv2}) // manifests have 'svc1'
Expect(err).Should(BeNil())


@@ -93,8 +93,8 @@ func (h *GCHandler) validate() error {
oldRTName := h.oldRT.Name
newRTName := h.newRT.Name
if strings.HasSuffix(oldRTName, h.namespace) && strings.HasSuffix(newRTName, h.namespace) {
if extractAppNameFromResourceTrackerName(oldRTName, h.namespace) ==
extractAppNameFromResourceTrackerName(newRTName, h.namespace) {
if ExtractAppName(oldRTName, h.namespace) ==
ExtractAppName(newRTName, h.namespace) {
return nil
}
}


@@ -28,7 +28,13 @@ func ConstructResourceTrackerName(appRevName, ns string) string {
return fmt.Sprintf("%s-%s", appRevName, ns)
}
func extractAppNameFromResourceTrackerName(name, ns string) string {
splits := strings.Split(strings.TrimSuffix(name, "-"+ns), "-")
// ExtractAppName gets the application name from a resource tracker name
func ExtractAppName(resourceTrackerName, ns string) string {
splits := strings.Split(strings.TrimSuffix(resourceTrackerName, "-"+ns), "-")
return strings.Join(splits[0:len(splits)-1], "-")
}
// ExtractAppRevisionName gets the application revision name from a resource tracker name
func ExtractAppRevisionName(resourceTrackerName, ns string) string {
return strings.TrimSuffix(resourceTrackerName, "-"+ns)
}
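// A quick round-trip of the naming helpers above, with hypothetical names:
func ExampleExtractAppName() {
	rtName := ConstructResourceTrackerName("myapp-v2", "prod")
	fmt.Println(rtName)                                 // myapp-v2-prod
	fmt.Println(ExtractAppName(rtName, "prod"))         // myapp
	fmt.Println(ExtractAppRevisionName(rtName, "prod")) // myapp-v2
}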


@@ -41,12 +41,16 @@ func TestExtractAppNameFromResourceTrackerName(t *testing.T) {
for _, tc := range testcases {
gotRTName := ConstructResourceTrackerName(tc.appRevName, tc.ns)
gotAppName := extractAppNameFromResourceTrackerName(gotRTName, tc.ns)
gotAppName := ExtractAppName(gotRTName, tc.ns)
gotAppRevName := ExtractAppRevisionName(gotRTName, tc.ns)
if gotRTName != tc.wantRTName {
t.Fatalf("expect resource tracker name %q but got %q", tc.wantRTName, gotRTName)
}
if gotAppName != tc.wantAppName {
t.Fatalf("expect app name %q but got %q", tc.wantAppName, gotAppName)
}
if gotAppRevName != tc.appRevName {
t.Fatalf("expect app revision name %q but got %q", tc.appRevName, gotAppRevName)
}
}
}


@@ -31,6 +31,7 @@ import (
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/application/dispatch"
"github.com/oam-dev/kubevela/pkg/controller/utils"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/util"
@@ -339,21 +340,23 @@ func cleanUpApplicationRevision(ctx context.Context, h *appHandler) error {
// gatherUsingAppRevision gets all in-use appRevisions, including the one the app's status points to and those that resource trackers point to
func gatherUsingAppRevision(ctx context.Context, h *appHandler) (map[string]bool, error) {
ns := h.app.Namespace
listOpts := []client.ListOption{
client.InNamespace(h.app.Namespace),
client.MatchingLabels{oam.LabelAppName: h.app.Name},
}
client.MatchingLabels{
oam.LabelAppName: h.app.Name,
oam.LabelAppNamespace: ns,
}}
usingRevision := map[string]bool{}
if h.app.Status.LatestRevision != nil && len(h.app.Status.LatestRevision.Name) != 0 {
usingRevision[h.app.Status.LatestRevision.Name] = true
}
appContextList := new(v1alpha2.ApplicationContextList)
err := h.r.List(ctx, appContextList, listOpts...)
if err != nil {
rtList := &v1beta1.ResourceTrackerList{}
if err := h.r.List(ctx, rtList, listOpts...); err != nil {
return nil, err
}
for _, appContext := range appContextList.Items {
usingRevision[appContext.Spec.ApplicationRevisionName] = true
for _, rt := range rtList.Items {
appRev := dispatch.ExtractAppRevisionName(rt.Name, ns)
usingRevision[appRev] = true
}
appDeployUsingRevision, err := utils.CheckAppDeploymentUsingAppRevision(ctx, h.r, h.app.Namespace, h.app.Name)
if err != nil {


@@ -22,8 +22,6 @@ import (
"fmt"
"time"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -37,7 +35,7 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/util"
@@ -77,11 +75,8 @@ var _ = Describe("Test application controller clean up ", func() {
property := fmt.Sprintf(`{"cmd":["sleep","1000"],"image":"busybox:%d"}`, i)
checkApp.Spec.Components[0].Properties = runtime.RawExtension{Raw: []byte(property)}
Expect(k8sClient.Update(ctx, checkApp)).Should(BeNil())
_, err := reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
Expect(err).Should(BeNil())
reconcileRetry(reconciler, ctrl.Request{NamespacedName: appKey})
}
appContext := new(v1alpha2.ApplicationContext)
Expect(k8sClient.Get(ctx, appKey, appContext)).Should(BeNil())
listOpts := []client.ListOption{
client.InNamespace(namespace),
client.MatchingLabels{
@@ -110,6 +105,9 @@ var _ = Describe("Test application controller clean up ", func() {
deletedRevison := new(v1beta1.ApplicationRevision)
revKey := types.NamespacedName{Namespace: namespace, Name: appName + "-v1"}
Eventually(func() error {
if _, err = reconciler.Reconcile(ctrl.Request{NamespacedName: appKey}); err != nil {
return err
}
err := k8sClient.List(ctx, appRevisionList, listOpts...)
if err != nil {
return err
@@ -132,6 +130,9 @@ var _ = Describe("Test application controller clean up ", func() {
_, err = reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
Expect(err).Should(BeNil())
Eventually(func() error {
if _, err = reconciler.Reconcile(ctrl.Request{NamespacedName: appKey}); err != nil {
return err
}
err := k8sClient.List(ctx, appRevisionList, listOpts...)
if err != nil {
return err
@@ -161,11 +162,8 @@ var _ = Describe("Test application controller clean up ", func() {
property := fmt.Sprintf(`{"cmd":["sleep","1000"],"image":"busybox:%d"}`, i)
checkApp.Spec.Components[0].Properties = runtime.RawExtension{Raw: []byte(property)}
Expect(k8sClient.Update(ctx, checkApp)).Should(BeNil())
_, err := reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
Expect(err).Should(BeNil())
reconcileRetry(reconciler, ctrl.Request{NamespacedName: appKey})
}
appContext := new(v1alpha2.ApplicationContext)
Expect(k8sClient.Get(ctx, appKey, appContext)).Should(util.NotFoundMatcher{})
listOpts := []client.ListOption{
client.InNamespace(namespace),
client.MatchingLabels{
@@ -194,6 +192,9 @@ var _ = Describe("Test application controller clean up ", func() {
deletedRevison := new(v1beta1.ApplicationRevision)
revKey := types.NamespacedName{Namespace: namespace, Name: appName + "-v1"}
Eventually(func() error {
if _, err = reconciler.Reconcile(ctrl.Request{NamespacedName: appKey}); err != nil {
return err
}
err := k8sClient.List(ctx, appRevisionList, listOpts...)
if err != nil {
return err
@@ -209,7 +210,7 @@ var _ = Describe("Test application controller clean up ", func() {
return fmt.Errorf("appRevision collection mismatch")
}
return nil
}, time.Second*30, time.Microsecond*300).Should(BeNil())
}, time.Second*10, time.Second*2).Should(BeNil())
By("update app again will gc appRevision2")
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
@@ -219,6 +220,9 @@ var _ = Describe("Test application controller clean up ", func() {
_, err = reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
Expect(err).Should(BeNil())
Eventually(func() error {
if _, err = reconciler.Reconcile(ctrl.Request{NamespacedName: appKey}); err != nil {
return err
}
err := k8sClient.List(ctx, appRevisionList, listOpts...)
if err != nil {
return err
@@ -251,11 +255,8 @@ var _ = Describe("Test application controller clean up ", func() {
property := fmt.Sprintf(`{"cmd":["sleep","1000"],"image":"busybox:%d"}`, i)
checkApp.Spec.Components[0].Properties = runtime.RawExtension{Raw: []byte(property)}
Expect(k8sClient.Update(ctx, checkApp)).Should(BeNil())
_, err := reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
Expect(err).Should(BeNil())
reconcileRetry(reconciler, ctrl.Request{NamespacedName: appKey})
}
appContext := new(v1alpha2.ApplicationContext)
Expect(k8sClient.Get(ctx, appKey, appContext)).Should(util.NotFoundMatcher{})
listOpts := []client.ListOption{
client.InNamespace(namespace),
client.MatchingLabels{
@@ -284,6 +285,9 @@ var _ = Describe("Test application controller clean up ", func() {
deletedRevison := new(v1beta1.ApplicationRevision)
revKey := types.NamespacedName{Namespace: namespace, Name: appName + "-v1"}
Eventually(func() error {
if _, err = reconciler.Reconcile(ctrl.Request{NamespacedName: appKey}); err != nil {
return err
}
err := k8sClient.List(ctx, appRevisionList, listOpts...)
if err != nil {
return err
@@ -299,7 +303,7 @@ var _ = Describe("Test application controller clean up ", func() {
return fmt.Errorf("appRevision collection mismatch")
}
return nil
}, time.Second*30, time.Microsecond*300).Should(BeNil())
}, time.Second*10, time.Second*2).Should(BeNil())
By("update create appDeploy check gc logic")
appDeploy := &v1beta1.AppDeployment{
@@ -331,6 +335,9 @@ var _ = Describe("Test application controller clean up ", func() {
Expect(err).Should(BeNil())
}
Eventually(func() error {
if _, err = reconciler.Reconcile(ctrl.Request{NamespacedName: appKey}); err != nil {
return err
}
err := k8sClient.List(ctx, appRevisionList, listOpts...)
if err != nil {
return err
@@ -381,11 +388,13 @@ var _ = Describe("Test gatherUsingAppRevision func", func() {
Name: appName + "-v1",
}
Expect(k8sClient.Create(ctx, app)).Should(BeNil())
appContext := getAppContext(namespace, appName+"-ctx", appName+"-v2")
appContext.Labels = map[string]string{
oam.LabelAppName: appName,
}
Expect(k8sClient.Create(ctx, appContext)).Should(BeNil())
rt := &v1beta1.ResourceTracker{}
rt.SetName(appName + "-v2-" + namespace)
rt.SetLabels(map[string]string{
oam.LabelAppName: appName,
oam.LabelAppNamespace: namespace,
})
Expect(k8sClient.Create(ctx, rt)).Should(BeNil())
handler := appHandler{
r: reconciler,
app: app,
@@ -408,19 +417,3 @@ var _ = Describe("Test gatherUsingAppRevision func", func() {
}, time.Second*60, time.Microsecond).Should(BeNil())
})
})
func getAppContext(namespace, name string, pointingRev string) *v1alpha2.ApplicationContext {
return &v1alpha2.ApplicationContext{
TypeMeta: metav1.TypeMeta{
APIVersion: v1alpha2.ApplicationContextKindAPIVersion,
Kind: v1alpha2.ApplicationContextKind,
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: "app-rollout",
},
Spec: v1alpha2.ApplicationContextSpec{
ApplicationRevisionName: pointingRev,
},
}
}


@@ -29,7 +29,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
@@ -259,20 +258,8 @@ var _ = Describe("test generate revision ", func() {
Expect(len(curAppRevision.GetOwnerReferences())).Should(BeEquivalentTo(1))
Expect(curAppRevision.GetOwnerReferences()[0].Kind).Should(Equal(v1alpha2.ApplicationKind))
By("Verify that an application context is created to point to the correct appRevision")
curAC := &v1alpha2.ApplicationContext{}
Expect(handler.r.Get(ctx,
types.NamespacedName{Namespace: ns.Name, Name: app.Name},
curAC)).NotTo(HaveOccurred())
Expect(curAC.GetLabels()[oam.LabelAppRevisionHash]).Should(Equal(appHash1))
Expect(curAC.Spec.ApplicationRevisionName).Should(Equal(curApp.Status.LatestRevision.Name))
Expect(curAC.GetAnnotations()[annoKey1]).ShouldNot(BeEmpty())
Expect(curAC.GetAnnotations()[oam.AnnotationInplaceUpgrade]).Should(Equal("true"))
Expect(metav1.GetControllerOf(curAC)).ShouldNot(BeNil())
Expect(metav1.GetControllerOf(curAC).Kind).Should(Equal(v1alpha2.ApplicationKind))
By("Apply the application again without any spec change")
// there can be annotation changes and the appContext should have exactly the same labels/annotations as the app
handler.previousRevisionName = "revision-apply-test-v1"
annoKey2 := "testKey2"
app.SetAnnotations(map[string]string{annoKey2: "true"})
lastRevision := curApp.Status.LatestRevision.Name
@@ -301,15 +288,6 @@ var _ = Describe("test generate revision ", func() {
time.Second*5, time.Millisecond*500).Should(BeNil())
Expect(err).Should(Succeed())
Expect(curAppRevision.GetLabels()[oam.LabelAppRevisionHash]).Should(Equal(appHash1))
By("Verify that an application context is created to point to the same appRevision")
curAC = &v1alpha2.ApplicationContext{}
Expect(handler.r.Get(ctx,
types.NamespacedName{Namespace: ns.Name, Name: app.Name},
curAC)).NotTo(HaveOccurred())
Expect(curAC.GetLabels()[oam.LabelAppRevisionHash]).Should(Equal(appHash1))
Expect(curAC.Spec.ApplicationRevisionName).Should(Equal(lastRevision))
Expect(curAC.GetAnnotations()[annoKey1]).Should(BeEmpty())
Expect(curAC.GetAnnotations()[annoKey2]).ShouldNot(BeEmpty())
By("Change the application and apply again")
// bump the image tag
@@ -354,15 +332,6 @@ var _ = Describe("test generate revision ", func() {
Expect(appHash1).ShouldNot(Equal(appHash2))
Expect(curAppRevision.GetLabels()[oam.LabelAppRevisionHash]).Should(Equal(appHash2))
Expect(curApp.Status.LatestRevision.RevisionHash).Should(Equal(appHash2))
By("Verify that an application context is created to point to the right appRevision")
curAC = &v1alpha2.ApplicationContext{}
Expect(handler.r.Get(ctx,
types.NamespacedName{Namespace: ns.Name, Name: app.Name},
curAC)).NotTo(HaveOccurred())
Expect(curAC.GetLabels()[oam.LabelAppRevisionHash]).Should(Equal(appHash2))
Expect(curAC.Spec.ApplicationRevisionName).Should(Equal(curApp.Status.LatestRevision.Name))
Expect(curAC.GetAnnotations()[annoKey2]).ShouldNot(BeEmpty())
Expect(metav1.GetControllerOf(curAC).Kind).Should(Equal(v1alpha2.ApplicationKind))
})
It("Test App with rollout template", func() {
@@ -406,19 +375,7 @@ var _ = Describe("test generate revision ", func() {
Expect(len(curAppRevision.GetOwnerReferences())).Should(BeEquivalentTo(1))
Expect(curAppRevision.GetOwnerReferences()[0].Kind).Should(Equal(v1alpha2.ApplicationKind))
By("Verify that no application context is created")
curACs := &v1alpha2.ApplicationContextList{}
opts := []client.ListOption{
client.InNamespace(namespaceName),
}
Eventually(
func() error {
return handler.r.List(ctx, curACs, opts...)
}, time.Second*5, time.Microsecond*500).Should(Succeed())
Expect(len(curACs.Items)).Should(BeEquivalentTo(0))
By("Apply the application again without any spec change but remove the rollout annotation")
// there can be annotation changes and the appContext should have exactly the same labels/annotations as the app
annoKey2 := "testKey2"
app.SetAnnotations(map[string]string{annoKey2: "true"})
lastRevision := curApp.Status.LatestRevision.Name
@@ -448,14 +405,6 @@ var _ = Describe("test generate revision ", func() {
Expect(err).Should(Succeed())
Expect(curAppRevision.GetLabels()[oam.LabelAppRevisionHash]).Should(Equal(appHash1))
Expect(curAppRevision.GetAnnotations()[annoKey2]).ShouldNot(BeEmpty())
By("Verify that an application context is created to point to the same appRevision")
curAC := &v1alpha2.ApplicationContext{}
Expect(handler.r.Get(ctx,
types.NamespacedName{Namespace: ns.Name, Name: app.Name},
curAC)).NotTo(HaveOccurred())
Expect(curAC.GetLabels()[oam.LabelAppRevisionHash]).Should(Equal(appHash1))
Expect(curAC.Spec.ApplicationRevisionName).Should(Equal(lastRevision))
Expect(curAC.GetAnnotations()[annoKey2]).ShouldNot(BeEmpty())
By("Change the application and apply again with rollout")
// bump the image tag
@@ -503,12 +452,6 @@ var _ = Describe("test generate revision ", func() {
Expect(curApp.Status.LatestRevision.RevisionHash).Should(Equal(appHash2))
Expect(curAppRevision.GetAnnotations()[annoKey2]).Should(BeEmpty())
Expect(curAppRevision.GetAnnotations()[oam.AnnotationAppRollout]).ShouldNot(BeEmpty())
By("Verify that no more application context is created")
Eventually(
func() error {
return handler.r.List(ctx, curACs, opts...)
}, time.Second*5, time.Microsecond*500).Should(Succeed())
Expect(len(curACs.Items)).Should(BeEquivalentTo(1))
})
It("Test apply passes all label and annotation from app to appRevision", func() {
@@ -550,7 +493,6 @@ var _ = Describe("test generate revision ", func() {
Expect(appHash1).Should(Equal(curApp.Status.LatestRevision.RevisionHash))
Expect(curAppRevision.GetLabels()[labelKey1]).Should(Equal("true"))
Expect(curAppRevision.GetAnnotations()[annoKey1]).Should(Equal("true"))
// there can be annotation changes and the appContext should have exactly the same labels/annotations as the app
annoKey2 := "testKey2"
app.SetAnnotations(map[string]string{annoKey2: "true"})
labelKey2 := "labelKey2"


@@ -18,13 +18,13 @@ package applicationrollout
import (
"context"
"strconv"
"time"
"github.com/crossplane/crossplane-runtime/pkg/event"
"github.com/crossplane/crossplane-runtime/pkg/meta"
"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/util/retry"
"k8s.io/klog/v2"
@@ -33,13 +33,10 @@ import (
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
oamv1alpha2 "github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/controller/common/rollout"
oamctrl "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/discoverymapper"
oamutil "github.com/oam-dev/kubevela/pkg/oam/util"
)
@@ -110,119 +107,86 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (res reconcile.Result, retErr e
}
// DoReconcile is the real reconcile logic for appRollout.
// 1. prepare rollout info: use the assemble module in the application pkg to generate manifests from the appRevision
// 2. determine which component is the common component between the source and target AppRevision
// 3. if the target workload doesn't exist yet, template the targetAppRevision to apply the target manifest
// 4. extract the target workload and the source workload (if sourceAppRevision is not empty)
// 5. generate a rolloutPlan controller with the source and target workloads and call rolloutPlan's reconcile func
// 6. handle the output status
// !!! Note the AppRollout object should not be updated in this function, as it could be logically used in the Application reconcile loop which does not have a real AppRollout object.
func (r *Reconciler) DoReconcile(ctx context.Context, appRollout *v1beta1.AppRollout) (res reconcile.Result, retErr error) {
func (r *Reconciler) DoReconcile(ctx context.Context, appRollout *v1beta1.AppRollout) (reconcile.Result, error) {
if len(appRollout.Status.RollingState) == 0 {
appRollout.Status.ResetStatus()
}
targetAppRevisionName := appRollout.Spec.TargetAppRevisionName
sourceAppRevisionName := appRollout.Spec.SourceAppRevisionName
var err error
// no need to proceed if rollout is already in a terminal state and there is no source/target change
doneReconcile := r.handleRollingTerminated(*appRollout, targetAppRevisionName, sourceAppRevisionName)
doneReconcile := r.handleRollingTerminated(*appRollout)
if doneReconcile {
return reconcile.Result{}, nil
}
h := rolloutHandler{Reconciler: r, appRollout: appRollout}
// handle rollout target/source change (only if it's not deleting already)
if isRolloutModified(*appRollout) {
klog.InfoS("rollout target changed, restart the rollout", "new source", sourceAppRevisionName,
"new target", targetAppRevisionName)
r.record.Event(appRollout, event.Normal("Rollout Restarted",
"rollout target changed, restart the rollout", "new source", sourceAppRevisionName,
"new target", targetAppRevisionName))
// we are okay to move directly to restarting the rollout since we are at a terminal state
// however, we need to make sure we properly finalize the existing rollout before restarting if it's
// still in the middle of rolling out
if appRollout.Status.RollingState != v1alpha1.RolloutSucceedState &&
appRollout.Status.RollingState != v1alpha1.RolloutFailedState {
// continue to handle the previous resources until we are okay to move forward
targetAppRevisionName = appRollout.Status.LastUpgradedTargetAppRevision
sourceAppRevisionName = appRollout.Status.LastSourceAppRevision
} else {
// mark so that we don't think we are modified again
appRollout.Status.LastUpgradedTargetAppRevision = targetAppRevisionName
appRollout.Status.LastSourceAppRevision = sourceAppRevisionName
}
appRollout.Status.StateTransition(v1alpha1.RollingModifiedEvent)
h.handleRolloutModified()
} else {
// except when modified in the middle of a rollout, in most cases use the real source/target revisions from the appRollout spec for this round of reconcile
h.sourceRevName = appRollout.Spec.SourceAppRevisionName
h.targetRevName = appRollout.Spec.TargetAppRevisionName
}
// Get the source application first
var sourceApRev, targetAppRev *oamv1alpha2.ApplicationRevision
var sourceApp, targetApp *oamv1alpha2.ApplicationContext
var err error
// call assemble func generate source and target manifest
if err = h.prepareRollout(ctx); err != nil {
return reconcile.Result{}, err
}
if appRollout.Status.RollingState == v1alpha1.RolloutDeletingState {
if sourceAppRevisionName == "" {
klog.InfoS("source app fields not filled, this is a scale operation", "appRollout", klog.KRef(appRollout.Namespace, appRollout.Name))
} else {
sourceApRev, sourceApp, err = r.getSourceAppContexts(ctx,
appRollout.Spec.ComponentList, appRollout.Status.RollingState, sourceAppRevisionName)
if err != nil && !apierrors.IsNotFound(err) {
return ctrl.Result{}, err
}
}
// Get the
targetAppRev, targetApp, err = r.getTargetApps(ctx, appRollout.Spec.ComponentList,
appRollout.Status.RollingState, targetAppRevisionName)
if err != nil && !apierrors.IsNotFound(err) {
return ctrl.Result{}, err
}
if sourceApp == nil && targetApp == nil {
// we only support rolling out one workload now, so determine here which component needs to roll out
if err = h.determineRolloutComponent(); err != nil {
return reconcile.Result{}, err
}
var sourceWorkload, targetWorkload *unstructured.Unstructured
// we should handle two special cases before calling rolloutPlan's Reconcile
switch h.appRollout.Status.RollingState {
case v1alpha1.RolloutDeletingState:
// the application has been deleted but the related appRevs haven't been removed yet
if h.sourceAppRevision == nil && h.targetAppRevision == nil {
klog.InfoS("Both the target and the source app are gone", "appRollout",
klog.KRef(appRollout.Namespace, appRollout.Name), "rolling state", appRollout.Status.RollingState)
appRollout.Status.StateTransition(v1alpha1.RollingFinalizedEvent)
h.appRollout.Status.StateTransition(v1alpha1.RollingFinalizedEvent)
// update the appRollout status
return ctrl.Result{}, nil
}
} else {
// TODO: try to refactor this into a method with reasonable number of parameters and output
if sourceAppRevisionName == "" {
klog.Info("source app fields not filled, this is a scale operation")
} else {
sourceApRev, sourceApp, err = r.getSourceAppContexts(ctx,
appRollout.Spec.ComponentList, appRollout.Status.RollingState, sourceAppRevisionName)
if err != nil {
return ctrl.Result{}, err
}
// check if the app is templated
if sourceApp.Status.RollingStatus != types.RollingTemplated {
klog.Info("source app revision is not ready for rolling yet", "application revision", sourceAppRevisionName)
r.record.Event(appRollout, event.Normal("Rollout Paused",
"source app revision is not ready for rolling yet", "application revision", sourceApp.GetName()))
return ctrl.Result{RequeueAfter: 3 * time.Second}, nil
}
}
// Get the target application revision after the source app is templated
targetAppRev, targetApp, err = r.getTargetApps(ctx, appRollout.Spec.ComponentList,
appRollout.Status.RollingState, targetAppRevisionName)
case v1alpha1.LocatingTargetAppState:
// dispatch sourceWorkload
err = h.templateSourceManifest(ctx)
if err != nil {
return ctrl.Result{}, err
return reconcile.Result{}, err
}
// this ensures that we handle the target app init only once
appRollout.Status.StateTransition(v1alpha1.AppLocatedEvent)
// check if the app is templated
if targetApp.Status.RollingStatus != types.RollingTemplated {
r.record.Event(appRollout, event.Normal("Rollout Paused",
"target app revision is not ready for rolling yet", "application revision", targetApp.GetName()))
return ctrl.Result{RequeueAfter: 3 * time.Second}, nil
// the target manifest hasn't been templated yet, so call dispatch to template the target manifest first
err = h.templateTargetManifest(ctx)
if err != nil {
return reconcile.Result{}, err
}
// this ensures that we template the workload only once
h.appRollout.Status.StateTransition(v1alpha1.AppLocatedEvent)
return reconcile.Result{RequeueAfter: 3 * time.Second}, nil
default:
// in other cases there is no need to do anything
}
// we get the real workloads from the spec of the revisions
targetWorkload, sourceWorkload, err := r.extractWorkloads(ctx, appRollout.Spec.ComponentList, targetAppRev, sourceApRev)
sourceWorkload, targetWorkload, err = h.fetchSourceAndTargetWorkload(ctx)
if err != nil {
klog.ErrorS(err, "cannot fetch the workloads to upgrade", "target application",
klog.KRef(appRollout.Namespace, targetAppRevisionName), "source application", klog.KRef(appRollout.Namespace, sourceAppRevisionName),
"commonComponent", appRollout.Spec.ComponentList)
return ctrl.Result{RequeueAfter: 5 * time.Second}, err
return reconcile.Result{}, err
}
klog.InfoS("get the target workload we need to work on", "targetWorkload", klog.KObj(targetWorkload))
if sourceWorkload != nil {
klog.InfoS("get the source workload we need to work on", "sourceWorkload", klog.KObj(sourceWorkload))
}
// reconcile the rollout part of the spec given the target and source workload
rolloutPlanController := rollout.NewRolloutPlanController(r, appRollout, r.record,
&appRollout.Spec.RolloutPlan, &appRollout.Status.RolloutStatus, targetWorkload, sourceWorkload)
@@ -234,18 +198,19 @@ func (r *Reconciler) DoReconcile(ctx context.Context, appRollout *v1beta1.AppRol
appRollout.Status.LastUpgradedTargetAppRevision = appRollout.Spec.TargetAppRevisionName
appRollout.Status.LastSourceAppRevision = appRollout.Spec.SourceAppRevisionName
}
if rolloutStatus.RollingState == v1alpha1.RolloutSucceedState {
klog.InfoS("rollout succeeded, record the source and target app revision", "source", sourceAppRevisionName,
"target", targetAppRevisionName)
if err = r.finalizeRollingSucceeded(ctx, sourceApp, targetApp); err != nil {
return ctrl.Result{}, err
err = h.finalizeRollingSucceeded(ctx)
if err != nil {
return reconcile.Result{}, err
}
klog.InfoS("rollout succeeded, record the source and target app revision", "source", appRollout.Spec.SourceAppRevisionName,
"target", appRollout.Spec.TargetAppRevisionName)
} else if rolloutStatus.RollingState == v1alpha1.RolloutFailedState {
klog.InfoS("rollout failed, record the source and target app revision", "source", sourceAppRevisionName,
"target", targetAppRevisionName, "revert on deletion", appRollout.Spec.RevertOnDelete)
klog.InfoS("rollout failed, record the source and target app revision", "source", appRollout.Spec.SourceAppRevisionName,
"target", appRollout.Spec.TargetAppRevisionName, "revert on deletion", appRollout.Spec.RevertOnDelete)
}
// update the appRollout status
return result, nil
}
@@ -289,43 +254,20 @@ func (r *Reconciler) handleFinalizer(ctx context.Context, appRollout *v1beta1.Ap
return false, reconcile.Result{}, nil
}
func (r *Reconciler) handleRollingTerminated(appRollout v1beta1.AppRollout, targetAppRevisionName string,
sourceAppRevisionName string) bool {
func (r *Reconciler) handleRollingTerminated(appRollout v1beta1.AppRollout) bool {
// handle rollout completed
if appRollout.Status.RollingState == v1alpha1.RolloutSucceedState ||
appRollout.Status.RollingState == v1alpha1.RolloutFailedState {
if appRollout.Status.LastUpgradedTargetAppRevision == targetAppRevisionName &&
appRollout.Status.LastSourceAppRevision == sourceAppRevisionName {
klog.InfoS("rollout completed, no need to reconcile", "source", sourceAppRevisionName,
"target", targetAppRevisionName)
if appRollout.Status.LastUpgradedTargetAppRevision == appRollout.Spec.TargetAppRevisionName &&
appRollout.Status.LastSourceAppRevision == appRollout.Spec.SourceAppRevisionName {
klog.InfoS("rollout completed, no need to reconcile", "source", appRollout.Spec.SourceAppRevisionName,
"target", appRollout.Spec.TargetAppRevisionName)
return true
}
}
return false
}
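// Illustrative example (not part of the change): once a v1 -> v2 rollout has
// recorded LastSourceAppRevision=v1 and LastUpgradedTargetAppRevision=v2 and
// reached a terminal state (Succeed/Failed), every reconcile is skipped until
// the spec names a different revision pair.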
func (r *Reconciler) finalizeRollingSucceeded(ctx context.Context, sourceApp *oamv1alpha2.ApplicationContext,
targetApp *oamv1alpha2.ApplicationContext) error {
if sourceApp != nil {
// mark the source app as an application revision only so that it stop being reconciled
oamutil.RemoveAnnotations(sourceApp, []string{oam.AnnotationAppRollout})
oamutil.AddAnnotations(sourceApp, map[string]string{oam.AnnotationAppRevision: strconv.FormatBool(true)})
if err := r.Update(ctx, sourceApp); err != nil {
klog.ErrorS(err, "cannot add the app revision annotation", "source application",
klog.KRef(sourceApp.Namespace, sourceApp.GetName()))
return err
}
}
// remove the rollout annotation so that the target appConfig controller can take over the rest of the work
oamutil.RemoveAnnotations(targetApp, []string{oam.AnnotationAppRollout, oam.AnnotationRollingComponent})
if err := r.Update(ctx, targetApp); err != nil {
klog.ErrorS(err, "cannot remove the rollout annotation", "target application",
klog.KRef(targetApp.Namespace, targetApp.GetName()))
return err
}
return nil
}
// updateStatus updates v1beta1.AppRollout's Status with retry.RetryOnConflict
func (r *Reconciler) updateStatus(ctx context.Context, appRollout *v1beta1.AppRollout) error {
status := appRollout.DeepCopy().Status


@@ -1,258 +0,0 @@
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package applicationrollout
import (
"context"
"fmt"
"strconv"
"strings"
"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
ktypes "k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
"k8s.io/utils/pointer"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
"github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/controller/common"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/applicationconfiguration"
"github.com/oam-dev/kubevela/pkg/controller/utils"
"github.com/oam-dev/kubevela/pkg/oam"
oamutil "github.com/oam-dev/kubevela/pkg/oam/util"
appUtil "github.com/oam-dev/kubevela/pkg/webhook/core.oam.dev/v1alpha2/applicationrollout"
)
// getTargetApps tries to locate the target appRevision and appContext that are responsible for the target
// we will create a new appContext when it's not found
func (r *Reconciler) getTargetApps(ctx context.Context, componentList []string, rollingState v1alpha1.RollingState,
targetAppRevisionName string) (*v1alpha2.ApplicationRevision, *v1alpha2.ApplicationContext, error) {
var appRevision v1alpha2.ApplicationRevision
var appContext v1alpha2.ApplicationContext
namespaceName := oamutil.GetDefinitionNamespaceWithCtx(ctx)
if err := r.Get(ctx, ktypes.NamespacedName{Namespace: namespaceName, Name: targetAppRevisionName},
&appRevision); err != nil {
klog.ErrorS(err, "cannot locate target application revision", "target application revision",
klog.KRef(namespaceName, targetAppRevisionName))
return nil, nil, err
}
if err := r.Get(ctx, ktypes.NamespacedName{Namespace: namespaceName, Name: targetAppRevisionName},
&appContext); err != nil {
if apierrors.IsNotFound(err) && rollingState == v1alpha1.LocatingTargetAppState {
klog.InfoS("target application context does not exist yet, create one", "target application revision",
klog.KRef(namespaceName, targetAppRevisionName))
appContext, err = r.createAppContext(ctx, componentList, &appRevision)
if err != nil {
return nil, nil, err
}
return &appRevision, &appContext, nil
}
// the appContext has to exist by now
klog.ErrorS(err, "cannot locate target application context", "target application name",
klog.KRef(namespaceName, targetAppRevisionName), "rollingState", rollingState)
return nil, nil, err
}
// special handle the first time we locate the appContext
if rollingState == v1alpha1.LocatingTargetAppState {
if appContext.Status.RollingStatus == types.RollingTemplated {
// force template the target app
klog.InfoS("force templating an already templated target application",
"target application revision", klog.KRef(namespaceName, targetAppRevisionName))
appContext.Status.RollingStatus = types.RollingTemplating
if err := r.Status().Update(ctx, &appContext); err != nil {
klog.ErrorS(err, "failed to force update target application context to be templating",
"target application name", klog.KRef(namespaceName, targetAppRevisionName))
return nil, nil, err
}
}
err := r.prepareAppContext(ctx, componentList, &appContext)
if err != nil {
return nil, nil, err
}
}
return &appRevision, &appContext, nil
}
// getSourceAppContexts tries to locate the source appRevision and appContext that are responsible for the source
func (r *Reconciler) getSourceAppContexts(ctx context.Context, componentList []string, rollingState v1alpha1.RollingState,
sourceAppRevisionName string) (*v1alpha2.ApplicationRevision, *v1alpha2.ApplicationContext, error) {
var appRevision v1alpha2.ApplicationRevision
var appContext v1alpha2.ApplicationContext
namespaceName := oamutil.GetDefinitionNamespaceWithCtx(ctx)
if err := r.Get(ctx, ktypes.NamespacedName{Namespace: namespaceName, Name: sourceAppRevisionName},
&appRevision); err != nil {
klog.ErrorS(err, "cannot locate source application revision", "source application revision",
klog.KRef(namespaceName, sourceAppRevisionName))
return nil, nil, err
}
// the source app has to exist or there is nothing for us to upgrade from
if err := r.Get(ctx, ktypes.NamespacedName{Namespace: namespaceName, Name: sourceAppRevisionName},
&appContext); err != nil {
// TODO: use the app name as the source Context to upgrade from a non-rolling application to rolling
klog.ErrorS(err, "cannot locate source application revision", "source application name",
klog.KRef(namespaceName, sourceAppRevisionName))
return nil, nil, err
}
// set the AC as rolling if we are still at locating state
if rollingState == v1alpha1.LocatingTargetAppState {
err := r.prepareAppContext(ctx, componentList, &appContext)
if err != nil {
return nil, nil, err
}
}
return &appRevision, &appContext, nil
}
func (r *Reconciler) prepareAppContext(ctx context.Context, componentList []string,
appContext *v1alpha2.ApplicationContext) error {
oamutil.RemoveAnnotations(appContext, []string{oam.AnnotationAppRevision})
// pass the rolling component to the app
oamutil.AddAnnotations(appContext, map[string]string{oam.AnnotationAppRollout: strconv.FormatBool(true)})
if len(componentList) != 0 {
oamutil.AddAnnotations(appContext, map[string]string{
oam.AnnotationRollingComponent: strings.Join(componentList, common.RollingComponentsSep)})
}
return r.Update(ctx, appContext)
}
func (r *Reconciler) createAppContext(ctx context.Context, componentList []string,
appRevision *v1alpha2.ApplicationRevision) (v1alpha2.ApplicationContext, error) {
namespaceName := oamutil.GetDefinitionNamespaceWithCtx(ctx)
appContext := v1alpha2.ApplicationContext{
ObjectMeta: metav1.ObjectMeta{
Name: appRevision.GetName(),
Namespace: namespaceName,
Labels: appRevision.GetLabels(),
Annotations: appRevision.GetAnnotations(),
OwnerReferences: appRevision.GetOwnerReferences(),
},
Spec: v1alpha2.ApplicationContextSpec{
ApplicationRevisionName: appRevision.GetName(),
},
}
if metav1.GetControllerOf(&appContext) == nil {
for i, owner := range appContext.GetOwnerReferences() {
if owner.Kind == v1alpha2.ApplicationKind {
appContext.GetOwnerReferences()[i].Controller = pointer.BoolPtr(true)
}
}
}
// set the AC as rolling
oamutil.AddAnnotations(&appContext, map[string]string{oam.AnnotationAppRollout: strconv.FormatBool(true)})
// pass the rolling component to the app
if len(componentList) != 0 {
oamutil.AddAnnotations(&appContext, map[string]string{
oam.AnnotationRollingComponent: strings.Join(componentList, common.RollingComponentsSep)})
}
err := r.Create(ctx, &appContext)
return appContext, err
}
// extractWorkloads extracts the workloads from the source and target applicationConfig
func (r *Reconciler) extractWorkloads(ctx context.Context, componentList []string, targetAppRevision,
sourceAppRevision *v1alpha2.ApplicationRevision) (*unstructured.Unstructured, *unstructured.Unstructured, error) {
var componentName string
var sourceApp *v1alpha2.ApplicationConfiguration
targetApp, err := oamutil.RawExtension2AppConfig(targetAppRevision.Spec.ApplicationConfiguration)
if err != nil {
return nil, nil, err
}
if sourceAppRevision != nil {
sourceApp, err = oamutil.RawExtension2AppConfig(sourceAppRevision.Spec.ApplicationConfiguration)
if err != nil {
return nil, nil, err
}
}
if len(componentList) == 0 {
// we need to find a default component
commons := appUtil.FindCommonComponent(targetApp, sourceApp)
if len(commons) != 1 {
return nil, nil, fmt.Errorf("cannot find a default component, too many common components: %+v", commons)
}
componentName = commons[0]
} else {
// assume that the validator webhook has already guaranteed that there is no more than one component for now
// and the component exists in both the target and source app
componentName = componentList[0]
}
// get the workload definition
// the validator webhook has checked that source and the target are the same type
targetWorkload, err := r.fetchWorkload(ctx, componentName, targetApp)
if err != nil {
return nil, nil, err
}
klog.InfoS("successfully get the target workload we need to work on", "targetWorkload", klog.KObj(targetWorkload))
if sourceApp != nil {
sourceWorkload, err := r.fetchWorkload(ctx, componentName, sourceApp)
if err != nil {
return nil, nil, err
}
klog.InfoS("successfully get the source workload we need to work on", "sourceWorkload",
klog.KObj(sourceWorkload))
return targetWorkload, sourceWorkload, nil
}
return targetWorkload, nil, nil
}
// fetchWorkload based on the component and the appConfig
func (r *Reconciler) fetchWorkload(ctx context.Context, componentName string,
targetApp *v1alpha2.ApplicationConfiguration) (*unstructured.Unstructured, error) {
var targetAcc *v1alpha2.ApplicationConfigurationComponent
for _, acc := range targetApp.Spec.Components {
if utils.ExtractComponentName(acc.RevisionName) == componentName {
targetAcc = acc.DeepCopy()
}
}
// can't happen as we just searched the appConfig
if targetAcc == nil {
klog.Error("The component does not belong to the application",
"components", targetApp.Spec.Components, "component to upgrade", componentName)
return nil, fmt.Errorf("the component %s does not belong to the application with components %+v",
componentName, targetApp.Spec.Components)
}
revision, err := utils.ExtractRevision(targetAcc.RevisionName)
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("failed to get revision given revision name %s",
targetAcc.RevisionName))
}
// get the component given the component revision
component, _, err := oamutil.GetComponent(ctx, r, *targetAcc, targetApp.GetNamespace())
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("failed to get component given its revision %s",
targetAcc.RevisionName))
}
// get the workload template in the component
w, err := oamutil.RawExtension2Unstructured(&component.Spec.Workload)
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("failed to get component given revision %s", targetAcc.RevisionName))
}
// reuse the same appConfig controller logic that determines the workload name given an ACC
// inplaceUpgrade not used in rollout now
applicationconfiguration.SetAppWorkloadInstanceName(componentName, w, revision, "")
// get the real workload object from api-server given GVK and name
workload, err := oamutil.GetObjectGivenGVKAndName(ctx, r, w.GroupVersionKind(), targetApp.GetNamespace(), w.GetName())
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("failed to get workload %s with gvk %+v ", w.GetName(), w.GroupVersionKind()))
}
return workload, nil
}


@@ -0,0 +1,80 @@
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package applicationrollout
import (
"reflect"
"k8s.io/utils/pointer"
"github.com/openkruise/kruise-api/apps/v1alpha1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/klog/v2"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/application/assemble"
"github.com/oam-dev/kubevela/pkg/oam"
)
func rolloutWorkloadName() assemble.WorkloadOption {
return assemble.WorkloadOptionFn(func(w *unstructured.Unstructured, component *v1alpha2.Component, definition *v1beta1.ComponentDefinition) error {
// we hard-code the behavior depending on the workload group/kind for now. The only in-place upgradable resources
// we support are cloneset/statefulset for now. We can easily add more later.
if w.GroupVersionKind().Group == v1alpha1.GroupVersion.Group {
if w.GetKind() == reflect.TypeOf(v1alpha1.CloneSet{}).Name() ||
w.GetKind() == reflect.TypeOf(v1alpha1.StatefulSet{}).Name() {
// we use the component name alone for those resources that do support in-place upgrade
klog.InfoS("we reuse the component name for resources that support in-place upgrade",
"GVK", w.GroupVersionKind(), "instance name", component.Name)
w.SetName(component.Name)
return nil
}
}
// we assume that the rest of the resources do not support in-place upgrade
compRevName := w.GetLabels()[oam.LabelAppComponentRevision]
w.SetName(compRevName)
klog.InfoS("we encountered an unknown resources, assume that it does not support in-place upgrade",
"GVK", w.GroupVersionKind(), "instance name", compRevName)
return nil
})
}
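// exampleRolloutWorkloadName is an illustrative sketch (not part of the change)
// of the naming behavior above. It assumes the assemble.WorkloadOption interface
// exposes an ApplyToWorkload method, as implemented by assemble.WorkloadOptionFn.
func exampleRolloutWorkloadName() {
	opt := rolloutWorkloadName()
	comp := &v1alpha2.Component{}
	comp.SetName("frontend")

	// A CloneSet supports in-place upgrade, so it keeps the bare component name.
	cloneset := &unstructured.Unstructured{}
	cloneset.SetGroupVersionKind(v1alpha1.GroupVersion.WithKind("CloneSet"))
	_ = opt.ApplyToWorkload(cloneset, comp, nil) // cloneset.GetName() == "frontend"

	// Any other kind is assumed not to support in-place upgrade, so it is named
	// after the component revision recorded in its labels.
	deploy := &unstructured.Unstructured{}
	deploy.SetAPIVersion("apps/v1")
	deploy.SetKind("Deployment")
	deploy.SetLabels(map[string]string{oam.LabelAppComponentRevision: "frontend-v1"})
	_ = opt.ApplyToWorkload(deploy, comp, nil) // deploy.GetName() == "frontend-v1"
}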
// disableControllerOwner disables the previous controller owner (resourceTracker) so that appRollout can take over updating the workload
func disableControllerOwner(workload *unstructured.Unstructured) {
if workload == nil {
return
}
ownerRefs := workload.GetOwnerReferences()
for i, ref := range ownerRefs {
if ref.Controller != nil && *ref.Controller {
ownerRefs[i].Controller = pointer.BoolPtr(false)
}
}
workload.SetOwnerReferences(ownerRefs)
}
// enableControllerOwner yields the controller owner back to the resourceTracker
func enableControllerOwner(workload *unstructured.Unstructured) {
owners := workload.GetOwnerReferences()
for i, owner := range owners {
if owner.Kind == v1beta1.ResourceTrackerKind && owner.Controller != nil && !*owner.Controller {
owners[i].Controller = pointer.BoolPtr(true)
}
}
workload.SetOwnerReferences(owners)
}


@@ -0,0 +1,61 @@
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package applicationrollout
import (
"testing"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"gotest.tools/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/utils/pointer"
)
func TestDisableControllerOwner(t *testing.T) {
w := &unstructured.Unstructured{}
owners := []metav1.OwnerReference{
{Name: "test-1", Controller: pointer.BoolPtr(false)},
{Name: "test-2", Controller: pointer.BoolPtr(true)},
}
w.SetOwnerReferences(owners)
disableControllerOwner(w)
assert.Equal(t, 2, len(w.GetOwnerReferences()))
for _, reference := range w.GetOwnerReferences() {
assert.Equal(t, false, *reference.Controller)
}
}
func TestEnableControllerOwner(t *testing.T) {
w := &unstructured.Unstructured{}
owners := []metav1.OwnerReference{
{Name: "test-1", Controller: pointer.BoolPtr(false), Kind: v1beta1.ResourceTrackerKind},
{Name: "test-2", Controller: pointer.BoolPtr(false), Kind: v1alpha2.ApplicationKind},
}
w.SetOwnerReferences(owners)
enableControllerOwner(w)
assert.Equal(t, 2, len(w.GetOwnerReferences()))
for _, reference := range w.GetOwnerReferences() {
if reference.Kind == v1beta1.ResourceTrackerKind {
assert.Equal(t, true, *reference.Controller)
} else {
assert.Equal(t, false, *reference.Controller)
}
}
}


@@ -0,0 +1,290 @@
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package applicationrollout
import (
"context"
"fmt"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/crossplane/crossplane-runtime/pkg/event"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/application/dispatch"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/application/assemble"
oamutil "github.com/oam-dev/kubevela/pkg/oam/util"
appUtil "github.com/oam-dev/kubevela/pkg/webhook/core.oam.dev/v1alpha2/applicationrollout"
)
type rolloutHandler struct {
*Reconciler
appRollout *v1beta1.AppRollout
// sourceRevName/targetRevName are the source and target revisions used in this reconcile round.
// In most cases they equal appRollout.spec.source/targetRevName, but if we roll forward or revert
// in the middle of a rollout, they stay pinned to the previous rollout's revisions.
sourceRevName string
targetRevName string
sourceAppRevision *v1beta1.ApplicationRevision
targetAppRevision *v1beta1.ApplicationRevision
// sourceWorkloads/targetWorkloads are assembled from the appRevisions in the assemble phase.
// Be aware that they are not the live state in k8s; they are generated from the appRevision and only carry GVK+namespace+name.
sourceWorkloads map[string]*unstructured.Unstructured
targetWorkloads map[string]*unstructured.Unstructured
// targetManifests are used by the dispatch (template targetRevision) and handleSucceed (GC) phases
targetManifests []*unstructured.Unstructured
// sourceManifests are used by the dispatch (template sourceRevision) and handleSucceed (GC) phases
sourceManifests []*unstructured.Unstructured
// needRollComponent is the common component found between the source and target revisions
needRollComponent string
}
// prepareRollout calls the assemble func to prepare the info needed throughout the reconcile loop
func (h *rolloutHandler) prepareRollout(ctx context.Context) error {
var err error
h.targetAppRevision = new(v1beta1.ApplicationRevision)
if err := h.Get(ctx, types.NamespacedName{Namespace: h.appRollout.Namespace, Name: h.targetRevName}, h.targetAppRevision); err != nil {
return err
}
// construct an assembler for the targetAppRevision
targetAssemble := assemble.NewAppManifests(h.targetAppRevision).
WithWorkloadOption(rolloutWorkloadName()).
WithWorkloadOption(assemble.PrepareWorkloadForRollout())
// in the template phase, we use targetManifests, which include the target workloads/traits, to template the target revision
h.targetManifests, err = targetAssemble.AssembledManifests()
if err != nil {
klog.Error("appRollout targetAppRevision failed to assemble manifest", "appRollout", klog.KRef(h.appRollout.Namespace, h.appRollout.Name))
return err
}
// we only use the workloads, grouped by component name, to find the common workloads between the source and target revisions
h.targetWorkloads, _, _, err = targetAssemble.GroupAssembledManifests()
if err != nil {
klog.Error("appRollout targetAppRevision failed to assemble target workload", "appRollout", klog.KRef(h.appRollout.Namespace, h.appRollout.Name))
return err
}
if len(h.sourceRevName) != 0 {
h.sourceAppRevision = new(v1beta1.ApplicationRevision)
if err := h.Get(ctx, types.NamespacedName{Namespace: h.appRollout.Namespace, Name: h.sourceRevName}, h.sourceAppRevision); err != nil {
return err
}
// construct an assembler for the sourceAppRevision
sourceAssemble := assemble.NewAppManifests(h.sourceAppRevision).
WithWorkloadOption(assemble.PrepareWorkloadForRollout()).
WithWorkloadOption(rolloutWorkloadName())
h.sourceWorkloads, _, _, err = sourceAssemble.GroupAssembledManifests()
if err != nil {
klog.Error("appRollout sourceAppRevision failed to assemble workloads", "appRollout", klog.KRef(h.appRollout.Namespace, h.appRollout.Name))
return err
}
}
return nil
}
// we only support one workload for now, so this func determines which component needs to be rolled out
func (h *rolloutHandler) determineRolloutComponent() error {
componentList := h.appRollout.Spec.ComponentList
// if the user did not set ComponentList in the AppRollout, we find a common component between the source and target
if len(componentList) == 0 {
// we need to find a default component
commons := appUtil.FindCommonComponentWithManifest(h.targetWorkloads, h.sourceWorkloads)
if len(commons) != 1 {
return fmt.Errorf("cannot find a default component, too many common components: %+v", commons)
}
h.needRollComponent = commons[0]
} else {
// assume that the validator webhook has already guaranteed that there is no more than one component for now
// and the component exists in both the target and source app
h.needRollComponent = componentList[0]
}
return nil
}
// fetchSourceAndTargetWorkload fetches the real source and target workloads from the cluster
func (h *rolloutHandler) fetchSourceAndTargetWorkload(ctx context.Context) (*unstructured.Unstructured, *unstructured.Unstructured, error) {
var sourceWorkload, targetWorkload *unstructured.Unstructured
var err error
if len(h.sourceRevName) == 0 {
klog.Info("source app fields not filled, this is a scale operation")
} else if sourceWorkload, err = h.extractWorkload(ctx, *h.sourceWorkloads[h.needRollComponent]); err != nil {
klog.Errorf("specified sourceRevName but cannot fetch source workload %s: %v",
h.appRollout.Spec.SourceAppRevisionName, err)
return nil, nil, err
}
if targetWorkload, err = h.extractWorkload(ctx, *h.targetWorkloads[h.needRollComponent]); err != nil {
klog.Errorf("cannot fetch target workload %s: %v", h.appRollout.Spec.TargetAppRevisionName, err)
return nil, nil, err
}
return sourceWorkload, targetWorkload, nil
}
// extractWorkload uses the GVK and name of the assembled workload to fetch the real workload in the cluster
func (h *rolloutHandler) extractWorkload(ctx context.Context, workload unstructured.Unstructured) (*unstructured.Unstructured, error) {
wl, err := oamutil.GetObjectGivenGVKAndName(ctx, h, workload.GroupVersionKind(), workload.GetNamespace(), workload.GetName())
if err != nil {
return nil, err
}
return wl, nil
}
// if we are in the middle of a previous rollout, continue using the previous source and target appRevisions for this round
func (h *rolloutHandler) handleRolloutModified() {
klog.InfoS("rollout target changed, restart the rollout", "new source", h.appRollout.Spec.SourceAppRevisionName,
"new target", h.appRollout.Spec.TargetAppRevisionName)
h.record.Event(h.appRollout, event.Normal("Rollout Restarted",
"rollout target changed, restart the rollout", "new source", h.appRollout.Spec.SourceAppRevisionName,
"new target", h.appRollout.Spec.TargetAppRevisionName))
// we are okay to move directly to restart the rollout since we are at the terminal state
// however, we need to make sure we properly finalize the existing rollout before restarting if it's
// still in the middle of rolling out
if h.appRollout.Status.RollingState != v1alpha1.RolloutSucceedState &&
h.appRollout.Status.RollingState != v1alpha1.RolloutFailedState {
// this happens when rolling forward or reverting in the middle of a rollout; the previous rollout hasn't finished
// continue to handle the previous resources until we are okay to move forward
h.targetRevName = h.appRollout.Status.LastUpgradedTargetAppRevision
h.sourceRevName = h.appRollout.Status.LastSourceAppRevision
} else {
// the previous rollout has finished, go ahead using the new source/target revisions
h.targetRevName = h.appRollout.Spec.TargetAppRevisionName
h.sourceRevName = h.appRollout.Spec.SourceAppRevisionName
// mark so that we don't think we are modified again
h.appRollout.Status.LastUpgradedTargetAppRevision = h.appRollout.Spec.TargetAppRevisionName
h.appRollout.Status.LastSourceAppRevision = h.appRollout.Spec.SourceAppRevisionName
}
h.appRollout.Status.StateTransition(v1alpha1.RollingModifiedEvent)
}
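// Illustrative example (not part of the change): if the spec moves from
// (v1 -> v2) to (v2 -> v3) while the v1 -> v2 rollout is still in progress,
// this round keeps source=v1/target=v2 until that rollout finalizes; once it
// has reached Succeed/Failed, the next round picks up source=v2/target=v3
// from the spec directly.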
// templateTargetManifest calls dispatch to template the target app revision's manifests into the cluster
func (h *rolloutHandler) templateTargetManifest(ctx context.Context) error {
var rt *v1beta1.ResourceTracker
// if sourceAppRevision is not nil, we should upgrade existing resources which are also needed by target app
// revision
if h.sourceAppRevision != nil {
rt = new(v1beta1.ResourceTracker)
err := h.Get(ctx, types.NamespacedName{Name: dispatch.ConstructResourceTrackerName(h.appRollout.Spec.SourceAppRevisionName, h.appRollout.Namespace)}, rt)
if err != nil {
klog.Errorf("specified sourceAppRevisionName %s but cannot fetch the sourceResourceTracker %v",
h.appRollout.Spec.SourceAppRevisionName, err)
return err
}
}
// use the source resourceTracker to handle the owner transfer of shared resources
dispatcher := dispatch.NewAppManifestsDispatcher(h, h.targetAppRevision).EnableUpgradeAndSkipGC(rt)
_, err := dispatcher.Dispatch(ctx, h.targetManifests)
if err != nil {
klog.Errorf("dispatch targetRevision error %s:%v", h.appRollout.Spec.TargetAppRevisionName, err)
return err
}
workload, err := h.extractWorkload(ctx, *h.targetWorkloads[h.needRollComponent])
if err != nil {
return err
}
ref := metav1.GetControllerOfNoCopy(workload)
if ref != nil && ref.Kind == v1beta1.ResourceTrackerKind {
wlPatch := client.MergeFrom(workload.DeepCopy())
// guarantee resourceTracker isn't controller owner of workload
disableControllerOwner(workload)
if err = h.Client.Patch(ctx, workload, wlPatch, client.FieldOwner(h.appRollout.UID)); err != nil {
return err
}
}
return nil
}
// templateSourceManifest calls dispatch to template the source app revision's manifests into the cluster
func (h *rolloutHandler) templateSourceManifest(ctx context.Context) error {
// only when sourceAppRevision is not nil do we need to template the source revision
if h.sourceAppRevision == nil {
return nil
}
// use the source resourceTracker to handle the owner transfer of shared resources
dispatcher := dispatch.NewAppManifestsDispatcher(h, h.sourceAppRevision)
_, err := dispatcher.Dispatch(ctx, h.sourceManifests)
if err != nil {
klog.Errorf("dispatch sourceRevision error %s:%v", h.appRollout.Spec.TargetAppRevisionName, err)
return err
}
workload, err := h.extractWorkload(ctx, *h.sourceWorkloads[h.needRollComponent])
if err != nil {
return err
}
ref := metav1.GetControllerOfNoCopy(workload)
if ref != nil && ref.Kind == v1beta1.ResourceTrackerKind {
wlPatch := client.MergeFrom(workload.DeepCopy())
// guarantee resourceTracker isn't controller owner of workload
disableControllerOwner(workload)
if err = h.Client.Patch(ctx, workload, wlPatch, client.FieldOwner(h.appRollout.UID)); err != nil {
return err
}
}
return nil
}
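// takeOverWorkload is a minimal sketch (not part of the change) of the patch
// pattern shared by the two template funcs above: if the resourceTracker is
// still the controller owner of the workload, demote it so the rollout
// controller can safely mutate the workload during the rollout. It relies only
// on helpers and client calls already used in this file.
func takeOverWorkload(ctx context.Context, c client.Client, workload *unstructured.Unstructured, owner client.FieldOwner) error {
	ref := metav1.GetControllerOfNoCopy(workload)
	if ref == nil || ref.Kind != v1beta1.ResourceTrackerKind {
		// nothing to take over
		return nil
	}
	wlPatch := client.MergeFrom(workload.DeepCopy())
	disableControllerOwner(workload)
	return c.Patch(ctx, workload, wlPatch, owner)
}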
// finalizeRollingSucceeded handles the work left after the rollout succeeds
func (h *rolloutHandler) finalizeRollingSucceeded(ctx context.Context) error {
// yield controller owner back to resourceTracker
workload, err := h.extractWorkload(ctx, *h.targetWorkloads[h.needRollComponent])
if err != nil {
return err
}
wlPatch := client.MergeFrom(workload.DeepCopy())
enableControllerOwner(workload)
if err = h.Client.Patch(ctx, workload, wlPatch, client.FieldOwner(h.appRollout.UID)); err != nil {
return err
}
// only when sourceAppRevision is not nil do we need to GC the old revision's resources
if h.sourceAppRevision != nil {
oldRT := &v1beta1.ResourceTracker{}
err := h.Client.Get(ctx, client.ObjectKey{
Name: dispatch.ConstructResourceTrackerName(h.sourceAppRevision.Name, h.sourceAppRevision.Namespace)}, oldRT)
if err != nil && apierrors.IsNotFound(err) {
// end finalizing if source revision's tracker is already gone
// this guarantees finalizeRollingSucceeded will only GC once
return nil
}
if err != nil {
return err
}
d := dispatch.NewAppManifestsDispatcher(h.Client, h.targetAppRevision).
EnableUpgradeAndGC(oldRT)
// no need to dispatch manifest again, just do GC
if _, err := d.Dispatch(ctx, nil); err != nil {
return err
}
}
return nil
}
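// dispatchModesSketch is an illustrative summary (not part of the change) of
// the two dispatcher modes used in this file, relying only on the dispatch API
// calls already shown above.
func dispatchModesSketch(ctx context.Context, c client.Client, targetAppRev *v1beta1.ApplicationRevision,
	sourceRT *v1beta1.ResourceTracker, targetManifests []*unstructured.Unstructured) error {
	// While templating the target revision: take over resources tracked by the
	// source tracker but skip GC, since the source workload is still serving.
	if _, err := dispatch.NewAppManifestsDispatcher(c, targetAppRev).
		EnableUpgradeAndSkipGC(sourceRT).Dispatch(ctx, targetManifests); err != nil {
		return err
	}
	// After the rollout succeeds: dispatch nothing new and let GC reclaim
	// whatever the source tracker still owns.
	_, err := dispatch.NewAppManifestsDispatcher(c, targetAppRev).
		EnableUpgradeAndGC(sourceRT).Dispatch(ctx, nil)
	return err
}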


@@ -23,7 +23,6 @@ import (
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/appdeployment"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/application"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/applicationconfiguration"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/applicationcontext"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/applicationrollout"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/core/components/componentdefinition"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/core/policies/policydefinition"
@@ -38,7 +37,7 @@ import (
func Setup(mgr ctrl.Manager, args controller.Args) error {
for _, setup := range []func(ctrl.Manager, controller.Args) error{
containerizedworkload.Setup, manualscalertrait.Setup, healthscope.Setup,
application.Setup, applicationrollout.Setup, applicationcontext.Setup, appdeployment.Setup,
application.Setup, applicationrollout.Setup, appdeployment.Setup,
traitdefinition.Setup, componentdefinition.Setup, policydefinition.Setup, workflowstepdefinition.Setup,
} {
if err := setup(mgr, args); err != nil {


@@ -33,6 +33,8 @@ const (
LabelOAMResourceType = "app.oam.dev/resourceType"
// LabelAppRevisionHash records the Hash value of the application revision
LabelAppRevisionHash = "app.oam.dev/app-revision-hash"
// LabelAppNamespace records the namespace of Application
LabelAppNamespace = "app.oam.dev/namesapce"
// WorkloadTypeLabel indicates the type of the workloadDefinition
WorkloadTypeLabel = "workload.oam.dev/type"
@@ -89,4 +91,7 @@ const (
// AnnotationWorkflowContext is used to pass in the workflow context marshalled in json format.
AnnotationWorkflowContext = "app.oam.dev/workflow-context"
// AnnotationKubeVelaVersion is used to record current KubeVela version
AnnotationKubeVelaVersion = "oam.dev/kubevela-version"
)


@@ -19,7 +19,6 @@ package apply
import (
"context"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/pkg/errors"
@@ -176,13 +175,39 @@ func MustBeControllableBy(u types.UID) ApplyOption {
if c == nil {
return nil
}
// if the existing object is controlled by a ResourceTracker (i.e. a cross-namespace resource), skip the UID check
if c.Kind == v1beta1.ResourceTrackerKind {
return nil
}
if c.UID != u {
return errors.Errorf("existing object is not controlled by UID %q", u)
}
return nil
}
}
// MustBeControllableByAny requires that the existing object is controlled by an owner with one of
// the supplied UIDs.
func MustBeControllableByAny(ctrlUIDs []types.UID) ApplyOption {
return func(_ context.Context, existing, _ runtime.Object) error {
if existing == nil || len(ctrlUIDs) == 0 {
return nil
}
existingObjMeta, _ := existing.(metav1.Object)
c := metav1.GetControllerOf(existingObjMeta)
if c == nil {
return nil
}
// NOTE This is for backward compatibility after ApplicationContext is deprecated.
// In legacy clusters, existing resources are ctrl-owned by ApplicationContext or ResourceTracker (only for
// cross-namespace and cluster-scope resources). We use a particular annotation to identify legacy resources.
if len(existingObjMeta.GetAnnotations()[oam.AnnotationKubeVelaVersion]) == 0 {
// just skip checking UIDs, '3-way-merge' will remove the legacy ctrl-owner automatically
return nil
}
for _, u := range ctrlUIDs {
if c.UID == u {
return nil
}
}
return errors.Errorf("existing object is not controlled by any of UID %q", ctrlUIDs)
}
}
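// Illustrative usage (not part of the change), assuming an applicator that
// accepts ApplyOptions, e.g.:
//
//	uids := []types.UID{appRolloutUID, resourceTrackerUID}
//	err := applicator.Apply(ctx, workload, MustBeControllableByAny(uids))
//
// The apply is rejected unless the existing object is uncontrolled, legacy
// (missing the KubeVela version annotation), or controlled by one of the UIDs.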


@@ -30,9 +30,11 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/oam"
)
var ctx = context.Background()
@@ -371,3 +373,65 @@ func TestMustBeControllableBy(t *testing.T) {
})
}
}
func TestMustBeControllableByAny(t *testing.T) {
ctrlByAny := []types.UID{"owner1", "owner2"}
cases := map[string]struct {
reason string
current runtime.Object
want error
}{
"NoExistingObject": {
reason: "No error should be returned if no existing object",
},
"Adoptable": {
reason: "A current object with no controller reference may be adopted and controlled",
current: &testObject{ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
oam.AnnotationKubeVelaVersion: "undefined",
}},
},
},
"ControlledBySuppliedUID": {
reason: "A current object that is already controlled by the supplied UID is controllable",
current: &testObject{ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
oam.AnnotationKubeVelaVersion: "undefined",
},
OwnerReferences: []metav1.OwnerReference{{
UID: types.UID("owner1"),
Controller: pointer.BoolPtr(true),
}}}},
},
"ControlledBySomeoneElse": {
reason: "A current object that is already controlled by a different UID is not controllable",
current: &testObject{ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
oam.AnnotationKubeVelaVersion: "undefined",
},
OwnerReferences: []metav1.OwnerReference{{
UID: types.UID("some-other-uid"),
Controller: pointer.BoolPtr(true),
}}}},
want: errors.Errorf("existing object is not controlled by any of UID %q", ctrlByAny),
},
"BackwardCompatability": {
reason: "A current object without annotation 'kubevelavesion' is legacy",
current: &testObject{ObjectMeta: metav1.ObjectMeta{
OwnerReferences: []metav1.OwnerReference{{
UID: types.UID("some-other-uid"),
Controller: pointer.BoolPtr(true),
}}}},
},
}
for name, tc := range cases {
t.Run(name, func(t *testing.T) {
ao := MustBeControllableByAny(ctrlByAny)
err := ao(context.TODO(), tc.current, nil)
if diff := cmp.Diff(tc.want, err, test.EquateErrors()); diff != "" {
t.Errorf("\n%s\nMustBeControllableByAny(...)(...): -want error, +got error\n%s\n", tc.reason, diff)
}
})
}
}


@@ -17,12 +17,38 @@ limitations under the License.
package applicationrollout
import (
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
"github.com/oam-dev/kubevela/pkg/controller/utils"
)
// FindCommonComponent finds the common components in both the source and target application
// FindCommonComponentWithManifest finds the common components in both the source and target workloads
// the source can be nil
func FindCommonComponentWithManifest(target, source map[string]*unstructured.Unstructured) []string {
var commonComponents []string
if source == nil {
for compName := range target {
commonComponents = append(commonComponents, compName)
}
return commonComponents
}
// find the common components in both the source and target components
// write an O(N) algorithm just for fun, totally not worth the extra space
targetComponents := make(map[string]bool)
for comp := range target {
targetComponents[comp] = true
}
for comp := range source {
if targetComponents[comp] {
commonComponents = append(commonComponents, comp)
}
}
return commonComponents
}
// FindCommonComponent finds the common components in both the source and target application
// only used by the rollout webhook; will be deleted after the rollout webhook is refactored
func FindCommonComponent(targetApp, sourceApp *v1alpha2.ApplicationConfiguration) []string {
var commonComponents []string
if sourceApp == nil {


@@ -17,8 +17,11 @@ limitations under the License.
package applicationrollout
import (
"sort"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
"github.com/oam-dev/kubevela/pkg/controller/utils"
@@ -88,3 +91,53 @@ func fillApplication(app *v1alpha2.ApplicationConfigurationSpec, componentNames
})
}
}
var _ = Describe("Test find common component func", func() {
It("Test source app is nil", func() {
target := fillWorkloads([]string{"a", "b", "c"})
common := FindCommonComponentWithManifest(target, nil)
sort.Strings(common)
Expect(common).Should(BeEquivalentTo([]string{"a", "b", "c"}))
})
It("Test has one component", func() {
target := fillWorkloads([]string{"a", "b", "c"})
source := fillWorkloads([]string{"c"})
common := FindCommonComponentWithManifest(target, source)
sort.Strings(common)
Expect(common).Should(BeEquivalentTo([]string{"c"}))
})
It("Test has one common components", func() {
target := fillWorkloads([]string{"a", "b", "c"})
source := fillWorkloads([]string{"d", "c"})
common := FindCommonComponentWithManifest(target, source)
sort.Strings(common)
Expect(common).Should(BeEquivalentTo([]string{"c"}))
})
It("Test has more than 1 common component", func() {
target := fillWorkloads([]string{"b", "a", "c"})
source := fillWorkloads([]string{"c", "b"})
common := FindCommonComponentWithManifest(target, source)
sort.Strings(common)
Expect(common).Should(BeEquivalentTo([]string{"b", "c"}))
})
It("Test has more than 1 common component", func() {
target := fillWorkloads([]string{"a", "b", "c"})
source := fillWorkloads([]string{"d", "e", "c", "a"})
common := FindCommonComponentWithManifest(target, source)
sort.Strings(common)
Expect(common).Should(BeEquivalentTo([]string{"a", "c"}))
})
})
func fillWorkloads(componentNames []string) map[string]*unstructured.Unstructured {
w := make(map[string]*unstructured.Unstructured)
for _, s := range componentNames {
// we don't need real workload
w[s] = nil
}
return w
}


@@ -33,9 +33,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
apicommon "github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
"github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/controller/utils"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
@@ -137,8 +135,7 @@ var _ = Describe("Cloneset based app embed rollout tests", func() {
Expect(k8sClient.Delete(ctx, &ns, client.PropagationPolicy(metav1.DeletePropagationBackground))).Should(BeNil())
})
verifyRolloutSucceeded := func(targetAppContextName string, cpu string) {
By(fmt.Sprintf("Wait for the rollout `%s` to succeed", targetAppContextName))
verifyRolloutSucceeded := func(targetAppRevisionName string, cpu string) {
Eventually(
func() error {
app = v1beta1.Application{}
@@ -155,23 +152,6 @@ var _ = Describe("Cloneset based app embed rollout tests", func() {
Expect(app.Status.Rollout.UpgradedReplicas).Should(BeEquivalentTo(app.Status.Rollout.RolloutTargetSize))
clonesetName := app.Spec.Components[0].Name
Expect(app.Status.Phase).Should(BeEquivalentTo(apicommon.ApplicationRunning))
By("Verify AppContext rolling status")
appContext := &v1alpha2.ApplicationContext{}
Eventually(
func() error {
if err := k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: targetAppContextName}, appContext); err != nil {
return err
}
if appContext.Status.RollingStatus != types.RollingCompleted {
return fmt.Errorf("appcontext %s rolling state mismatch actualy %s", targetAppContextName, appContext.Status.RollingStatus)
}
owner := metav1.GetControllerOf(appContext)
if owner.Name != appName && owner.Kind != app.Kind && owner.APIVersion != app.APIVersion {
return fmt.Errorf("appcontext owner mismatch")
}
return nil
},
time.Second*120, time.Microsecond*300).Should(BeNil())
By("Verify cloneset status")
var clonesetOwner *metav1.OwnerReference
@@ -181,16 +161,22 @@ var _ = Describe("Cloneset based app embed rollout tests", func() {
return err
}
clonesetOwner = metav1.GetControllerOf(&kc)
if clonesetOwner.Kind != v1alpha2.ApplicationContextKind {
return fmt.Errorf("cloneset owner mismatch actually %s", v1alpha2.ApplicationContextKind)
if clonesetOwner == nil {
return fmt.Errorf("cloneset don't have any controller owner")
}
if clonesetOwner.Kind != v1beta1.ResourceTrackerKind {
return fmt.Errorf("cloneset owner mismatch wants %s actually %s", v1beta1.ResourceTrackerKind, clonesetOwner.Kind)
}
if kc.Status.UpdatedReplicas != *kc.Spec.Replicas {
return fmt.Errorf("upgraded pod number error")
}
resourceTrackerName := fmt.Sprintf("%s-%s", targetAppRevisionName, app.Namespace)
if clonesetOwner.Name != resourceTrackerName {
return fmt.Errorf("resourceTracker haven't take back controller owner")
}
return nil
},
time.Second*30, time.Millisecond*500).Should(BeNil())
Expect(clonesetOwner.Name).Should(BeEquivalentTo(targetAppContextName))
By("Verify pod status")
Eventually(func() error {
podList := corev1.PodList{}


@@ -59,11 +59,11 @@ var _ = Describe("Test application cross namespace resource", func() {
Eventually(func() error {
ns := new(corev1.Namespace)
return k8sClient.Get(ctx, types.NamespacedName{Name: namespace}, ns)
}, time.Second*60, time.Microsecond*300).Should(BeNil())
}, time.Second*5, time.Millisecond*300).Should(BeNil())
Eventually(func() error {
ns := new(corev1.Namespace)
return k8sClient.Get(ctx, types.NamespacedName{Name: crossNamespace}, ns)
}, time.Second*60, time.Microsecond*300).Should(BeNil())
}, time.Second*5, time.Millisecond*300).Should(BeNil())
})
AfterEach(func() {
@@ -253,7 +253,7 @@ var _ = Describe("Test application cross namespace resource", func() {
}, 20*time.Second, 2*time.Second).Should(SatisfyAll(&util.NotFoundMatcher{}))
})
It("Test application have cross-namespace workload", func() {
It("Test application have cross-namespace workload", func() {
// install component definition
crossCdJson, _ := yaml.YAMLToJSON([]byte(fmt.Sprintf(crossCompDefYaml, namespace, crossNamespace)))
ccd := new(v1beta1.ComponentDefinition)
@@ -271,7 +271,7 @@ var _ = Describe("Test application cross namespace resource", func() {
},
Spec: v1beta1.ApplicationSpec{
Components: []v1beta1.ApplicationComponent{
v1beta1.ApplicationComponent{
{
Name: componentName,
Type: "cross-worker",
Properties: runtime.RawExtension{Raw: []byte(`{"cmd":["sleep","1000"],"image":"busybox"}`)},
@@ -287,27 +287,20 @@ var _ = Describe("Test application cross namespace resource", func() {
if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appName}, app); err != nil {
return fmt.Errorf("app not found %v", err)
}
if err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name), resourceTracker); err != nil {
if err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name, 1), resourceTracker); err != nil {
return err
}
if app.Status.Phase != common.ApplicationRunning {
return fmt.Errorf("application status is not running")
}
if app.Status.ResourceTracker == nil || app.Status.ResourceTracker.UID != resourceTracker.UID {
return fmt.Errorf("appication status error ")
}
return nil
}, time.Second*300, time.Microsecond*300).Should(BeNil())
}, time.Second*5, time.Millisecond*300).Should(BeNil())
By("check resource is generated correctly")
Expect(k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appName}, app)).Should(BeNil())
var workload appsv1.Deployment
Eventually(func() error {
appContext := &v1alpha2.ApplicationContext{}
if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appName}, appContext); err != nil {
return fmt.Errorf("cannot generate AppContext %v", err)
}
checkRt := new(v1beta1.ResourceTracker)
if err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name), checkRt); err != nil {
if err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name, 1), checkRt); err != nil {
return err
}
component := &v1alpha2.Component{}
@@ -339,14 +332,13 @@ var _ = Describe("Test application cross namespace resource", func() {
return fmt.Errorf("resourceTracker status recode trackedResource name mismatch recorded %s, actually %s", checkRt.Status.TrackedResources[0].Name, workload.Name)
}
return nil
}, time.Second*50, time.Microsecond*300).Should(BeNil())
}, time.Second*5, time.Millisecond*300).Should(BeNil())
By("deleting application will remove resourceTracker and related workload will be removed")
time.Sleep(3 * time.Second) // wait informer cache to be synced
Expect(k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appName}, app)).Should(BeNil())
Expect(k8sClient.Delete(ctx, app)).Should(BeNil())
Eventually(func() error {
err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name), resourceTracker)
err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name, 1), resourceTracker)
if err == nil {
return fmt.Errorf("resourceTracker still exist")
}
@@ -361,7 +353,7 @@ var _ = Describe("Test application cross namespace resource", func() {
return err
}
return nil
}, time.Second*30, time.Microsecond*300).Should(BeNil())
}, time.Second*5, time.Millisecond*300).Should(BeNil())
})
It("Test update application by add a cross namespace trait resource", func() {
@@ -389,7 +381,7 @@ var _ = Describe("Test application cross namespace resource", func() {
},
Spec: v1beta1.ApplicationSpec{
Components: []v1beta1.ApplicationComponent{
v1beta1.ApplicationComponent{
{
Name: componentName,
Type: "normal-worker",
Properties: runtime.RawExtension{Raw: []byte(`{"cmd":["sleep","1000"],"image":"busybox"}`)},
@@ -421,18 +413,14 @@ var _ = Describe("Test application cross namespace resource", func() {
return fmt.Errorf("error workload number %v", err)
}
workload := depolys.Items[0]
if len(workload.OwnerReferences) != 1 || workload.OwnerReferences[0].Kind != v1alpha2.ApplicationContextKind {
if len(workload.OwnerReferences) != 1 || workload.OwnerReferences[0].Kind != v1beta1.ResourceTrackerKind {
return fmt.Errorf("workload owneRefernece err")
}
err = k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name), resourceTracker)
if err == nil {
return fmt.Errorf("resourceTracker should not be created")
}
if !apierrors.IsNotFound(err) {
if err = k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name, 1), resourceTracker); err != nil {
return err
}
return nil
}, time.Second*60, time.Microsecond*300).Should(BeNil())
}, time.Second*5, time.Millisecond*500).Should(BeNil())
Eventually(func() error {
err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appName}, app)
@@ -440,13 +428,13 @@ var _ = Describe("Test application cross namespace resource", func() {
return err
}
app.Spec.Components[0].Traits = []v1beta1.ApplicationTrait{
v1beta1.ApplicationTrait{
{
Type: "cross-scaler",
Properties: runtime.RawExtension{Raw: []byte(`{"replicas": 1}`)},
},
}
return k8sClient.Update(ctx, app)
}, time.Second*30, time.Microsecond*300).Should(BeNil())
}, time.Second*5, time.Millisecond*300).Should(BeNil())
By("add a cross namespace trait, check resourceTracker and trait status")
Eventually(func() error {
@@ -457,7 +445,7 @@ var _ = Describe("Test application cross namespace resource", func() {
if app.Status.Phase != common.ApplicationRunning {
return fmt.Errorf("application status not running")
}
err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name), resourceTracker)
err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name, 2), resourceTracker)
if err != nil {
return fmt.Errorf("resourceTracker not generated %v", err)
}
@@ -476,14 +464,11 @@ var _ = Describe("Test application cross namespace resource", func() {
if len(trait.OwnerReferences) != 1 || trait.OwnerReferences[0].UID != resourceTracker.UID {
return fmt.Errorf("trait owner reference missmatch")
}
if len(resourceTracker.Status.TrackedResources) != 1 {
return fmt.Errorf("resourceTracker status recode trackedResource length missmatch")
}
if resourceTracker.Status.TrackedResources[0].Name != trait.Name {
return fmt.Errorf("resourceTracker status recode trackedResource name mismatch recorded %s, actually %s", resourceTracker.Status.TrackedResources[0].Name, trait.Name)
if len(resourceTracker.Status.TrackedResources) != 2 {
return fmt.Errorf("expect track %q resources, but got %q", 2, len(resourceTracker.Status.TrackedResources))
}
return nil
}, time.Second*60, time.Microsecond*300).Should(BeNil())
}, time.Second*5, time.Millisecond*500).Should(BeNil())
})
It("Test update application by delete a cross namespace trait resource", func() {
@@ -511,12 +496,12 @@ var _ = Describe("Test application cross namespace resource", func() {
},
Spec: v1beta1.ApplicationSpec{
Components: []v1beta1.ApplicationComponent{
v1beta1.ApplicationComponent{
{
Name: componentName,
Type: "normal-worker",
Properties: runtime.RawExtension{Raw: []byte(`{"cmd":["sleep","1000"],"image":"busybox"}`)},
Traits: []v1beta1.ApplicationTrait{
v1beta1.ApplicationTrait{
{
Type: "cross-scaler",
Properties: runtime.RawExtension{Raw: []byte(`{"replicas": 1}`)},
},
@@ -526,7 +511,6 @@ var _ = Describe("Test application cross namespace resource", func() {
},
}
Expect(k8sClient.Create(ctx, app)).Should(BeNil())
time.Sleep(3 * time.Second) // give informer cache to sync
resourceTracker := new(v1beta1.ResourceTracker)
By("create application will create a cross ns trait, and resourceTracker. check those status")
Eventually(func() error {
@@ -537,7 +521,7 @@ var _ = Describe("Test application cross namespace resource", func() {
if app.Status.Phase != common.ApplicationRunning {
return fmt.Errorf("application status not running")
}
err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name), resourceTracker)
err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name, 1), resourceTracker)
if err != nil {
return fmt.Errorf("error to get resourceTracker %v", err)
}
@@ -556,22 +540,19 @@ var _ = Describe("Test application cross namespace resource", func() {
if len(trait.OwnerReferences) != 1 || trait.OwnerReferences[0].UID != resourceTracker.UID {
return fmt.Errorf("trait owner reference missmatch")
}
if len(resourceTracker.Status.TrackedResources) != 1 {
return fmt.Errorf("resourceTracker status recode trackedResource length missmatch")
}
if resourceTracker.Status.TrackedResources[0].Name != trait.Name {
return fmt.Errorf("resourceTracker status recode trackedResource name mismatch recorded %s, actually %s", resourceTracker.Status.TrackedResources[0].Name, trait.Name)
if len(resourceTracker.Status.TrackedResources) != 2 {
return fmt.Errorf("expect track %q resources, but got %q", 2, len(resourceTracker.Status.TrackedResources))
}
return nil
}, time.Second*60, time.Microsecond*300).Should(BeNil())
}, time.Second*5, time.Millisecond*300).Should(BeNil())
By("update application trait by delete cross ns trait, will delete resourceTracker and related trait resource")
By("update application trait by delete cross ns trait")
Eventually(func() error {
app = new(v1beta1.Application)
Expect(k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appName}, app)).Should(BeNil())
app.Spec.Components[0].Traits = []v1beta1.ApplicationTrait{}
return k8sClient.Update(ctx, app)
}, time.Second*30, time.Microsecond*300).Should(BeNil())
}, time.Second*5, time.Millisecond*300).Should(BeNil())
fmt.Println(app.ResourceVersion)
Eventually(func() error {
app = new(v1beta1.Application)
@@ -581,9 +562,8 @@ var _ = Describe("Test application cross namespace resource", func() {
if app.Status.Phase != common.ApplicationRunning {
return fmt.Errorf("application status not running")
}
err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name), resourceTracker)
if err == nil {
return fmt.Errorf("resourceTracker still exist")
if err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name, 2), resourceTracker); err != nil {
return err
}
mts := new(v1alpha2.ManualScalerTraitList)
opts := []client.ListOption{
@@ -596,11 +576,8 @@ var _ = Describe("Test application cross namespace resource", func() {
if err != nil || len(mts.Items) != 0 {
return fmt.Errorf("cross ns trait still exist")
}
if app.Status.ResourceTracker != nil {
return fmt.Errorf("application status resourceTracker field still exist %s", string(util.JSONMarshal(app.Status.ResourceTracker)))
}
return nil
}, time.Second*60, time.Microsecond*300).Should(BeNil())
}, time.Second*5, time.Millisecond*500).Should(BeNil())
})
It("Test application have two different workload", func() {
@@ -629,12 +606,12 @@ var _ = Describe("Test application cross namespace resource", func() {
},
Spec: v1beta1.ApplicationSpec{
Components: []v1beta1.ApplicationComponent{
v1beta1.ApplicationComponent{
{
Name: component1Name,
Type: "normal-worker",
Properties: runtime.RawExtension{Raw: []byte(`{"cmd":["sleep","1000"],"image":"busybox"}`)},
},
v1beta1.ApplicationComponent{
{
Name: component2Name,
Type: "cross-worker",
Properties: runtime.RawExtension{Raw: []byte(`{"cmd":["sleep","1000"],"image":"busybox"}`)},
@@ -644,7 +621,6 @@ var _ = Describe("Test application cross namespace resource", func() {
}
Expect(k8sClient.Create(ctx, app)).Should(BeNil())
time.Sleep(3 * time.Second) // give the informer cache time to sync
resourceTracker := new(v1beta1.ResourceTracker)
By("create application will generate two workload, and generate resourceTracker")
@@ -656,7 +632,7 @@ var _ = Describe("Test application cross namespace resource", func() {
if app.Status.Phase != common.ApplicationRunning {
return fmt.Errorf("application status not running")
}
err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name), resourceTracker)
err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name, 1), resourceTracker)
if err != nil {
return fmt.Errorf("error to generate resourceTracker %v", err)
}
@@ -678,7 +654,7 @@ var _ = Describe("Test application cross namespace resource", func() {
return fmt.Errorf("failed generate same namespace workload")
}
sameDeplpoy := same.Items[0]
if len(sameDeplpoy.OwnerReferences) != 1 || sameDeplpoy.OwnerReferences[0].Kind != v1alpha2.ApplicationContextKind {
if len(sameDeplpoy.OwnerReferences) != 1 || sameDeplpoy.OwnerReferences[0].Kind != v1beta1.ResourceTrackerKind {
return fmt.Errorf("same ns deploy have error ownerReference")
}
err = k8sClient.List(ctx, cross, crossOpts...)
@@ -689,24 +665,18 @@ var _ = Describe("Test application cross namespace resource", func() {
if len(crossDeplpoy.OwnerReferences) != 1 || crossDeplpoy.OwnerReferences[0].UID != resourceTracker.UID {
return fmt.Errorf("cross ns deploy has wrong ownerReference")
}
if app.Status.ResourceTracker == nil || app.Status.ResourceTracker.UID != resourceTracker.UID {
return fmt.Errorf("app status resourceTracker error")
}
if len(resourceTracker.Status.TrackedResources) != 1 {
return fmt.Errorf("resourceTracker status recorded trackedResource length mismatch")
}
if resourceTracker.Status.TrackedResources[0].Name != crossDeplpoy.Name {
return fmt.Errorf("resourceTracker status recorded trackedResource name mismatch: recorded %s, actually %s", resourceTracker.Status.TrackedResources[0].Name, crossDeplpoy.Name)
if len(resourceTracker.Status.TrackedResources) != 2 {
return fmt.Errorf("expect track %q resources, but got %q", 2, len(resourceTracker.Status.TrackedResources))
}
return nil
}, time.Second*60, time.Microsecond*300).Should(BeNil())
By("update application by delete cross namespace workload, resource tracker will be deleted, then check app status")
}, time.Second*5, time.Millisecond*500).Should(BeNil())
By("update application by delete cross namespace workload")
Eventually(func() error {
app = new(v1beta1.Application)
Expect(k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appName}, app)).Should(BeNil())
app.Spec.Components = app.Spec.Components[:1] // delete a component
return k8sClient.Update(ctx, app)
}, time.Second*30, time.Microsecond*300).Should(BeNil())
}, time.Second*5, time.Millisecond*300).Should(BeNil())
Eventually(func() error {
app = new(v1beta1.Application)
if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appName}, app); err != nil {
@@ -715,9 +685,8 @@ var _ = Describe("Test application cross namespace resource", func() {
if app.Status.Phase != common.ApplicationRunning {
return fmt.Errorf("application status not running")
}
err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name), resourceTracker)
if err == nil {
return fmt.Errorf("resourceTracker still exist")
if err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name, 2), resourceTracker); err != nil {
return err
}
sameOpts := []client.ListOption{
client.InNamespace(namespace),
@@ -737,18 +706,15 @@ var _ = Describe("Test application cross namespace resource", func() {
return fmt.Errorf("failed generate same namespace workload")
}
sameDeplpoy := same.Items[0]
if len(sameDeplpoy.OwnerReferences) != 1 || sameDeplpoy.OwnerReferences[0].Kind != v1alpha2.ApplicationContextKind {
if len(sameDeplpoy.OwnerReferences) != 1 || sameDeplpoy.OwnerReferences[0].Kind != v1beta1.ResourceTrackerKind {
return fmt.Errorf("same ns deploy have error ownerReference")
}
err = k8sClient.List(ctx, cross, crossOpts...)
if err != nil || len(cross.Items) != 0 {
return fmt.Errorf("error : cross namespace workload still exist")
}
if app.Status.ResourceTracker != nil {
return fmt.Errorf("error app status resourceTracker")
}
return nil
}, time.Second*60, time.Microsecond*300).Should(BeNil())
}, time.Second*5, time.Millisecond*300).Should(BeNil())
})
It("Update a cross namespace workload of application", func() {
@@ -769,7 +735,7 @@ var _ = Describe("Test application cross namespace resource", func() {
},
Spec: v1beta1.ApplicationSpec{
Components: []v1beta1.ApplicationComponent{
v1beta1.ApplicationComponent{
{
Name: componentName,
Type: "cross-worker",
Properties: runtime.RawExtension{Raw: []byte(`{"cmd":["sleep","1000"],"image":"busybox"}`)},
@@ -785,25 +751,18 @@ var _ = Describe("Test application cross namespace resource", func() {
if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appName}, app); err != nil {
return fmt.Errorf("app not found %v", err)
}
if err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name), resourceTracker); err != nil {
if err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name, 1), resourceTracker); err != nil {
return err
}
if app.Status.Phase != common.ApplicationRunning {
return fmt.Errorf("application status is not running")
}
if app.Status.ResourceTracker == nil || app.Status.ResourceTracker.UID != resourceTracker.UID {
return fmt.Errorf("appication status error ")
}
return nil
}, time.Second*600, time.Microsecond*300).Should(BeNil())
}, time.Second*5, time.Millisecond*300).Should(BeNil())
By("check resource is generated correctly")
Expect(k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appName}, app)).Should(BeNil())
var workload appsv1.Deployment
Eventually(func() error {
appContext := &v1alpha2.ApplicationContext{}
if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appName}, appContext); err != nil {
return fmt.Errorf("cannot generate AppContext %v", err)
}
component := &v1alpha2.Component{}
if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: componentName}, component); err != nil {
return fmt.Errorf("cannot generate component %v", err)
@@ -823,7 +782,7 @@ var _ = Describe("Test application cross namespace resource", func() {
return fmt.Errorf("error workload number %v", err)
}
workload = depolys.Items[0]
if err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name), resourceTracker); err != nil {
if err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name, 1), resourceTracker); err != nil {
return err
}
if len(workload.OwnerReferences) != 1 || workload.OwnerReferences[0].UID != resourceTracker.UID {
@@ -833,13 +792,10 @@ var _ = Describe("Test application cross namespace resource", func() {
return fmt.Errorf("container image not match")
}
if len(resourceTracker.Status.TrackedResources) != 1 {
return fmt.Errorf("resourceTracker status recode trackedResource length missmatch")
}
if resourceTracker.Status.TrackedResources[0].Name != workload.Name {
return fmt.Errorf("resourceTracker status recode trackedResource name mismatch recorded %s, actually %s", resourceTracker.Status.TrackedResources[0].Name, workload.Name)
return fmt.Errorf("expect track %q resources, but got %q", 1, len(resourceTracker.Status.TrackedResources))
}
return nil
}, time.Second*50, time.Microsecond*300).Should(BeNil())
}, time.Second*50, time.Millisecond*300).Should(BeNil())
By("update application and check resource status")
Eventually(func() error {
@@ -854,13 +810,9 @@ var _ = Describe("Test application cross namespace resource", func() {
return err
}
return nil
}, time.Second*60, time.Microsecond*300).Should(BeNil())
}, time.Second*5, time.Millisecond*300).Should(BeNil())
Eventually(func() error {
appContext := &v1alpha2.ApplicationContext{}
if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appName}, appContext); err != nil {
return fmt.Errorf("cannot generate AppContext %v", err)
}
component := &v1alpha2.Component{}
if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: componentName}, component); err != nil {
return fmt.Errorf("cannot generate component %v", err)
@@ -868,6 +820,12 @@ var _ = Describe("Test application cross namespace resource", func() {
if component.ObjectMeta.Labels[oam.LabelAppName] != appName {
return fmt.Errorf("component error label ")
}
if err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name, 2), resourceTracker); err != nil {
return err
}
if len(resourceTracker.Status.TrackedResources) != 1 {
return fmt.Errorf("expect track %q resources, but got %q", 1, len(resourceTracker.Status.TrackedResources))
}
depolys := new(appsv1.DeploymentList)
opts := []client.ListOption{
client.InNamespace(crossNamespace),
@@ -886,24 +844,14 @@ var _ = Describe("Test application cross namespace resource", func() {
if workload.Spec.Template.Spec.Containers[0].Image != "nginx" {
return fmt.Errorf("container image not match")
}
if err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name), resourceTracker); err != nil {
return err
}
if len(resourceTracker.Status.TrackedResources) != 1 {
return fmt.Errorf("resourceTracker status recode trackedResource length missmatch")
}
if resourceTracker.Status.TrackedResources[0].Name != workload.Name {
return fmt.Errorf("resourceTracker status recode trackedResource name mismatch recorded %s, actually %s", resourceTracker.Status.TrackedResources[0].Name, workload.Name)
}
return nil
}, time.Second*60, time.Microsecond*1000).Should(BeNil())
}, time.Second*5, time.Millisecond*500).Should(BeNil())
By("deleting application will remove resourceTracker and related workload will be removed")
time.Sleep(3 * time.Second) // wait informer cache to be synced
Expect(k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appName}, app)).Should(BeNil())
Expect(k8sClient.Delete(ctx, app)).Should(BeNil())
Eventually(func() error {
err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name), resourceTracker)
err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name, 2), resourceTracker)
if err == nil {
return fmt.Errorf("resourceTracker still exist")
}
@@ -918,7 +866,7 @@ var _ = Describe("Test application cross namespace resource", func() {
return err
}
return nil
}, time.Second*30, time.Microsecond*300).Should(BeNil())
}, time.Second*5, time.Millisecond*500).Should(BeNil())
})
It("Test cross-namespace resource gc logic, delete a cross-ns component", func() {
@@ -943,12 +891,12 @@ var _ = Describe("Test application cross namespace resource", func() {
},
Spec: v1beta1.ApplicationSpec{
Components: []v1beta1.ApplicationComponent{
v1beta1.ApplicationComponent{
{
Name: component1Name,
Type: "cross-worker",
Properties: runtime.RawExtension{Raw: []byte(`{"cmd":["sleep","1000"],"image":"busybox"}`)},
},
v1beta1.ApplicationComponent{
{
Name: component2Name,
Type: "cross-worker",
Properties: runtime.RawExtension{Raw: []byte(`{"cmd":["sleep","1000"],"image":"busybox"}`)},
@@ -958,7 +906,6 @@ var _ = Describe("Test application cross namespace resource", func() {
}
Expect(k8sClient.Create(ctx, app)).Should(BeNil())
time.Sleep(3 * time.Second) // give the informer cache time to sync
resourceTracker := new(v1beta1.ResourceTracker)
By("create application will generate two workload, and generate resourceTracker")
@@ -970,7 +917,7 @@ var _ = Describe("Test application cross namespace resource", func() {
if app.Status.Phase != common.ApplicationRunning {
return fmt.Errorf("application status not running")
}
err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name), resourceTracker)
err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name, 1), resourceTracker)
if err != nil {
return fmt.Errorf("error to generate resourceTracker %v", err)
}
@@ -994,11 +941,8 @@ var _ = Describe("Test application cross namespace resource", func() {
if len(deploy2.OwnerReferences) != 1 || deploy2.OwnerReferences[0].UID != resourceTracker.UID {
return fmt.Errorf("deploy2 have error ownerReference")
}
if app.Status.ResourceTracker == nil || app.Status.ResourceTracker.UID != resourceTracker.UID {
return fmt.Errorf("app status resourceTracker error")
}
if len(resourceTracker.Status.TrackedResources) != 2 {
return fmt.Errorf("resourceTracker status recode trackedResource length missmatch")
return fmt.Errorf("expect track %q resources, but got %q", 2, len(resourceTracker.Status.TrackedResources))
}
if resourceTracker.Status.TrackedResources[0].Namespace != crossNamespace || resourceTracker.Status.TrackedResources[1].Namespace != crossNamespace {
return fmt.Errorf("resourceTracker recorde namespace mismatch")
@@ -1010,14 +954,14 @@ var _ = Describe("Test application cross namespace resource", func() {
return fmt.Errorf("resourceTracker status recode trackedResource name mismatch recorded %s, actually %s", resourceTracker.Status.TrackedResources[0].Name, deploy2.Name)
}
return nil
}, time.Second*60, time.Microsecond*300).Should(BeNil())
By("update application by delete a cross namespace workload, resource tracker will still exist, then check app status")
}, time.Second*5, time.Millisecond*300).Should(BeNil())
By("update application by delete a cross namespace workload")
Eventually(func() error {
app = new(v1beta1.Application)
Expect(k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appName}, app)).Should(BeNil())
app.Spec.Components = app.Spec.Components[:1] // delete a component
return k8sClient.Update(ctx, app)
}, time.Second*30, time.Microsecond*300).Should(BeNil())
}, time.Second*5, time.Millisecond*300).Should(BeNil())
Eventually(func() error {
app = new(v1beta1.Application)
if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appName}, app); err != nil {
@@ -1026,7 +970,7 @@ var _ = Describe("Test application cross namespace resource", func() {
if app.Status.Phase != common.ApplicationRunning {
return fmt.Errorf("application status not running")
}
err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name), resourceTracker)
err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name, 2), resourceTracker)
if err != nil {
return fmt.Errorf("failed to get resourceTracker %v", err)
}
@@ -1046,28 +990,22 @@ var _ = Describe("Test application cross namespace resource", func() {
return fmt.Errorf("same ns deploy have error ownerReference")
}
checkRt := new(v1beta1.ResourceTracker)
err = k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name), checkRt)
err = k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name, 2), checkRt)
if err != nil {
return fmt.Errorf("error get resourceTracker")
}
if app.Status.ResourceTracker == nil {
return fmt.Errorf("app status resourceTracker error")
}
if app.Status.ResourceTracker.UID != checkRt.UID {
return fmt.Errorf("error app status resourceTracker UID")
}
if len(checkRt.Status.TrackedResources) != 1 {
return fmt.Errorf("error resourceTracker status trackedResource")
return fmt.Errorf("expect track %q resources, but got %q", 1, len(checkRt.Status.TrackedResources))
}
return nil
}, time.Second*80, time.Microsecond*300).Should(BeNil())
}, time.Second*5, time.Millisecond*500).Should(BeNil())
By("deleting application will remove resourceTracker and related resourceTracker will be removed")
By("deleting application will remove resourceTracker")
app = new(v1beta1.Application)
Expect(k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appName}, app)).Should(BeNil())
Expect(k8sClient.Delete(ctx, app)).Should(BeNil())
Eventually(func() error {
err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name), resourceTracker)
err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name, 2), resourceTracker)
if err == nil {
return fmt.Errorf("resourceTracker still exist")
}
@@ -1075,7 +1013,7 @@ var _ = Describe("Test application cross namespace resource", func() {
return err
}
return nil
}, time.Second*60, time.Microsecond*300).Should(BeNil())
}, time.Second*5, time.Millisecond*500).Should(BeNil())
})
It("Test cross-namespace resource gc logic, delete a cross-ns trait", func() {
@@ -1105,12 +1043,12 @@ var _ = Describe("Test application cross namespace resource", func() {
},
Spec: v1beta1.ApplicationSpec{
Components: []v1beta1.ApplicationComponent{
v1beta1.ApplicationComponent{
{
Name: componentName,
Type: "cross-worker",
Properties: runtime.RawExtension{Raw: []byte(`{"cmd":["sleep","1000"],"image":"busybox"}`)},
Traits: []v1beta1.ApplicationTrait{
v1beta1.ApplicationTrait{
{
Type: "cross-scaler",
Properties: runtime.RawExtension{Raw: []byte(`{"replicas": 0}`)},
},
@@ -1121,7 +1059,6 @@ var _ = Describe("Test application cross namespace resource", func() {
}
Expect(k8sClient.Create(ctx, app)).Should(BeNil())
time.Sleep(3 * time.Second) // give the informer cache time to sync
resourceTracker := new(v1beta1.ResourceTracker)
By("create app and check resource and app status")
Eventually(func() error {
@@ -1132,7 +1069,7 @@ var _ = Describe("Test application cross namespace resource", func() {
if app.Status.Phase != common.ApplicationRunning {
return fmt.Errorf("application status not running")
}
err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name), resourceTracker)
err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name, 1), resourceTracker)
if err != nil {
return fmt.Errorf("error to get resourceTracker %v", err)
}
@@ -1148,7 +1085,7 @@ var _ = Describe("Test application cross namespace resource", func() {
return fmt.Errorf("failed generate cross namespace trait")
}
if len(resourceTracker.Status.TrackedResources) != 2 {
return fmt.Errorf("resourceTracker status recode trackedResource length missmatch")
return fmt.Errorf("expect track %q resources, but got %q", 2, len(resourceTracker.Status.TrackedResources))
}
trait := mts.Items[0]
if len(trait.OwnerReferences) != 1 || trait.OwnerReferences[0].UID != resourceTracker.UID {
@@ -1172,7 +1109,7 @@ var _ = Describe("Test application cross namespace resource", func() {
}
}
return nil
}, time.Second*60, time.Microsecond*300).Should(BeNil())
}, time.Second*5, time.Millisecond*500).Should(BeNil())
By("update application trait by delete cross ns trait, resourceTracker will still exist")
Eventually(func() error {
@@ -1180,7 +1117,7 @@ var _ = Describe("Test application cross namespace resource", func() {
Expect(k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appName}, app)).Should(BeNil())
app.Spec.Components[0].Traits = []v1beta1.ApplicationTrait{}
return k8sClient.Update(ctx, app)
}, time.Second*30, time.Microsecond*300).Should(BeNil())
}, time.Second*5, time.Millisecond*300).Should(BeNil())
Eventually(func() error {
app = new(v1beta1.Application)
@@ -1190,7 +1127,7 @@ var _ = Describe("Test application cross namespace resource", func() {
if app.Status.Phase != common.ApplicationRunning {
return fmt.Errorf("application status not running")
}
err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name), resourceTracker)
err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name, 2), resourceTracker)
if err != nil {
return fmt.Errorf("error to get resourceTracker %v", err)
}
@@ -1206,7 +1143,7 @@ var _ = Describe("Test application cross namespace resource", func() {
return fmt.Errorf("cross namespace trait still exist")
}
if len(resourceTracker.Status.TrackedResources) != 1 {
return fmt.Errorf("resourceTracker status recode trackedResource length missmatch")
return fmt.Errorf("expect track %d resources, but got %d", 1, len(resourceTracker.Status.TrackedResources))
}
deploys := new(appsv1.DeploymentList)
err = k8sClient.List(ctx, deploys, opts...)
@@ -1221,13 +1158,13 @@ var _ = Describe("Test application cross namespace resource", func() {
return fmt.Errorf("error to record deploy name in app status")
}
return nil
}, time.Second*60, time.Microsecond*300).Should(BeNil())
}, time.Second*5, time.Millisecond*500).Should(BeNil())
By("deleting application will remove resourceTracker and related resourceTracker will be removed")
app = new(v1beta1.Application)
Expect(k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appName}, app)).Should(BeNil())
Expect(k8sClient.Delete(ctx, app)).Should(BeNil())
Eventually(func() error {
err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name), resourceTracker)
err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name, 2), resourceTracker)
if err == nil {
return fmt.Errorf("resourceTracker still exist")
}
@@ -1235,7 +1172,7 @@ var _ = Describe("Test application cross namespace resource", func() {
return err
}
return nil
}, time.Second*60, time.Microsecond*300).Should(BeNil())
}, time.Second*5, time.Millisecond*500).Should(BeNil())
})
It("Test cross-namespace resource gc logic, update a cross-ns workload's namespace", func() {
@@ -1262,7 +1199,7 @@ var _ = Describe("Test application cross namespace resource", func() {
},
Spec: v1beta1.ApplicationSpec{
Components: []v1beta1.ApplicationComponent{
v1beta1.ApplicationComponent{
{
Name: componentName,
Type: "cross-worker",
Properties: runtime.RawExtension{Raw: []byte(`{"cmd":["sleep","1000"],"image":"busybox"}`)},
@@ -1278,23 +1215,20 @@ var _ = Describe("Test application cross namespace resource", func() {
if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appName}, app); err != nil {
return fmt.Errorf("app not found %v", err)
}
if err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name), resourceTracker); err != nil {
if err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name, 1), resourceTracker); err != nil {
return err
}
if app.Status.Phase != common.ApplicationRunning {
return fmt.Errorf("application status is not running")
}
if app.Status.ResourceTracker == nil || app.Status.ResourceTracker.UID != resourceTracker.UID {
return fmt.Errorf("appication status error ")
}
return nil
}, time.Second*60, time.Microsecond*300).Should(BeNil())
}, time.Second*5, time.Millisecond*500).Should(BeNil())
By("check resource is generated correctly")
Expect(k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appName}, app)).Should(BeNil())
var workload appsv1.Deployment
Eventually(func() error {
checkRt := new(v1beta1.ResourceTracker)
if err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name), checkRt); err != nil {
if err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name, 1), checkRt); err != nil {
return err
}
depolys := new(appsv1.DeploymentList)
@@ -1319,10 +1253,9 @@ var _ = Describe("Test application cross namespace resource", func() {
return fmt.Errorf("resourceTracker status recode trackedResource name mismatch recorded %s, actually %s", checkRt.Status.TrackedResources[0].Name, workload.Name)
}
return nil
}, time.Second*60, time.Microsecond*300).Should(BeNil())
}, time.Second*5, time.Millisecond*500).Should(BeNil())
By("update application modify workload namespace will remove resourceTracker and related old workload will be removed")
time.Sleep(3 * time.Second) // wait informer cache to be synced
By("update application modify workload namespace")
Eventually(func() error {
app = new(v1beta1.Application)
err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appName}, app)
@@ -1335,16 +1268,12 @@ var _ = Describe("Test application cross namespace resource", func() {
return err
}
return nil
}, time.Second*30, time.Microsecond).Should(BeNil())
}, time.Second*5, time.Millisecond*500).Should(BeNil())
Eventually(func() error {
err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name), resourceTracker)
if err == nil {
return fmt.Errorf("resourceTracker still exist")
}
if !apierrors.IsNotFound(err) {
if err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name, 2), resourceTracker); err != nil {
return err
}
err = k8sClient.Get(ctx, types.NamespacedName{Namespace: crossNamespace, Name: workload.GetName()}, &workload)
err := k8sClient.Get(ctx, types.NamespacedName{Namespace: crossNamespace, Name: workload.GetName()}, &workload)
if err == nil {
return fmt.Errorf("wrokload still exist")
}
@@ -1357,12 +1286,12 @@ var _ = Describe("Test application cross namespace resource", func() {
return fmt.Errorf("generate same namespace workload error")
}
return nil
}, time.Second*60, time.Microsecond*300).Should(BeNil())
}, time.Second*5, time.Millisecond*500).Should(BeNil())
})
})
func generateResourceTrackerKey(namespace string, name string) types.NamespacedName {
return types.NamespacedName{Name: fmt.Sprintf("%s-%s", namespace, name)}
func generateResourceTrackerKey(namespace string, appName string, revision int) types.NamespacedName {
return types.NamespacedName{Name: fmt.Sprintf("%s-v%d-%s", appName, revision, namespace)}
}
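// A minimal sketch (the helper below is hypothetical, added for illustration only):
// under the new scheme, each application revision owns its own cluster-scoped
// ResourceTracker named "<app>-v<revision>-<namespace>", so upgrading an app from
// v1 to v2 produces a second tracker instead of mutating the old "<namespace>-<app>" one.
func exampleTrackerNames() []string {
names := make([]string, 0, 3)
for rev := 1; rev <= 3; rev++ {
// e.g. "myapp-v1-default", "myapp-v2-default", "myapp-v3-default"
names = append(names, generateResourceTrackerKey("default", "myapp", rev).Name)
}
return names
}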
const (

View File

@@ -31,9 +31,9 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/pkg/oam"
@@ -46,12 +46,12 @@ var (
var _ = Describe("Test application controller clean up appRevision", func() {
ctx := context.TODO()
namespace := "clean-up-revision"
var namespace string
cd := &v1beta1.ComponentDefinition{}
cdDefJson, _ := yaml.YAMLToJSON([]byte(compDefYaml))
BeforeEach(func() {
namespace = randomNamespaceName("clean-up-revision-test")
ns := v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: namespace,
@@ -59,6 +59,7 @@ var _ = Describe("Test application controller clean up appRevision", func() {
}
Expect(k8sClient.Create(ctx, &ns)).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
cdDefJson, _ := yaml.YAMLToJSON([]byte(fmt.Sprintf(compDefYaml, namespace)))
Expect(json.Unmarshal(cdDefJson, cd)).Should(BeNil())
Expect(k8sClient.Create(ctx, cd.DeepCopy())).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
})
@@ -71,15 +72,6 @@ var _ = Describe("Test application controller clean up appRevision", func() {
k8sClient.DeleteAllOf(ctx, &v1beta1.TraitDefinition{}, client.InNamespace(namespace))
Expect(k8sClient.Delete(ctx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, client.PropagationPolicy(metav1.DeletePropagationForeground))).Should(Succeed())
// guarantee the namespace has been deleted
Eventually(func() error {
ns := new(v1.Namespace)
err := k8sClient.Get(ctx, types.NamespacedName{Name: namespace}, ns)
if err == nil {
return fmt.Errorf("namespace still exist")
}
return nil
}, time.Second*60, time.Microsecond*300).Should(BeNil())
})
It("Test clean up appRevision", func() {
@@ -96,7 +88,7 @@ var _ = Describe("Test application controller clean up appRevision", func() {
return fmt.Errorf("application point to wrong revision")
}
return nil
}, time.Second*30, time.Microsecond*300).Should(BeNil())
}, time.Second*10, time.Millisecond*500).Should(BeNil())
Eventually(func() error {
checkApp = new(v1beta1.Application)
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
@@ -106,10 +98,8 @@ var _ = Describe("Test application controller clean up appRevision", func() {
return err
}
return nil
}, time.Second*30, time.Microsecond*300).Should(BeNil())
}, time.Second*10, time.Millisecond*500).Should(BeNil())
}
appContext := new(v1alpha2.ApplicationContext)
Expect(k8sClient.Get(ctx, appKey, appContext)).Should(BeNil())
listOpts := []client.ListOption{
client.InNamespace(namespace),
client.MatchingLabels{
@@ -126,13 +116,7 @@ var _ = Describe("Test application controller clean up appRevision", func() {
return fmt.Errorf("error appRevison number wants %d, actually %d", appRevisionLimit+1, len(appRevisionList.Items))
}
return nil
}, time.Second*30, time.Microsecond*300).Should(BeNil())
Eventually(func() error {
if err := k8sClient.Get(ctx, appKey, appContext); err != nil {
return err
}
return nil
}, time.Second*30, time.Microsecond*300).Should(BeNil())
}, time.Second*10, time.Millisecond*500).Should(BeNil())
By("create new appRevision will remove appRevison1")
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
property := fmt.Sprintf(`{"cmd":["sleep","1000"],"image":"busybox:%d"}`, 6)
@@ -156,7 +140,7 @@ var _ = Describe("Test application controller clean up appRevision", func() {
return fmt.Errorf("appRevision collection mismatch")
}
return nil
}, time.Second*30, time.Microsecond*300).Should(BeNil())
}, time.Second*10, time.Millisecond*500).Should(BeNil())
By("update app again will gc appRevision2")
Eventually(func() error {
@@ -187,7 +171,7 @@ var _ = Describe("Test application controller clean up appRevision", func() {
return fmt.Errorf("appRevision collection mismatch")
}
return nil
}, time.Second*30, time.Microsecond*300).Should(BeNil())
}, time.Second*10, time.Millisecond*500).Should(BeNil())
})
It("Test clean up rollout appRevision", func() {
@@ -215,10 +199,8 @@ var _ = Describe("Test application controller clean up appRevision", func() {
return err
}
return nil
}, time.Second*30, time.Microsecond*300).Should(BeNil())
}, time.Second*10, time.Millisecond*500).Should(BeNil())
}
appContext := new(v1alpha2.ApplicationContext)
Expect(k8sClient.Get(ctx, appKey, appContext)).Should(util.NotFoundMatcher{})
listOpts := []client.ListOption{
client.InNamespace(namespace),
client.MatchingLabels{
@@ -260,7 +242,7 @@ var _ = Describe("Test application controller clean up appRevision", func() {
return fmt.Errorf("appRevision collection mismatch")
}
return nil
}, time.Second*30, time.Microsecond*300).Should(BeNil())
}, time.Second*10, time.Millisecond*500).Should(BeNil())
By("update app again will gc appRevision2")
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
@@ -284,7 +266,7 @@ var _ = Describe("Test application controller clean up appRevision", func() {
return fmt.Errorf("appRevision collection mismatch")
}
return nil
}, time.Second*30, time.Microsecond*300).Should(BeNil())
}, time.Second*10, time.Millisecond*500).Should(BeNil())
appRollout := &v1beta1.AppRollout{
TypeMeta: metav1.TypeMeta{
APIVersion: v1beta1.AppRolloutKindAPIVersion,
@@ -298,9 +280,10 @@ var _ = Describe("Test application controller clean up appRevision", func() {
TargetAppRevisionName: appName + "-v3",
ComponentList: []string{"comp1"},
RolloutPlan: v1alpha1.RolloutPlan{
TargetSize: pointer.Int32Ptr(2),
RolloutBatches: []v1alpha1.RolloutBatch{
{
Replicas: intstr.FromInt(1),
Replicas: intstr.FromInt(2),
},
},
},
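// NB (a reading of the values above, not stated in the commit): with TargetSize 2 and a
// single batch of 2 replicas, the rollout completes in one batch, so the test only has
// to wait for a single step before checking revision cleanup.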
@@ -318,7 +301,7 @@ var _ = Describe("Test application controller clean up appRevision", func() {
return fmt.Errorf("application point to wrong revision")
}
return nil
}, time.Second*30, time.Microsecond*300).Should(BeNil())
}, time.Second*10, time.Millisecond*500).Should(BeNil())
Eventually(func() error {
if err := k8sClient.Get(ctx, appKey, checkApp); err != nil {
return err
@@ -354,7 +337,7 @@ var _ = Describe("Test application controller clean up appRevision", func() {
return fmt.Errorf("appRevision collection mismatch")
}
return nil
}, time.Second*30, time.Microsecond*300).Should(BeNil())
}, time.Second*60, time.Millisecond*300).Should(BeNil())
})
})
@@ -364,7 +347,7 @@ apiVersion: core.oam.dev/v1beta1
kind: ComponentDefinition
metadata:
name: normal-worker
namespace: clean-up-revision
namespace: %s
annotations:
definition.oam.dev/description: "Long-running scalable backend worker without network endpoint"
spec:

View File

@@ -1,349 +0,0 @@
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers_test
import (
"context"
"fmt"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/application"
"github.com/oam-dev/kubevela/pkg/oam/util"
)
var _ = Describe("Test applicationContext reconcile", func() {
ctx := context.Background()
var (
namespace = "appcontext-test-ns"
acName1 = "applicationconfig1"
acName2 = "applicationconfig2"
compName1 = "component1"
compName2 = "component2"
containerName = "test-container"
containerImage = "notarealimage"
cwName1 = "appcontext-test-cw1"
cwName2 = "appcontext-test-cw2"
arName1 = "ar1"
arName2 = "ar2"
appContextName = "appcontext1"
traitName1 = "trait1"
traitName2 = "trait2"
key = types.NamespacedName{Namespace: namespace, Name: appContextName}
)
var ns = corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
workload1 := cw(
cwWithName(cwName1),
cwWithContainers([]v1alpha2.Container{
{
Name: containerName,
Image: containerImage,
Command: []string{"sleep", "30s"},
Ports: []v1alpha2.ContainerPort{
v1alpha2.ContainerPort{
Name: "http",
Port: 80,
},
},
},
}),
)
rawWorkload1 := runtime.RawExtension{Object: workload1}
co1 := comp(
compWithName(compName1),
compWithNamespace(namespace),
compWithWorkload(rawWorkload1),
)
ac1 := ac(
acWithName(acName1),
acWithNamspace(namespace),
acWithComps([]v1alpha2.ApplicationConfigurationComponent{
{
ComponentName: compName1,
Traits: []v1alpha2.ComponentTrait{},
},
}),
)
workload2 := cw(
cwWithName(cwName2),
cwWithContainers([]v1alpha2.Container{
{
Name: containerName,
Image: containerImage,
Command: []string{"sleep", "30s"},
Ports: []v1alpha2.ContainerPort{
v1alpha2.ContainerPort{
Name: "http",
Port: 80,
},
},
},
}),
)
rawWorkload2 := runtime.RawExtension{Object: workload2}
co2 := comp(
compWithName(compName2),
compWithNamespace(namespace),
compWithWorkload(rawWorkload2),
)
dummyApp := &v1alpha2.Application{
ObjectMeta: metav1.ObjectMeta{
Name: "dummy",
Namespace: namespace,
},
Spec: v1alpha2.ApplicationSpec{
Components: []v1alpha2.ApplicationComponent{},
},
}
ar1 := &v1alpha2.ApplicationRevision{
ObjectMeta: metav1.ObjectMeta{
Name: arName1,
Namespace: namespace,
},
Spec: v1alpha2.ApplicationRevisionSpec{
Components: application.ConvertComponents2RawRevisions([]*v1alpha2.Component{co1}),
ApplicationConfiguration: util.Object2RawExtension(ac1),
Application: *dummyApp,
}}
trait2 := &v1alpha2.ManualScalerTrait{
TypeMeta: metav1.TypeMeta{
Kind: v1alpha2.ManualScalerTraitKind,
APIVersion: v1alpha2.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: traitName2,
Namespace: namespace,
},
Spec: v1alpha2.ManualScalerTraitSpec{
ReplicaCount: 3,
},
}
rawTrait2 := runtime.RawExtension{Object: trait2}
ac2 := ac(
acWithName(acName2),
acWithNamspace(namespace),
acWithComps([]v1alpha2.ApplicationConfigurationComponent{
{
ComponentName: compName2,
Traits: []v1alpha2.ComponentTrait{
v1alpha2.ComponentTrait{
Trait: rawTrait2,
},
},
},
}),
)
ar2 := &v1alpha2.ApplicationRevision{
ObjectMeta: metav1.ObjectMeta{
Name: arName2,
Namespace: namespace,
},
Spec: v1alpha2.ApplicationRevisionSpec{
ApplicationConfiguration: util.Object2RawExtension(ac2),
Components: application.ConvertComponents2RawRevisions([]*v1alpha2.Component{co2}),
Application: *dummyApp,
}}
appContext := &v1alpha2.ApplicationContext{
ObjectMeta: metav1.ObjectMeta{
Name: appContextName,
Namespace: namespace,
},
Spec: v1alpha2.ApplicationContextSpec{
ApplicationRevisionName: arName1,
},
}
BeforeEach(func() {
Expect(k8sClient.Create(ctx, &ns)).Should(SatisfyAny(BeNil()))
Eventually(
func() error {
return k8sClient.Get(ctx, types.NamespacedName{Name: namespace}, &ns)
},
time.Second*3, time.Millisecond*300).Should(BeNil())
Expect(k8sClient.Create(ctx, co1)).Should(Succeed())
Expect(k8sClient.Create(ctx, co2)).Should(Succeed())
Expect(k8sClient.Create(ctx, ar1)).Should(Succeed())
Expect(k8sClient.Create(ctx, ar2)).Should(Succeed())
})
AfterEach(func() {
By("Clean up resources after a test")
Expect(k8sClient.Delete(ctx, &ns, client.PropagationPolicy(metav1.DeletePropagationForeground))).Should(Succeed())
time.Sleep(15 * time.Second)
})
It("Test appContext reconcile logic ", func() {
By("Test AppRevision1 only have 1 workload on trait")
Expect(k8sClient.Create(ctx, appContext)).Should(Succeed())
updateTime := time.Now()
Eventually(func() error {
appCtx := new(v1alpha2.ApplicationContext)
err := k8sClient.Get(ctx, key, appCtx)
if err != nil {
return err
}
now := time.Now()
if now.Sub(updateTime) > 4*time.Second {
requestReconcileNow(ctx, appCtx)
updateTime = now
}
if len(appCtx.Status.Workloads) != 1 {
return fmt.Errorf("appContext status error:the number of workloads not right")
}
if appCtx.Status.Workloads[0].ComponentName != compName1 {
return fmt.Errorf("appContext status error:the name of component not right, expect %s", compName1)
}
cw := new(v1alpha2.ContainerizedWorkload)
return k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: cwName1}, cw)
}, time.Second*60, time.Millisecond*300).Should(BeNil())
By("Test revision have both workload and trait , switch AppContext to revision2")
Eventually(func() error {
updateContext := new(v1alpha2.ApplicationContext)
err := k8sClient.Get(ctx, key, updateContext)
if err != nil {
return err
}
updateContext.Spec.ApplicationRevisionName = arName2
err = k8sClient.Update(ctx, updateContext)
if err != nil {
return err
}
return nil
}, time.Second*60, time.Microsecond*300).Should(BeNil())
updateTime = time.Now()
Eventually(func() error {
appCtx := new(v1alpha2.ApplicationContext)
err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appContextName}, appCtx)
if err != nil {
return err
}
now := time.Now()
if now.Sub(updateTime) > 4*time.Second {
requestReconcileNow(ctx, appCtx)
updateTime = now
}
if len(appCtx.Status.Workloads) != 1 {
return fmt.Errorf("appContext status error:the number of workloads not right")
}
if appCtx.Status.Workloads[0].ComponentName != compName2 {
return fmt.Errorf("appContext status error:the name of component not right, expect %s", compName2)
}
cw := new(v1alpha2.ContainerizedWorkload)
err = k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: cwName2}, cw)
if err != nil {
return fmt.Errorf("cannot get workload 2 %v", err)
}
if len(appCtx.Status.Workloads[0].Traits) != 1 {
return fmt.Errorf("appContext status error:the number of traits status not right")
}
mt := new(v1alpha2.ManualScalerTrait)
err = k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: traitName2}, mt)
if err != nil {
return fmt.Errorf("cannot get component trait %v", err)
}
return nil
}, time.Second*60, time.Millisecond*300).Should(BeNil())
By("Test add trait in AppRevision1, and switch context to AppRevision1")
trait1 := &v1alpha2.ManualScalerTrait{
TypeMeta: metav1.TypeMeta{
Kind: v1alpha2.ManualScalerTraitKind,
APIVersion: v1alpha2.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: traitName1,
Namespace: namespace,
},
Spec: v1alpha2.ManualScalerTraitSpec{
ReplicaCount: 2,
},
}
rawTrait1 := runtime.RawExtension{Object: trait1}
ac1.Spec.Components[0].Traits = []v1alpha2.ComponentTrait{
v1alpha2.ComponentTrait{
Trait: rawTrait1,
},
}
Expect(k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: arName1}, ar1)).Should(BeNil())
ar1.Spec.ApplicationConfiguration = util.Object2RawExtension(ac1)
Expect(k8sClient.Update(ctx, ar1)).Should(Succeed())
Eventually(func() error {
updateContext := new(v1alpha2.ApplicationContext)
err := k8sClient.Get(ctx, key, updateContext)
if err != nil {
return err
}
updateContext.Spec.ApplicationRevisionName = arName1
err = k8sClient.Update(ctx, updateContext)
if err != nil {
return err
}
return nil
}, time.Second*60, time.Microsecond*300).Should(BeNil())
Eventually(func() error {
mt := new(v1alpha2.ManualScalerTrait)
err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: traitName1}, mt)
if err != nil {
return err
}
if mt.Spec.ReplicaCount != 2 {
return fmt.Errorf("repica number missmatch , actual: %d", mt.Spec.ReplicaCount)
}
return nil
}, time.Second*60, time.Millisecond*300).Should(BeNil())
ac1.Spec.Components[0].Traits = []v1alpha2.ComponentTrait{}
By("Test delete trait in AppRevision2, and switch context to AppRevision2")
Expect(k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: arName2}, ar2)).Should(BeNil())
ac2.Spec.Components[0].Traits = []v1alpha2.ComponentTrait{}
ar2.Spec.ApplicationConfiguration = util.Object2RawExtension(ac2)
Expect(k8sClient.Update(ctx, ar2)).Should(BeNil())
Eventually(func() error {
updateContext := new(v1alpha2.ApplicationContext)
err := k8sClient.Get(ctx, key, updateContext)
if err != nil {
return err
}
updateContext.Spec.ApplicationRevisionName = arName2
err = k8sClient.Update(ctx, updateContext)
if err != nil {
return err
}
return nil
}, time.Second*60, time.Microsecond*300).Should(BeNil())
Eventually(func() error {
mt := new(v1alpha2.ManualScalerTrait)
return k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: traitName2}, mt)
}, time.Second*60, time.Millisecond*300).Should(util.NotFoundMatcher{})
})
})

View File

@@ -0,0 +1,368 @@
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers_test
import (
"context"
"fmt"
"time"
"github.com/crossplane/crossplane-runtime/apis/core/v1alpha1"
"github.com/crossplane/crossplane-runtime/pkg/meta"
"github.com/ghodss/yaml"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/kind/pkg/errors"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/utils/pointer"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/oam/util"
"github.com/oam-dev/kubevela/pkg/utils/apply"
)
// For legacy clusters that use appContext to create/update resources, we should guarantee backward compatibility
// while we deprecate appContext and replace it with the assemble/dispatch modules.
// This test simulates a scenario where a cluster has an application whose resources were already created and owned
// by an appContext and a resource tracker (for cross-namespace resources). Once we create a new application whose
// resources are the same as the existing ones, the existing resources' controller owner should be changed from the
// appContext to the resource tracker.
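// For reference, the ownership hand-over asserted below boils down to swapping the
// controller ownerReference on each pre-existing object (a sketch of the expectation,
// not the controller's code):
//
//	before: metav1.GetControllerOf(obj).Kind == "ApplicationContext" // core.oam.dev/v1alpha2
//	after:  metav1.GetControllerOf(obj).Kind == "ResourceTracker"    // core.oam.dev/v1beta1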
var _ = Describe("Test compatibility for deprecation of appContext", func() {
ctx := context.Background()
var namespaceName string
var ns corev1.Namespace
BeforeEach(func() {
namespaceName = randomNamespaceName("deprecation-appctx-test")
ns = corev1.Namespace{}
ns.SetName(namespaceName)
Expect(k8sClient.Create(ctx, &ns)).Should(Succeed())
_, err := createFromYAML(ctx, pvTraitDefinition, namespaceName, nil)
Expect(err).Should(BeNil())
})
AfterEach(func() {
Expect(k8sClient.Delete(ctx, &ns)).Should(Succeed())
Expect(k8sClient.DeleteAllOf(ctx, &v1beta1.ResourceTracker{})).Should(Succeed())
Expect(k8sClient.DeleteAllOf(ctx, &corev1.PersistentVolume{})).Should(Succeed())
})
It("Test application can update its resources' owners", func() {
var err error
var appCtxKey, rtKey *client.ObjectKey
By("Mock existing owners in a legacy cluster")
appCtxKey, err = createFromYAML(ctx, legacyAppCtx, namespaceName, nil)
Expect(err).Should(BeNil())
rtKey, err = createFromYAML(ctx, legacyResourceTracker, namespaceName, nil)
Expect(err).Should(BeNil())
By("Mock owner references: appCtx owns Deployment and Service")
appCtx := &v1alpha2.ApplicationContext{}
Expect(k8sClient.Get(ctx, *appCtxKey, appCtx)).Should(Succeed())
appCtxOwnRef := meta.AsController(&v1alpha1.TypedReference{
APIVersion: "core.oam.dev/v1alpha2",
Kind: "ApplicationContext",
Name: appCtx.GetName(),
UID: appCtx.GetUID(),
})
By("Mock owner references: rscTracker owns PersistentVolume")
rt := &v1beta1.ResourceTracker{}
Expect(k8sClient.Get(ctx, *rtKey, rt)).Should(Succeed())
rtOwnerRef := meta.AsController(&v1alpha1.TypedReference{
APIVersion: "core.oam.dev/v1beta1",
Kind: "ResourceTracker",
Name: rt.GetName(),
UID: rt.GetUID(),
})
var deployKey, svcKey, pvKey *client.ObjectKey
By("Mock existing resources in a legacy cluster")
deployKey, err = createFromYAML(ctx, legacyDeploy, namespaceName, &appCtxOwnRef)
Expect(err).Should(BeNil())
svcKey, err = createFromYAML(ctx, legacyService, namespaceName, &appCtxOwnRef)
Expect(err).Should(BeNil())
pvKey, err = createFromYAML(ctx, legacyPersistentVolume, namespaceName, &rtOwnerRef)
Expect(err).Should(BeNil())
By("Create an application")
_, err = createFromYAML(ctx, newApplication, namespaceName, nil)
Expect(err).Should(BeNil())
By("Wait for new resource tracker is created")
Eventually(func() error {
rt = &v1beta1.ResourceTracker{}
if err := k8sClient.Get(ctx, client.ObjectKey{Name: "myapp-v1-" + namespaceName}, rt); err != nil {
return errors.Wrap(err, "cannot get new resource tracker")
}
return nil
}, 10*time.Second, 500*time.Millisecond).Should(Succeed())
wantNewOwner := metav1.OwnerReference{
APIVersion: "core.oam.dev/v1beta1",
Kind: "ResourceTracker",
Name: rt.GetName(),
UID: rt.GetUID(),
Controller: pointer.BoolPtr(true),
BlockOwnerDeletion: pointer.BoolPtr(true),
}
By("Verify existing resources' new owners")
deploy := &appsv1.Deployment{}
Expect(k8sClient.Get(ctx, *deployKey, deploy)).Should(Succeed())
newDeployOwner := metav1.GetControllerOf(deploy)
Expect(newDeployOwner).ShouldNot(BeNil())
Expect(*newDeployOwner).Should(BeEquivalentTo(wantNewOwner))
svc := &corev1.Service{}
Expect(k8sClient.Get(ctx, *svcKey, svc)).Should(Succeed())
newSvcOwner := metav1.GetControllerOf(svc)
Expect(newSvcOwner).ShouldNot(BeNil())
Expect(*newSvcOwner).Should(Equal(wantNewOwner))
pv := &corev1.PersistentVolume{}
Expect(k8sClient.Get(ctx, *pvKey, pv)).Should(Succeed())
newPVOwner := metav1.GetControllerOf(pv)
Expect(newPVOwner).ShouldNot(BeNil())
Expect(*newPVOwner).Should(Equal(wantNewOwner))
By("Delete the application")
app := &v1beta1.Application{}
app.SetName("myapp")
app.SetNamespace(namespaceName)
Expect(k8sClient.Delete(ctx, app)).Should(Succeed())
By("Verify all resources can be deleted")
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: "myapp-v1-" + namespaceName}, rt)
}, 10*time.Second, 500*time.Millisecond).Should(util.NotFoundMatcher{})
Eventually(func() error {
return k8sClient.Get(ctx, *deployKey, deploy)
}, 30*time.Second, 500*time.Millisecond).Should(util.NotFoundMatcher{})
Eventually(func() error {
return k8sClient.Get(ctx, *svcKey, svc)
}, 30*time.Second, 500*time.Millisecond).Should(util.NotFoundMatcher{})
Eventually(func() error {
return k8sClient.Get(ctx, *pvKey, pv)
}, 30*time.Second, 500*time.Millisecond).Should(util.NotFoundMatcher{})
})
It("Test delete an application with a legacy finalizer", func() {
var err error
var rtKey *client.ObjectKey
// simulate a resource tracker created by a legacy application
rtKey, err = createFromYAML(ctx, legacyResourceTracker, namespaceName, nil)
Expect(err).Should(BeNil())
By("Create the application")
app := &v1beta1.Application{
ObjectMeta: metav1.ObjectMeta{
Name: "myapp",
Namespace: namespaceName,
},
Spec: v1beta1.ApplicationSpec{
Components: []v1beta1.ApplicationComponent{{
Name: "mycomp",
Type: "worker",
Properties: runtime.RawExtension{Raw: []byte(`{"cmd":["sleep","1000"],"image":"busybox"}`)},
}},
},
}
app.SetFinalizers([]string{
// this finalizer only appears in a legacy application
// this case is to test whether a legacy application with this finalizer can be deleted
"resourceTracker.finalizer.core.oam.dev",
})
Expect(k8sClient.Create(ctx, app.DeepCopy())).Should(Succeed())
By("Delete the application")
Expect(k8sClient.Delete(ctx, app)).Should(Succeed())
By("Verify legacy resource tracker is deleted")
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: rtKey.Name}, &v1beta1.ResourceTracker{})
}, 10*time.Second, 500*time.Millisecond).Should(util.NotFoundMatcher{})
})
})
var createFromYAML = func(ctx context.Context, objYAML string, ns string, owner *metav1.OwnerReference) (*client.ObjectKey, error) {
u := &unstructured.Unstructured{}
if err := yaml.Unmarshal([]byte(fmt.Sprintf(objYAML, ns)), u); err != nil {
return nil, err
}
if owner != nil {
u.SetOwnerReferences([]metav1.OwnerReference{*owner})
}
objKey := client.ObjectKey{
Name: u.GetName(),
Namespace: ns,
}
// use apply.Applicator to simulate the real apply path
applicator := apply.NewAPIApplicator(k8sClient)
if err := applicator.Apply(ctx, u); err != nil {
return nil, err
}
return &objKey, nil
}
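// Example usage (a sketch; legacyDeploy is defined below and appCtxOwnRef in the It block above):
//
//	deployKey, err := createFromYAML(ctx, legacyDeploy, namespaceName, &appCtxOwnRef)
//
// The helper substitutes the namespace into the YAML template, optionally sets the given
// controller owner, and applies the object via apply.NewAPIApplicator, returning its key.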
var (
legacyDeploy = `apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app.oam.dev/app-revision-hash: 3dc894b5cb767c4b
app.oam.dev/appRevision: myapp-v1
app.oam.dev/component: myworker
app.oam.dev/name: myapp
app.oam.dev/resourceType: WORKLOAD
app.oam.dev/revision: myworker-v1
workload.oam.dev/type: worker
name: myworker
namespace: %s
spec:
selector:
matchLabels:
app.oam.dev/component: myworker
template:
metadata:
labels:
app.oam.dev/component: myworker
spec:
containers:
- image: nginx:latest
name: myworker`
legacyService = `apiVersion: v1
kind: Service
metadata:
labels:
app.oam.dev/app-revision-hash: 3dc894b5cb767c4b
app.oam.dev/appRevision: myapp-v1
app.oam.dev/component: myworker
app.oam.dev/name: myapp
app.oam.dev/resourceType: TRAIT
app.oam.dev/revision: myworker-v1
trait.oam.dev/resource: service
trait.oam.dev/type: ingress
name: myworker
namespace: %s
spec:
ports:
- port: 8080
protocol: TCP
targetPort: 8080
selector:
app.oam.dev/component: myworker
type: ClusterIP`
legacyPersistentVolume = `apiVersion: v1
kind: PersistentVolume
metadata:
labels:
app.oam.dev/appRevision: myapp-v1
app.oam.dev/component: myworker
app.oam.dev/name: myapp
app.oam.dev/resourceType: TRAIT
app.oam.dev/revision: myworker-v1
trait.oam.dev/resource: pv
trait.oam.dev/type: testpv
name: csi-gcs-pv-myworker
namespace: %s
spec:
accessModes:
- ReadWriteOnce
capacity:
storage: 5Gi
nfs:
server: 1.1.1.1
path: "/"
persistentVolumeReclaimPolicy: Retain
storageClassName: csi-gcs-test-sc`
legacyAppCtx = `apiVersion: core.oam.dev/v1alpha2
kind: ApplicationContext
metadata:
labels:
app.oam.dev/app-revision-hash: 3dc894b5cb767c4b
name: myapp
namespace: %s
spec:
applicationRevisionName: myapp-v1`
legacyResourceTracker = `apiVersion: core.oam.dev/v1beta1
kind: ResourceTracker
metadata:
name: %s-myapp`
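// Note the legacy tracker name "%s-myapp" (i.e. "<namespace>-<app>"); the new
// per-revision scheme used elsewhere in this commit is "<app>-v<revision>-<namespace>"
// (cf. generateResourceTrackerKey), which is why the test above waits for a tracker
// named "myapp-v1-"+namespaceName.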
newApplication = `apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: myapp
namespace: %s
spec:
components:
- name: myworker
type: worker
properties:
image: "nginx:latest"
traits:
- type: ingress
properties:
domain: localhost
http:
"/": 8080
- type: testpv
properties:
secretName: testSecret`
pvTraitDefinition = `apiVersion: core.oam.dev/v1beta1
kind: TraitDefinition
metadata:
name: testpv
namespace: %s
annotations:
definition.oam.dev/description: Mock a cluster-scope resource
spec:
schematic:
cue:
template: |
parameter: {
secretName: string
}
outputs: {
pv: {
apiVersion: "v1"
kind: "PersistentVolume"
metadata: name: "csi-gcs-pv-\(context.name)"
spec: {
accessModes: ["ReadWriteOnce"]
capacity: storage: "5Gi"
persistentVolumeReclaimPolicy: "Retain"
storageClassName: "csi-gcs-test-sc"
nfs: {
server: "1.1.1.1"
path: "/"
}
}
}
}`
)
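// The testpv trait above renders a cluster-scoped PersistentVolume named
// "csi-gcs-pv-<component>", matching the mocked legacy PV "csi-gcs-pv-myworker"
// defined earlier in this file.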

View File

@@ -24,6 +24,7 @@ import (
"github.com/ghodss/yaml"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/pkg/errors"
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
@@ -33,7 +34,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/oam/util"
)
@@ -198,26 +198,15 @@ var _ = Describe("Test application of the specified definition version", func()
By("Create application")
Expect(k8sClient.Create(ctx, &app)).Should(Succeed())
ac := &v1alpha2.ApplicationContext{}
acName := appName
By("Verify the ApplicationContext is created & reconciled successfully")
Eventually(func() bool {
if err := k8sClient.Get(ctx, client.ObjectKey{Name: acName, Namespace: namespace}, ac); err != nil {
return false
}
return len(ac.Status.Workloads) > 0
}, 60*time.Second, time.Second).Should(BeTrue())
By("Verify the workload(deployment) is created successfully")
Expect(len(ac.Status.Workloads)).Should(Equal(len(app.Spec.Components)))
webServiceDeploy := &appsv1.Deployment{}
deployName := ac.Status.Workloads[0].Reference.Name
deployName := comp1Name
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: deployName, Namespace: namespace}, webServiceDeploy)
}, 30*time.Second, 3*time.Second).Should(Succeed())
workerDeploy := &appsv1.Deployment{}
deployName = ac.Status.Workloads[1].Reference.Name
deployName = comp2Name
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: deployName, Namespace: namespace}, workerDeploy)
}, 30*time.Second, 3*time.Second).Should(Succeed())
@@ -264,25 +253,29 @@ var _ = Describe("Test application of the specified definition version", func()
}
Expect(k8sClient.Patch(ctx, &app, client.Merge)).Should(Succeed())
By("Verify the ApplicationContext is update successfully")
Eventually(func() bool {
if err := k8sClient.Get(ctx, client.ObjectKey{Name: acName, Namespace: namespace}, ac); err != nil {
return false
By("Wait for dispatching v2 resources successfully")
Eventually(func() error {
requestReconcileNow(ctx, &app)
rt := &v1beta1.ResourceTracker{}
if err := k8sClient.Get(ctx, client.ObjectKey{Name: fmt.Sprintf("%s-v2-%s", appName, namespace)}, rt); err != nil {
return err
}
return ac.Generation == 2
}, 10*time.Second, time.Second).Should(BeTrue())
if len(rt.Status.TrackedResources) != 0 {
return nil
}
return errors.New("v2 resources have not been dispatched")
}, 10*time.Second, 500*time.Millisecond).Should(Succeed())
By("Verify the workload(deployment) is created successfully")
Expect(len(ac.Status.Workloads)).Should(Equal(len(app.Spec.Components)))
webServiceV1Deploy := &appsv1.Deployment{}
deployName = ac.Status.Workloads[0].Reference.Name
deployName = comp1Name
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: deployName, Namespace: namespace}, webServiceV1Deploy)
}, 30*time.Second, 3*time.Second).Should(Succeed())
By("Verify the workload(job) is created successfully")
workerJob := &batchv1.Job{}
jobName := ac.Status.Workloads[1].Reference.Name
jobName := comp2Name
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: jobName, Namespace: namespace}, workerJob)
}, 30*time.Second, 3*time.Second).Should(Succeed())
@@ -417,13 +410,6 @@ var _ = Describe("Test application of the specified definition version", func()
By("Create application")
Expect(k8sClient.Create(ctx, &app)).Should(Succeed())
ac := &v1alpha2.ApplicationContext{}
acName := appName
By("Verify the ApplicationContext is created successfully")
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: acName, Namespace: namespace}, ac)
}, 30*time.Second, time.Second).Should(Succeed())
By("Verify the workload(deployment) is created successfully by Helm")
deploy := &appsv1.Deployment{}
deployName := fmt.Sprintf("%s-%s-podinfo", appName, compName)
@@ -438,7 +424,6 @@ var _ = Describe("Test application of the specified definition version", func()
By("Verify trait is applied to the workload")
Eventually(func() bool {
requestReconcileNow(ctx, ac)
deploy := &appsv1.Deployment{}
if err := k8sClient.Get(ctx, client.ObjectKey{Name: deployName, Namespace: namespace}, deploy); err != nil {
return false
@@ -471,15 +456,6 @@ var _ = Describe("Test application of the specified definition version", func()
By("Create application")
Expect(k8sClient.Patch(ctx, &app, client.Merge)).Should(Succeed())
By("Verify the ApplicationContext is updated")
Eventually(func() bool {
ac = &v1alpha2.ApplicationContext{}
if err := k8sClient.Get(ctx, client.ObjectKey{Name: acName, Namespace: namespace}, ac); err != nil {
return false
}
return ac.GetGeneration() == 2
}, 15*time.Second, 3*time.Second).Should(BeTrue())
By("Verify the workload(deployment) is update successfully by Helm")
deploy = &appsv1.Deployment{}
Eventually(func() bool {
@@ -589,19 +565,9 @@ var _ = Describe("Test application of the specified definition version", func()
By("Create application")
Expect(k8sClient.Create(ctx, &app)).Should(Succeed())
ac := &v1alpha2.ApplicationContext{}
acName := appName
By("Verify the ApplicationContext is created & reconciled successfully")
Eventually(func() bool {
if err := k8sClient.Get(ctx, client.ObjectKey{Name: acName, Namespace: namespace}, ac); err != nil {
return false
}
return len(ac.Status.Workloads) > 0
}, 60*time.Second, time.Second).Should(BeTrue())
By("Verify the workload(job) is created successfully")
job := &batchv1.Job{}
jobName := ac.Status.Workloads[0].Reference.Name
jobName := compName
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: jobName, Namespace: namespace}, job)
}, 30*time.Second, 3*time.Second).Should(Succeed())
@@ -640,18 +606,9 @@ var _ = Describe("Test application of the specified definition version", func()
}
Expect(k8sClient.Patch(ctx, &app, client.Merge)).Should(Succeed())
By("Verify the ApplicationContext is update successfully")
Eventually(func() bool {
if err := k8sClient.Get(ctx, client.ObjectKey{Name: acName, Namespace: namespace}, ac); err != nil {
return false
}
return ac.Generation == 2
}, 10*time.Second, time.Second).Should(BeTrue())
By("Verify the workload(deployment) is created successfully")
Expect(len(ac.Status.Workloads)).Should(Equal(len(app.Spec.Components)))
deploy := &appsv1.Deployment{}
deployName := ac.Status.Workloads[0].Reference.Name
deployName := compName
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: deployName, Namespace: namespace}, deploy)
}, 30*time.Second, 3*time.Second).Should(Succeed())
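The lookups in this file all follow one pattern: with the AppContext gone, workloads are fetched directly by component name rather than via ac.Status.Workloads. A self-contained sketch of that polling helper; the package and helper names are illustrative, everything else mirrors the assertions above:
package e2e_test

import (
	"context"
	"time"

	. "github.com/onsi/gomega"
	appsv1 "k8s.io/api/apps/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// waitForDeployment polls until the Deployment named after the component
// exists, mirroring the Eventually blocks used throughout these specs.
func waitForDeployment(ctx context.Context, c client.Client, name, namespace string) {
	deploy := &appsv1.Deployment{}
	Eventually(func() error {
		return c.Get(ctx, client.ObjectKey{Name: name, Namespace: namespace}, deploy)
	}, 30*time.Second, 3*time.Second).Should(Succeed())
}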

View File

@@ -29,7 +29,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
v1beta1 "github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/oam/util"
. "github.com/onsi/ginkgo"
@@ -46,7 +46,7 @@ var _ = Describe("Test application containing helm module", func() {
tdName = "virtualgroup"
)
var namespace string
var app v1alpha2.Application
var app v1beta1.Application
var ns corev1.Namespace
BeforeEach(func() {
@@ -58,7 +58,7 @@ var _ = Describe("Test application containing helm module", func() {
},
time.Second*3, time.Millisecond*300).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
cd := v1alpha2.ComponentDefinition{}
cd := v1beta1.ComponentDefinition{}
cd.SetName(cdName)
cd.SetNamespace(namespace)
cd.Spec.Workload.Definition = common.WorkloadGVK{APIVersion: "apps/v1", Kind: "Deployment"}
@@ -80,7 +80,7 @@ var _ = Describe("Test application containing helm module", func() {
Expect(k8sClient.Create(ctx, &cd)).Should(Succeed())
By("Install a patch trait used to test CUE module")
td := v1alpha2.TraitDefinition{}
td := v1beta1.TraitDefinition{}
td.SetName(tdName)
td.SetNamespace(namespace)
td.Spec.AppliesToWorkloads = []string{"deployments.apps"}
@@ -107,7 +107,7 @@ var _ = Describe("Test application containing helm module", func() {
Expect(k8sClient.Create(ctx, &td)).Should(Succeed())
By("Add 'deployments.apps' to scaler's appliesToWorkloads")
scalerTd := v1alpha2.TraitDefinition{}
scalerTd := v1beta1.TraitDefinition{}
Expect(k8sClient.Get(ctx, client.ObjectKey{Name: "scaler", Namespace: "vela-system"}, &scalerTd)).Should(Succeed())
scalerTd.Spec.AppliesToWorkloads = []string{"deployments.apps", "webservice", "worker"}
scalerTd.SetResourceVersion("")
@@ -116,15 +116,15 @@ var _ = Describe("Test application containing helm module", func() {
AfterEach(func() {
By("Clean up resources after a test")
k8sClient.DeleteAllOf(ctx, &v1alpha2.Application{}, client.InNamespace(namespace))
k8sClient.DeleteAllOf(ctx, &v1alpha2.ComponentDefinition{}, client.InNamespace(namespace))
k8sClient.DeleteAllOf(ctx, &v1alpha2.WorkloadDefinition{}, client.InNamespace(namespace))
k8sClient.DeleteAllOf(ctx, &v1alpha2.TraitDefinition{}, client.InNamespace(namespace))
k8sClient.DeleteAllOf(ctx, &v1beta1.Application{}, client.InNamespace(namespace))
k8sClient.DeleteAllOf(ctx, &v1beta1.ComponentDefinition{}, client.InNamespace(namespace))
k8sClient.DeleteAllOf(ctx, &v1beta1.WorkloadDefinition{}, client.InNamespace(namespace))
k8sClient.DeleteAllOf(ctx, &v1beta1.TraitDefinition{}, client.InNamespace(namespace))
By(fmt.Sprintf("Delete the entire namespaceName %s", ns.Name))
Expect(k8sClient.Delete(ctx, &ns, client.PropagationPolicy(metav1.DeletePropagationForeground))).Should(Succeed())
By("Remove 'deployments.apps' from scaler's appliesToWorkloads")
scalerTd := v1alpha2.TraitDefinition{}
scalerTd := v1beta1.TraitDefinition{}
Expect(k8sClient.Get(ctx, client.ObjectKey{Name: "scaler", Namespace: "vela-system"}, &scalerTd)).Should(Succeed())
scalerTd.Spec.AppliesToWorkloads = []string{"webservice", "worker"}
scalerTd.SetResourceVersion("")
@@ -132,30 +132,30 @@ var _ = Describe("Test application containing helm module", func() {
})
It("Test deploy an application containing helm module", func() {
app = v1alpha2.Application{
app = v1beta1.Application{
ObjectMeta: metav1.ObjectMeta{
Name: appName,
Namespace: namespace,
},
Spec: v1alpha2.ApplicationSpec{
Components: []v1alpha2.ApplicationComponent{
Spec: v1beta1.ApplicationSpec{
Components: []v1beta1.ApplicationComponent{
{
Name: compName,
WorkloadType: cdName,
Settings: util.Object2RawExtension(map[string]interface{}{
Name: compName,
Type: cdName,
Properties: util.Object2RawExtension(map[string]interface{}{
"image": map[string]interface{}{
"tag": "5.1.2",
},
}),
Traits: []v1alpha2.ApplicationTrait{
Traits: []v1beta1.ApplicationTrait{
{
Name: "scaler",
Type: "scaler",
Properties: util.Object2RawExtension(map[string]interface{}{
"replicas": 2,
}),
},
{
Name: tdName,
Type: tdName,
Properties: util.Object2RawExtension(map[string]interface{}{
"group": "my-group",
"type": "cluster",
@@ -169,13 +169,6 @@ var _ = Describe("Test application containing helm module", func() {
By("Create application")
Expect(k8sClient.Create(ctx, &app)).Should(Succeed())
ac := &v1alpha2.ApplicationContext{}
acName := appName
By("Verify the ApplicationContext is created successfully")
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: acName, Namespace: namespace}, ac)
}, 30*time.Second, time.Second).Should(Succeed())
By("Verify the workload(deployment) is created successfully by Helm")
deploy := &appsv1.Deployment{}
deployName := fmt.Sprintf("%s-%s-podinfo", appName, compName)
@@ -185,7 +178,6 @@ var _ = Describe("Test application containing helm module", func() {
By("Verify two traits are applied to the workload")
Eventually(func() bool {
requestReconcileNow(ctx, ac)
deploy := &appsv1.Deployment{}
if err := k8sClient.Get(ctx, client.ObjectKey{Name: deployName, Namespace: namespace}, deploy); err != nil {
return false
@@ -206,30 +198,30 @@ var _ = Describe("Test application containing helm module", func() {
}, 120*time.Second, 10*time.Second).Should(BeTrue())
By("Update the application")
app = v1alpha2.Application{
app = v1beta1.Application{
ObjectMeta: metav1.ObjectMeta{
Name: appName,
Namespace: namespace,
},
Spec: v1alpha2.ApplicationSpec{
Components: []v1alpha2.ApplicationComponent{
Spec: v1beta1.ApplicationSpec{
Components: []v1beta1.ApplicationComponent{
{
Name: compName,
WorkloadType: cdName,
Settings: util.Object2RawExtension(map[string]interface{}{
Name: compName,
Type: cdName,
Properties: util.Object2RawExtension(map[string]interface{}{
"image": map[string]interface{}{
"tag": "5.1.3", // change 5.1.4 => 5.1.3
},
}),
Traits: []v1alpha2.ApplicationTrait{
Traits: []v1beta1.ApplicationTrait{
{
Name: "scaler",
Type: "scaler",
Properties: util.Object2RawExtension(map[string]interface{}{
"replicas": 3, // change 2 => 3
}),
},
{
Name: tdName,
Type: tdName,
Properties: util.Object2RawExtension(map[string]interface{}{
"group": "my-group-0", // change my-group => my-group-0
"type": "cluster",
@@ -242,19 +234,8 @@ var _ = Describe("Test application containing helm module", func() {
}
Expect(k8sClient.Patch(ctx, &app, client.Merge)).Should(Succeed())
By("Verify the ApplicationContext is updated")
deploy = &appsv1.Deployment{}
Eventually(func() bool {
ac = &v1alpha2.ApplicationContext{}
if err := k8sClient.Get(ctx, client.ObjectKey{Name: acName, Namespace: namespace}, ac); err != nil {
return false
}
return ac.GetGeneration() == 2
}, 15*time.Second, 3*time.Second).Should(BeTrue())
By("Verify the changes are applied to the workload")
Eventually(func() bool {
requestReconcileNow(ctx, ac)
deploy := &appsv1.Deployment{}
if err := k8sClient.Get(ctx, client.ObjectKey{Name: deployName, Namespace: namespace}, deploy); err != nil {
return false
@@ -277,7 +258,7 @@ var _ = Describe("Test application containing helm module", func() {
It("Test deploy an application containing helm module defined by workloadDefinition", func() {
workloaddef := v1alpha2.WorkloadDefinition{}
workloaddef := v1beta1.WorkloadDefinition{}
workloaddef.SetName(wdName)
workloaddef.SetNamespace(namespace)
workloaddef.Spec.Reference = common.DefinitionReference{Name: "deployments.apps", Version: "v1"}
@@ -300,17 +281,17 @@ var _ = Describe("Test application containing helm module", func() {
Expect(k8sClient.Create(ctx, &workloaddef)).Should(Succeed())
appTestName := "test-app-refer-to-workloaddef"
appTest := v1alpha2.Application{
appTest := v1beta1.Application{
ObjectMeta: metav1.ObjectMeta{
Name: appTestName,
Namespace: namespace,
},
Spec: v1alpha2.ApplicationSpec{
Components: []v1alpha2.ApplicationComponent{
Spec: v1beta1.ApplicationSpec{
Components: []v1beta1.ApplicationComponent{
{
Name: compName,
WorkloadType: wdName,
Settings: util.Object2RawExtension(map[string]interface{}{
Name: compName,
Type: wdName,
Properties: util.Object2RawExtension(map[string]interface{}{
"image": map[string]interface{}{
"tag": "5.1.2",
},
@@ -322,13 +303,6 @@ var _ = Describe("Test application containing helm module", func() {
By("Create application")
Expect(k8sClient.Create(ctx, &appTest)).Should(Succeed())
ac := &v1alpha2.ApplicationContext{}
acName := appTestName
By("Verify the AppConfig is created successfully")
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: acName, Namespace: namespace}, ac)
}, 30*time.Second, time.Second).Should(Succeed())
By("Verify the workload(deployment) is created successfully by Helm")
deploy := &appsv1.Deployment{}
deployName := fmt.Sprintf("%s-%s-podinfo", appTestName, compName)
@@ -338,7 +312,7 @@ var _ = Describe("Test application containing helm module", func() {
})
It("Test deploy an application containing helm module and the componet refer to autodetect type worklaod", func() {
cd := v1alpha2.ComponentDefinition{}
cd := v1beta1.ComponentDefinition{}
cd.SetName("podinfo")
cd.SetNamespace(namespace)
cd.Spec.Schematic = &common.Schematic{
@@ -359,17 +333,17 @@ var _ = Describe("Test application containing helm module", func() {
Expect(k8sClient.Create(ctx, &cd)).Should(Succeed())
newAppName := "test-autodetect"
newApp := v1alpha2.Application{
newApp := v1beta1.Application{
ObjectMeta: metav1.ObjectMeta{
Name: newAppName,
Namespace: namespace,
},
Spec: v1alpha2.ApplicationSpec{
Components: []v1alpha2.ApplicationComponent{
Spec: v1beta1.ApplicationSpec{
Components: []v1beta1.ApplicationComponent{
{
Name: compName,
WorkloadType: "podinfo",
Settings: util.Object2RawExtension(map[string]interface{}{
Name: compName,
Type: "podinfo",
Properties: util.Object2RawExtension(map[string]interface{}{
"image": map[string]interface{}{
"tag": "5.1.2",
},

View File

@@ -31,7 +31,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/oam/util"
@@ -200,26 +199,15 @@ spec:
By("Create application")
Expect(k8sClient.Create(ctx, &app)).Should(Succeed())
ac := &v1alpha2.ApplicationContext{}
acName := appName
By("Verify the ApplicationContext is created & reconciled successfully")
Eventually(func() bool {
if err := k8sClient.Get(ctx, client.ObjectKey{Name: acName, Namespace: namespace}, ac); err != nil {
return false
}
return len(ac.Status.Workloads) > 0
}, 60*time.Second, time.Second).Should(BeTrue())
By("Verify the workload(deployment) is created successfully")
deploy := &appsv1.Deployment{}
deployName := ac.Status.Workloads[0].Reference.Name
deployName := compName
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: deployName, Namespace: namespace}, deploy)
}, 30*time.Second, 3*time.Second).Should(Succeed())
By("Verify two traits are applied to the workload")
Eventually(func() bool {
requestReconcileNow(ctx, ac)
deploy := &appsv1.Deployment{}
if err := k8sClient.Get(ctx, client.ObjectKey{Name: deployName, Namespace: namespace}, deploy); err != nil {
return false
@@ -272,21 +260,12 @@ spec:
}
Expect(k8sClient.Patch(ctx, &app, client.Merge)).Should(Succeed())
By("Verify the ApplicationContext is update successfully")
Eventually(func() bool {
if err := k8sClient.Get(ctx, client.ObjectKey{Name: acName, Namespace: namespace}, ac); err != nil {
return false
}
return ac.Generation == 2
}, 10*time.Second, time.Second).Should(BeTrue())
By("Verify the workload(deployment) is created successfully")
deploy = &appsv1.Deployment{}
deployName = ac.Status.Workloads[0].Reference.Name
deployName = compName
By("Verify the changes are applied to the workload")
Eventually(func() bool {
requestReconcileNow(ctx, ac)
deploy := &appsv1.Deployment{}
if err := k8sClient.Get(ctx, client.ObjectKey{Name: deployName, Namespace: namespace}, deploy); err != nil {
return false
@@ -348,19 +327,9 @@ spec:
By("Create application")
Expect(k8sClient.Create(ctx, &appTest)).Should(Succeed())
ac := &v1alpha2.ApplicationContext{}
acName := appTestName
By("Verify the ApplicationContext is created & reconciled successfully")
Eventually(func() bool {
if err := k8sClient.Get(ctx, client.ObjectKey{Name: acName, Namespace: namespace}, ac); err != nil {
return false
}
return len(ac.Status.Workloads) > 0
}, 15*time.Second, time.Second).Should(BeTrue())
By("Verify the workload(deployment) is created successfully")
deploy := &appsv1.Deployment{}
deployName := ac.Status.Workloads[0].Reference.Name
deployName := compName
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: deployName, Namespace: namespace}, deploy)
}, 15*time.Second, 3*time.Second).Should(Succeed())

View File

@@ -21,24 +21,25 @@ import (
"fmt"
"time"
"github.com/oam-dev/kubevela/pkg/oam"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
kruise "github.com/openkruise/kruise-api/apps/v1alpha1"
corev1 "k8s.io/api/core/v1"
corev1beta1 "k8s.io/api/networking/v1beta1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
kruise "github.com/openkruise/kruise-api/apps/v1alpha1"
oamcomm "github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
oamstd "github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/application/dispatch"
"github.com/oam-dev/kubevela/pkg/controller/utils"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/util"
"github.com/oam-dev/kubevela/pkg/utils/common"
)
@@ -92,6 +93,18 @@ var _ = Describe("Cloneset based rollout tests", func() {
time.Second*3, time.Millisecond*300).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
}
CreateIngressDef := func() {
By("Install Ingress trait definition")
var td v1beta1.TraitDefinition
Expect(common.ReadYamlToObject("testdata/rollout/cloneset/ingressDefinition.yaml", &td)).Should(BeNil())
// create the traitDefinition if it does not exist
Eventually(
func() error {
return k8sClient.Create(ctx, &td)
},
time.Second*3, time.Millisecond*300).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
}
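Both definition helpers above use the same idempotent-create idiom. The same effect, sketched with apierrors instead of the AlreadyExistMatcher (the helper name is hypothetical; ctx and k8sClient come from the suite):
// createIfAbsent retries Create and tolerates AlreadyExists, equivalent in
// effect to Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{})).
createIfAbsent := func(obj client.Object) {
	Eventually(func() error {
		if err := k8sClient.Create(ctx, obj); err != nil && !apierrors.IsAlreadyExists(err) {
			return err
		}
		return nil
	}, time.Second*3, time.Millisecond*300).Should(Succeed())
}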
applySourceApp := func(source string) {
By("Apply an application")
var newApp v1beta1.Application
@@ -119,21 +132,22 @@ var _ = Describe("Cloneset based rollout tests", func() {
Eventually(
func() error {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: app.Name}, &app)
app.Spec = targetApp.Spec
return k8sClient.Update(ctx, &app)
app.Spec = targetApp.DeepCopy().Spec
return k8sClient.Update(ctx, app.DeepCopy())
}, time.Second*15, time.Millisecond*500).Should(Succeed())
By("Get Application Revision created with more than one")
Eventually(
func() bool {
var appRevList = &v1beta1.ApplicationRevisionList{}
_ = k8sClient.List(ctx, appRevList, client.MatchingLabels(map[string]string{oam.LabelAppName: targetApp.Name}))
_ = k8sClient.List(ctx, appRevList, client.InNamespace(namespaceName),
client.MatchingLabels(map[string]string{oam.LabelAppName: targetApp.Name}))
if appRevList != nil {
return len(appRevList.Items) >= 2
}
return false
},
time.Second*30, time.Millisecond*500).Should(BeTrue())
time.Second*15, time.Millisecond*500).Should(BeTrue())
}
createAppRolling := func(newAppRollout *v1beta1.AppRollout) {
@@ -183,53 +197,80 @@ var _ = Describe("Cloneset based rollout tests", func() {
Expect(appRollout.Status.UpgradedReplicas).Should(BeEquivalentTo(appRollout.Status.RolloutTargetSize))
clonesetName := appRollout.Spec.ComponentList[0]
By("Verify AppContext rolling status")
var appContext v1alpha2.ApplicationContext
Eventually(
func() types.RollingStatus {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: targetAppName}, &appContext)
return appContext.Status.RollingStatus
},
time.Second*60, time.Second).Should(BeEquivalentTo(types.RollingCompleted))
By("Wait for resourceTracker to resume the control of cloneset")
By("Wait for AppContext to resume the control of cloneset")
var clonesetOwner *metav1.OwnerReference
Eventually(
func() string {
func() error {
var clonesetOwner *metav1.OwnerReference
err := k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: clonesetName}, &kc)
if err != nil {
return ""
return err
}
if kc.Status.UpdatedReplicas != *kc.Spec.Replicas {
return fmt.Errorf("expect cloneset updated replicas %d, but got %d",
kc.Status.UpdatedReplicas, *kc.Spec.Replicas)
}
clonesetOwner = metav1.GetControllerOf(&kc)
if clonesetOwner != nil {
return clonesetOwner.Kind
if clonesetOwner == nil {
return fmt.Errorf("controller owner missed")
}
return ""
if clonesetOwner.Kind != v1beta1.ResourceTrackerKind {
return fmt.Errorf("controller kind missmatch wants %s actually %s", v1beta1.ResourceTrackerKind, clonesetOwner.Kind)
}
resourceTrackerName := dispatch.ConstructResourceTrackerName(targetAppName, namespaceName)
if resourceTrackerName != clonesetOwner.Name {
return fmt.Errorf("controller name missmatch wants %s actually %s", resourceTrackerName, clonesetOwner.Name)
}
return nil
},
time.Second*30, time.Millisecond*500).Should(BeEquivalentTo(v1alpha2.ApplicationContextKind))
Expect(clonesetOwner.Name).Should(BeEquivalentTo(targetAppName))
Expect(kc.Status.UpdatedReplicas).Should(BeEquivalentTo(*kc.Spec.Replicas))
time.Second*60, time.Millisecond*500).Should(BeNil())
// make sure all pods are upgraded
image := kc.Spec.Template.Spec.Containers[0].Image
podList := corev1.PodList{}
Expect(k8sClient.List(ctx, &podList, client.MatchingLabels(kc.Spec.Template.Labels),
client.InNamespace(namespaceName))).Should(Succeed())
Expect(len(podList.Items)).Should(BeEquivalentTo(*kc.Spec.Replicas))
for _, pod := range podList.Items {
Expect(pod.Spec.Containers[0].Image).Should(Equal(image))
Expect(pod.Status.Phase).Should(Equal(corev1.PodRunning))
}
Eventually(func() error {
if err := k8sClient.List(ctx, &podList, client.MatchingLabels(kc.Spec.Template.Labels),
client.InNamespace(namespaceName)); err != nil {
return err
}
if len(podList.Items) != int(*kc.Spec.Replicas) {
return fmt.Errorf("expect pod numbers %q, got %q", int(*kc.Spec.Replicas), len(podList.Items))
}
for _, pod := range podList.Items {
gotImage := pod.Spec.Containers[0].Image
if gotImage != image {
return fmt.Errorf("expect pod container image %q, got %q", image, gotImage)
}
if pod.Status.Phase != corev1.PodRunning {
return fmt.Errorf("expect pod phase %q, got %q", corev1.PodRunning, pod.Status.Phase)
}
}
return nil
}, 60*time.Second, 500*time.Millisecond).Should(Succeed())
}
verifyAppConfigInactive := func(appContextName string) {
var appContext v1alpha2.ApplicationContext
By("Verify AppConfig is inactive")
Eventually(
func() types.RollingStatus {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: appContextName}, &appContext)
return appContext.Status.RollingStatus
},
time.Second*30, time.Millisecond*500).Should(BeEquivalentTo(types.InactiveAfterRollingCompleted))
verifyIngress := func(domain string) {
ingress := &corev1beta1.Ingress{}
Eventually(func() error {
if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespaceName, Name: appRollout.Spec.ComponentList[0]}, ingress); err != nil {
return err
}
owner := metav1.GetControllerOf(ingress)
if owner == nil {
return fmt.Errorf("ingress don't have controller owner")
}
if owner.Kind != v1beta1.ResourceTrackerKind {
return fmt.Errorf("ingress owner kind miss match wants %s actually %s", v1beta1.ResourceTrackerKind, owner.Kind)
}
rtName := dispatch.ConstructResourceTrackerName(appRollout.Spec.TargetAppRevisionName, appRollout.Namespace)
if owner.Name != rtName {
return fmt.Errorf("ingress owner error wants %s actually %s", rtName, owner.Name)
}
if ingress.Spec.Rules[0].Host != domain {
return fmt.Errorf("domain mismatch wants %s actually %s", domain, ingress.Spec.Rules[0].Host)
}
return nil
}, time.Second*30, time.Millisecond*300).Should(BeNil())
}
applyTwoAppVersion := func() {
@@ -275,7 +316,6 @@ var _ = Describe("Cloneset based rollout tests", func() {
time.Second*10, time.Millisecond*10).Should(BeEquivalentTo(oamstd.RollingInBatchesState))
verifyRolloutSucceeded(appRollout.Spec.TargetAppRevisionName)
verifyAppConfigInactive(appRollout.Spec.SourceAppRevisionName)
}
BeforeEach(func() {
@@ -363,7 +403,6 @@ var _ = Describe("Cloneset based rollout tests", func() {
RolloutBatches) - 1))
Expect(k8sClient.Update(ctx, &appRollout)).Should(Succeed())
verifyRolloutSucceeded(appRollout.Spec.TargetAppRevisionName)
verifyAppConfigInactive(appRollout.Spec.SourceAppRevisionName)
})
It("Test pause and modify rollout plan after rolling succeeded", func() {
@@ -461,8 +500,6 @@ var _ = Describe("Cloneset based rollout tests", func() {
time.Second*10, time.Millisecond*10).Should(BeEquivalentTo(oamstd.RollingInBatchesState))
verifyRolloutSucceeded(appRollout.Spec.TargetAppRevisionName)
verifyAppConfigInactive(appRollout.Spec.SourceAppRevisionName)
rollForwardToSource()
})
@@ -570,12 +607,9 @@ var _ = Describe("Cloneset based rollout tests", func() {
appRollout.Spec.RolloutPlan.BatchPartition = nil
return k8sClient.Update(ctx, &appRollout)
}, time.Second*15, time.Millisecond*500).Should(Succeed())
verifyRolloutSucceeded(appRollout.Spec.TargetAppRevisionName)
verifyAppConfigInactive(appRollout.Spec.SourceAppRevisionName)
// wait for a bit until the application takes back control
By("Verify that application does not control the cloneset")
By("Verify that resourceTracker control the cloneset")
clonesetName := appRollout.Spec.ComponentList[0]
Eventually(
func() string {
@@ -585,7 +619,75 @@ var _ = Describe("Cloneset based rollout tests", func() {
return ""
}
return clonesetOwner.Kind
}, time.Second*30, time.Second).Should(BeEquivalentTo(v1alpha2.ApplicationContextKind))
}, time.Second*30, time.Second).Should(BeEquivalentTo(v1beta1.ResourceTrackerKind))
})
It("Test rollout will update same name trait", func() {
CreateClonesetDef()
CreateIngressDef()
applySourceApp("app-with-ingress-source.yaml")
By("Apply the application rollout go directly to the target")
appRollout = v1beta1.AppRollout{}
Expect(common.ReadYamlToObject("testdata/rollout/cloneset/appRollout.yaml", &appRollout)).Should(BeNil())
appRollout.Namespace = namespaceName
appRollout.Spec.SourceAppRevisionName = ""
appRollout.Spec.TargetAppRevisionName = utils.ConstructRevisionName(app.GetName(), 1)
appRollout.Spec.RolloutPlan.TargetSize = pointer.Int32Ptr(7)
appRollout.Spec.RolloutPlan.BatchPartition = nil
createAppRolling(&appRollout)
appRolloutName = appRollout.Name
verifyRolloutSucceeded(appRollout.Spec.TargetAppRevisionName)
By("verify ingress status")
verifyIngress("test.example.com")
By("rollout to revision 2")
updateApp("app-with-ingress-target.yaml")
Eventually(
func() error {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: appRollout.Name}, &appRollout)
appRollout.Spec.SourceAppRevisionName = utils.ConstructRevisionName(app.GetName(), 1)
appRollout.Spec.TargetAppRevisionName = utils.ConstructRevisionName(app.GetName(), 2)
appRollout.Spec.RolloutPlan.BatchPartition = nil
return k8sClient.Update(ctx, &appRollout)
}, time.Second*10, time.Millisecond*500).Should(Succeed())
verifyRolloutSucceeded(appRollout.Spec.TargetAppRevisionName)
By("verify after rollout ingress status")
verifyIngress("test-1.example.com")
})
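The Eventually-wrapped Get/Update above is the suite's retry-on-conflict idiom: re-reading inside the closure gives each attempt a fresh resourceVersion. The same idiom factored into a helper, as a sketch (the helper name is hypothetical; ctx, k8sClient, and namespaceName come from the suite):
updateRollout := func(name string, mutate func(*v1beta1.AppRollout)) {
	Eventually(func() error {
		r := &v1beta1.AppRollout{}
		if err := k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: name}, r); err != nil {
			return err
		}
		mutate(r) // apply spec changes to the freshly read object
		return k8sClient.Update(ctx, r)
	}, time.Second*10, time.Millisecond*500).Should(Succeed())
}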
It("Test rollout succeed will gc useless trait", func() {
CreateClonesetDef()
CreateIngressDef()
applySourceApp("app-with-ingress-source.yaml")
By("Apply the application rollout go directly to the target")
appRollout = v1beta1.AppRollout{}
Expect(common.ReadYamlToObject("testdata/rollout/cloneset/appRollout.yaml", &appRollout)).Should(BeNil())
appRollout.Namespace = namespaceName
appRollout.Spec.SourceAppRevisionName = ""
appRollout.Spec.TargetAppRevisionName = utils.ConstructRevisionName(app.GetName(), 1)
appRollout.Spec.RolloutPlan.TargetSize = pointer.Int32Ptr(7)
appRollout.Spec.RolloutPlan.BatchPartition = nil
createAppRolling(&appRollout)
appRolloutName = appRollout.Name
verifyRolloutSucceeded(appRollout.Spec.TargetAppRevisionName)
By("verify ingress status")
verifyIngress("test.example.com")
By("rollout to revision 2 to disable ingress trait")
updateApp("app-remove-ingress.yaml")
Eventually(
func() error {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: appRollout.Name}, &appRollout)
appRollout.Spec.SourceAppRevisionName = utils.ConstructRevisionName(app.GetName(), 1)
appRollout.Spec.TargetAppRevisionName = utils.ConstructRevisionName(app.GetName(), 2)
appRollout.Spec.RolloutPlan.BatchPartition = nil
return k8sClient.Update(ctx, &appRollout)
}, time.Second*10, time.Millisecond*500).Should(Succeed())
verifyRolloutSucceeded(appRollout.Spec.TargetAppRevisionName)
By("verify after rollout ingress have been removed")
Eventually(func() error {
ingress := &corev1beta1.Ingress{}
return k8sClient.Get(ctx, types.NamespacedName{Namespace: namespaceName, Name: appRollout.Spec.ComponentList[0]}, ingress)
}, time.Second*30, 300*time.Millisecond).Should(util.NotFoundMatcher{})
})
PIt("Test rolling by changing the definition", func() {
@@ -611,7 +713,6 @@ var _ = Describe("Cloneset based rollout tests", func() {
createAppRolling(&newAppRollout)
verifyRolloutSucceeded(appRollout.Spec.TargetAppRevisionName)
verifyAppConfigInactive(appRollout.Spec.SourceAppRevisionName)
// Clean up
k8sClient.Delete(ctx, &appRollout)
})

View File

@@ -0,0 +1,17 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: test-e2e-rolling
annotations:
"app.oam.dev/rollout-template": "true"
spec:
components:
- name: metrics-provider
type: clonesetservice
properties:
cmd:
- ./podinfo
- stress-cpu=1
image: stefanprodan/podinfo:5.0.2
port: 8080
updateStrategyType: InPlaceOnly

View File

@@ -0,0 +1,23 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: test-e2e-rolling
annotations:
"app.oam.dev/rollout-template": "true"
spec:
components:
- name: metrics-provider
type: clonesetservice
properties:
cmd:
- ./podinfo
- stress-cpu=1
image: stefanprodan/podinfo:4.0.3
port: 8080
updateStrategyType: InPlaceOnly
traits:
- type: ingress
properties:
domain: test.example.com
http:
"/": 8080

View File

@@ -0,0 +1,23 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: test-e2e-rolling
annotations:
"app.oam.dev/rollout-template": "true"
spec:
components:
- name: metrics-provider
type: clonesetservice
properties:
cmd:
- ./podinfo
- stress-cpu=1
image: stefanprodan/podinfo:5.0.2
port: 8080
updateStrategyType: InPlaceOnly
traits:
- type: ingress
properties:
domain: test-1.example.com
http:
"/": 8080

View File

@@ -0,0 +1,79 @@
apiVersion: core.oam.dev/v1beta1
kind: TraitDefinition
metadata:
annotations:
definition.oam.dev/description: "Enable public web traffic for the component."
name: ingress
namespace: vela-system
spec:
status:
customStatus: |-
let igs = context.outputs.ingress.status.loadBalancer.ingress
if igs == _|_ {
message: "No loadBalancer found, visiting by using 'vela port-forward " + context.appName + " --route'\n"
}
if len(igs) > 0 {
if igs[0].ip != _|_ {
message: "Visiting URL: " + context.outputs.ingress.spec.rules[0].host + ", IP: " + igs[0].ip
}
if igs[0].ip == _|_ {
message: "Visiting URL: " + context.outputs.ingress.spec.rules[0].host
}
}
healthPolicy: |
isHealth: len(context.outputs.service.spec.clusterIP) > 0
appliesToWorkloads:
- deployments.apps
podDisruptive: false
schematic:
cue:
template: |
// trait template can have multiple outputs in one trait
outputs: service: {
apiVersion: "v1"
kind: "Service"
metadata:
name: context.name
spec: {
selector: {
"app.oam.dev/component": context.name
}
ports: [
for k, v in parameter.http {
port: v
targetPort: v
},
]
}
}
outputs: ingress: {
apiVersion: "networking.k8s.io/v1beta1"
kind: "Ingress"
metadata:
name: context.name
spec: {
rules: [{
host: parameter.domain
http: {
paths: [
for k, v in parameter.http {
path: k
backend: {
serviceName: context.name
servicePort: v
}
},
]
}
}]
}
}
parameter: {
// +usage=Specify the domain you want to expose
domain: string
// +usage=Specify the mapping relationship between the http path and the workload port
http: [string]: int
}