remove appContext from app/appRollout controller (#1774)

* refine assemble and dispatch

Signed-off-by: roy wang <seiwy2010@gmail.com>

* remove app context in app controller

modify clean up app revision

remove old resource tracker related logic

fix unit tests

Signed-off-by: roy wang <seiwy2010@gmail.com>

* fix e2e-test

- get rid of appCtx in test cases
- fix test cases according other logic changes in app controller

remove whole appcontext_test.go file

disable rollout related e2e test provisionally

disable resource tracker related e2e test provisionally

Signed-off-by: roy wang <seiwy2010@gmail.com>

* add finalizer logic for app controller

Signed-off-by: roywang <seiwy2010@gmail.com>

* add new apply option MustBeControllableByAny

make dispatch idempotent

Signed-off-by: roywang <seiwy2010@gmail.com>

* refactor rollout

* fix rollout finalize succeed

Signed-off-by: roywang <seiwy2010@gmail.com>

* add update trait and gc test

fix lint

* fix flaky e2e test

Signed-off-by: roywang <seiwy2010@gmail.com>

* fix comment

* fix comments and add sourceRevision dispatch

delete useless

Signed-off-by: Yue Wang <seiwy2010@gmail.com>

* fix app finalizer backward compatible

Signed-off-by: roywang <seiwy2010@gmail.com>

* fix backward compatibility for deprecation of appContext

add unit test for apply option

add e2e test

Signed-off-by: Yue Wang <seiwy2010@gmail.com>

* fix app controller unit test

Signed-off-by: Yue Wang <seiwy2010@gmail.com>

* refine app controller apply logic

Signed-off-by: Yue Wang <seiwy2010@gmail.com>

* fix e2e test of resource tracker

fix e2e test of rollout plan

fix flaky e2e tests

Signed-off-by: Yue Wang <seiwy2010@gmail.com>

* refine comments and remove useless codes

Signed-off-by: Yue Wang <seiwy2010@gmail.com>

* disable appCtx controller

add Component handler into app controller

Signed-off-by: Yue Wang <seiwy2010@gmail.com>

Co-authored-by: wangyike <wangyike.wyk@alibaba-inc.com>
This commit is contained in:
Yue Wang
2021-06-12 15:46:32 +09:00
committed by GitHub
parent 9de6aea5ab
commit 889e38e984
41 changed files with 1983 additions and 2098 deletions

View File

@@ -18,6 +18,7 @@ package application
import (
"context"
"fmt"
"time"
"github.com/crossplane/crossplane-runtime/apis/core/v1alpha1"
@@ -33,27 +34,38 @@ import (
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
velatypes "github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/appfile"
core "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/application/dispatch"
ac "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/applicationconfiguration"
"github.com/oam-dev/kubevela/pkg/cue/packages"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/discoverymapper"
oamutil "github.com/oam-dev/kubevela/pkg/oam/util"
"github.com/oam-dev/kubevela/pkg/utils/apply"
"github.com/oam-dev/kubevela/version"
)
// RolloutReconcileWaitTime is the time to wait before reconcile again an application still in rollout phase
const (
RolloutReconcileWaitTime = time.Second * 3
resourceTrackerFinalizer = "resourceTracker.finalizer.core.oam.dev"
errUpdateApplicationStatus = "cannot update application status"
errUpdateApplicationFinalizer = "cannot update application finalizer"
)
const (
legacyResourceTrackerFinalizer = "resourceTracker.finalizer.core.oam.dev"
// resourceTrackerFinalizer is to delete the resource tracker of the latest app revision.
resourceTrackerFinalizer = "app.oam.dev/resource-tracker-finalizer"
// onlyRevisionFinalizer is to delete all resource trackers of app revisions which may be used
// out of the domain of app controller, e.g., AppRollout controller.
onlyRevisionFinalizer = "app.oam.dev/only-revision-finalizer"
)
// Reconciler reconciles a Application object
type Reconciler struct {
client.Client
@@ -84,100 +96,80 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
}
return ctrl.Result{}, err
}
ctx = oamutil.SetNamespaceInCtx(ctx, app.Namespace)
// this annotation will be propogated to all resources created by the application
if len(app.GetAnnotations()[oam.AnnotationKubeVelaVersion]) == 0 {
oamutil.AddAnnotations(app, map[string]string{
oam.AnnotationKubeVelaVersion: version.VelaVersion,
})
}
if endReconcile, err := r.handleFinalizers(ctx, app); endReconcile {
return ctrl.Result{}, err
}
handler := &appHandler{
r: r,
app: app,
}
if app.ObjectMeta.DeletionTimestamp.IsZero() {
if registerFinalizers(app) {
klog.InfoS("Register new finalizer for application", "application", klog.KObj(app), "finalizers", app.ObjectMeta.Finalizers)
return reconcile.Result{}, errors.Wrap(r.Client.Update(ctx, app), errUpdateApplicationFinalizer)
}
} else {
needUpdate, err := handler.removeResourceTracker(ctx)
if err != nil {
klog.InfoS("Failed to remove application resourceTracker", "err", err)
app.Status.SetConditions(v1alpha1.ReconcileError(errors.Wrap(err, "error to remove finalizer")))
return reconcile.Result{}, errors.Wrap(r.UpdateStatus(ctx, app), errUpdateApplicationStatus)
}
if needUpdate {
klog.InfoS("Remove finalizer of application", "application", app.Namespace+"/"+app.Name, "finalizers", app.ObjectMeta.Finalizers)
return ctrl.Result{}, errors.Wrap(r.Update(ctx, app), errUpdateApplicationFinalizer)
}
// deleting and no need to handle finalizer
return reconcile.Result{}, nil
if app.Status.LatestRevision != nil {
// record previous app revision name
handler.previousRevisionName = app.Status.LatestRevision.Name
}
klog.Info("Start Rendering")
app.Status.Phase = common.ApplicationRendering
klog.Info("Parse template")
// parse template
appParser := appfile.NewApplicationParser(r.Client, r.dm, r.pd)
ctx = oamutil.SetNamespaceInCtx(ctx, app.Namespace)
generatedAppfile, err := appParser.GenerateAppFile(ctx, app)
if err != nil {
klog.InfoS("Failed to parse application", "err", err)
klog.ErrorS(err, "Failed to parse application", "application", klog.KObj(app))
app.Status.SetConditions(errorCondition("Parsed", err))
r.Recorder.Event(app, event.Warning(velatypes.ReasonFailedParse, err))
return handler.handleErr(err)
}
app.Status.SetConditions(readyCondition("Parsed"))
handler.appfile = generatedAppfile
r.Recorder.Event(app, event.Normal(velatypes.ReasonParsed, velatypes.MessageParsed))
handler.appfile = generatedAppfile
appRev, err := handler.GenerateAppRevision(ctx)
if err != nil {
klog.InfoS("Failed to calculate appRevision", "err", err)
klog.ErrorS(err, "Failed to calculate appRevision", "application", klog.KObj(app))
app.Status.SetConditions(errorCondition("Parsed", err))
r.Recorder.Event(app, event.Warning(velatypes.ReasonFailedParse, err))
return handler.handleErr(err)
}
r.Recorder.Event(app, event.Normal(velatypes.ReasonParsed, velatypes.MessageParsed))
// Record the revision so it can be used to render data in context.appRevision
generatedAppfile.RevisionName = appRev.Name
klog.Info("Successfully calculate appRevision", "revisionName", appRev.Name,
"revisionHash", handler.revisionHash, "isNewRevision", handler.isNewRevision)
klog.Info("Build template")
// pass appRevision to appfile, so it can be used to render data in context.appRevision
generatedAppfile.RevisionName = appRev.Name
// build template to applicationconfig & component
ac, comps, err := generatedAppfile.GenerateApplicationConfiguration()
if err != nil {
klog.InfoS("Failed to generate applicationConfiguration", "err", err)
klog.ErrorS(err, "Failed to generate applicationConfiguration", "application", klog.KObj(app))
app.Status.SetConditions(errorCondition("Built", err))
r.Recorder.Event(app, event.Warning(velatypes.ReasonFailedRender, err))
return handler.handleErr(err)
}
err = handler.handleResourceTracker(ctx, comps, ac)
if err != nil {
klog.InfoS("Failed to handle resourceTracker", "err", err)
app.Status.SetConditions(errorCondition("Handle resourceTracker", err))
r.Recorder.Event(app, event.Warning(velatypes.ReasonFailedRender, err))
return handler.handleErr(err)
}
// pass the App label and annotation to ac except some app specific ones
oamutil.PassLabelAndAnnotation(app, ac)
app.Status.SetConditions(readyCondition("Built"))
r.Recorder.Event(app, event.Normal(velatypes.ReasonRendered, velatypes.MessageRendered))
klog.Info("Apply application revision & component to the cluster")
// apply application revision & component to the cluster
klog.Info("Successfully render application resources", "application", klog.KObj(app))
// pass application's labels and annotations to ac
oamutil.PassLabelAndAnnotation(app, ac)
// apply application resources' manifests to the cluster
if err := handler.apply(ctx, appRev, ac, comps); err != nil {
klog.InfoS("Failed to apply application revision & component to the cluster", "err", err)
klog.ErrorS(err, "Failed to apply application resources' manifests",
"application", klog.KObj(app))
app.Status.SetConditions(errorCondition("Applied", err))
r.Recorder.Event(app, event.Warning(velatypes.ReasonFailedApply, err))
return handler.handleErr(err)
}
klog.Info("Successfully apply application resources' manifests", "application", klog.KObj(app))
// if inplace is false and rolloutPlan is nil, it means the user will use an outer AppRollout object to rollout the application
if handler.app.Spec.RolloutPlan != nil {
res, err := handler.handleRollout(ctx)
if err != nil {
klog.InfoS("Failed to handle rollout", "err", err)
klog.ErrorS(err, "Failed to handle rollout", "application", klog.KObj(app))
app.Status.SetConditions(errorCondition("Rollout", err))
r.Recorder.Event(app, event.Warning(velatypes.ReasonFailedRollout, err))
return handler.handleErr(err)
@@ -203,7 +195,7 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
// check application health status
appCompStatus, healthy, err := handler.statusAggregate(generatedAppfile)
if err != nil {
klog.InfoS("Failed to aggregate status", "err", err)
klog.ErrorS(err, "Failed to aggregate status", "application", klog.KObj(app))
app.Status.SetConditions(errorCondition("HealthCheck", err))
r.Recorder.Event(app, event.Warning(velatypes.ReasonFailedHealthCheck, err))
return handler.handleErr(err)
@@ -220,11 +212,12 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
r.Recorder.Event(app, event.Normal(velatypes.ReasonHealthCheck, velatypes.MessageHealthCheck))
app.Status.Phase = common.ApplicationRunning
err = garbageCollection(ctx, handler)
if err != nil {
klog.InfoS("Failed to run Garbage collection", "err", err)
if err := garbageCollection(ctx, handler); err != nil {
klog.ErrorS(err, "Failed to run Garbage collection")
r.Recorder.Event(app, event.Warning(velatypes.ReasonFailedGC, err))
return handler.handleErr(err)
}
klog.Info("Successfully garbage collect", "application", klog.KObj(app))
// Gather status of components
var refComps []v1alpha1.TypedReference
@@ -241,23 +234,93 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
return ctrl.Result{}, r.UpdateStatus(ctx, app)
}
// if any finalizers newly registered, return true
func registerFinalizers(app *v1beta1.Application) bool {
if !meta.FinalizerExists(&app.ObjectMeta, resourceTrackerFinalizer) && app.Status.ResourceTracker != nil {
meta.AddFinalizer(&app.ObjectMeta, resourceTrackerFinalizer)
return true
// NOTE Because resource tracker is cluster-scoped resources, we cannot garbage collect them
// by setting application(namespace-scoped) as their owner.
// We delete all resource trackers related to an application through below finalizer logic.
func (r *Reconciler) handleFinalizers(ctx context.Context, app *v1beta1.Application) (bool, error) {
if app.ObjectMeta.DeletionTimestamp.IsZero() {
if !meta.FinalizerExists(app, resourceTrackerFinalizer) {
meta.AddFinalizer(app, resourceTrackerFinalizer)
klog.InfoS("Register new finalizer for application", "application", klog.KObj(app), "finalizer", resourceTrackerFinalizer)
return true, errors.Wrap(r.Client.Update(ctx, app), errUpdateApplicationFinalizer)
}
if appWillReleaseByRollout(app) {
klog.InfoS("Found an application which will be released by rollout", "application", klog.KObj(app))
if !meta.FinalizerExists(app, onlyRevisionFinalizer) {
meta.AddFinalizer(app, onlyRevisionFinalizer)
klog.InfoS("Register new finalizer for application", "application", klog.KObj(app), "finalizer", onlyRevisionFinalizer)
return true, errors.Wrap(r.Client.Update(ctx, app), errUpdateApplicationFinalizer)
}
}
} else {
if meta.FinalizerExists(app, legacyResourceTrackerFinalizer) {
// TODO(roywang) legacyResourceTrackerFinalizer will be deprecated in the future
// this is for backward compatibility
rt := &v1beta1.ResourceTracker{}
rt.SetName(fmt.Sprintf("%s-%s", app.Namespace, app.Name))
if err := r.Client.Delete(ctx, rt); err != nil && !kerrors.IsNotFound(err) {
klog.ErrorS(err, "Failed to delete legacy resource tracker", "name", rt.Name)
app.Status.SetConditions(v1alpha1.ReconcileError(errors.Wrap(err, "error to remove finalizer")))
return true, errors.Wrap(r.UpdateStatus(ctx, app), errUpdateApplicationStatus)
}
meta.RemoveFinalizer(app, legacyResourceTrackerFinalizer)
return true, errors.Wrap(r.Client.Update(ctx, app), errUpdateApplicationFinalizer)
}
if meta.FinalizerExists(app, resourceTrackerFinalizer) {
if app.Status.LatestRevision != nil && len(app.Status.LatestRevision.Name) != 0 {
latestTracker := &v1beta1.ResourceTracker{}
latestTracker.SetName(dispatch.ConstructResourceTrackerName(app.Status.LatestRevision.Name, app.Namespace))
if err := r.Client.Delete(ctx, latestTracker); err != nil && !kerrors.IsNotFound(err) {
klog.ErrorS(err, "Failed to delete latest resource tracker", "name", latestTracker.Name)
app.Status.SetConditions(v1alpha1.ReconcileError(errors.Wrap(err, "error to remove finalizer")))
return true, errors.Wrap(r.UpdateStatus(ctx, app), errUpdateApplicationStatus)
}
}
meta.RemoveFinalizer(app, resourceTrackerFinalizer)
return true, errors.Wrap(r.Client.Update(ctx, app), errUpdateApplicationFinalizer)
}
if meta.FinalizerExists(app, onlyRevisionFinalizer) {
listOpts := []client.ListOption{
client.MatchingLabels{
oam.LabelAppName: app.Name,
oam.LabelAppNamespace: app.Namespace,
}}
rtList := &v1beta1.ResourceTrackerList{}
if err := r.Client.List(ctx, rtList, listOpts...); err != nil {
klog.ErrorS(err, "Failed to list resource tracker of app", "name", app.Name)
app.Status.SetConditions(v1alpha1.ReconcileError(errors.Wrap(err, "error to remove finalizer")))
return true, errors.Wrap(r.UpdateStatus(ctx, app), errUpdateApplicationStatus)
}
for _, rt := range rtList.Items {
if err := r.Client.Delete(ctx, rt.DeepCopy()); err != nil && !kerrors.IsNotFound(err) {
klog.ErrorS(err, "Failed to delete resource tracker", "name", rt.Name)
app.Status.SetConditions(v1alpha1.ReconcileError(errors.Wrap(err, "error to remove finalizer")))
return true, errors.Wrap(r.UpdateStatus(ctx, app), errUpdateApplicationStatus)
}
}
meta.RemoveFinalizer(app, onlyRevisionFinalizer)
return true, errors.Wrap(r.Client.Update(ctx, app), errUpdateApplicationFinalizer)
}
}
return false
return false, nil
}
// appWillReleaseByRollout reports whether the application's release is delegated to
// the rollout workflow. When this returns true, the application controller restricts
// itself to creating/updating application revisions and emits no other Kubernetes
// resources; the AppRollout controller performs the actual release.
//
// Delegation is signaled either by a non-empty rollout annotation on the application
// or by an inline rollout plan in its spec.
func appWillReleaseByRollout(app *v1beta1.Application) bool {
	if app.Spec.RolloutPlan != nil {
		return true
	}
	return app.GetAnnotations()[oam.AnnotationAppRollout] != ""
}
// SetupWithManager install to manager
func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
func (r *Reconciler) SetupWithManager(mgr ctrl.Manager, compHandler *ac.ComponentHandler) error {
// If Application Own these two child objects, AC status change will notify application controller and recursively update AC again, and trigger application event again...
return ctrl.NewControllerManagedBy(mgr).
WithOptions(controller.Options{
MaxConcurrentReconciles: r.concurrentReconciles,
}).
For(&v1beta1.Application{}).
Watches(&source.Kind{Type: &v1alpha2.Component{}}, compHandler).
Complete(r)
}
@@ -285,5 +348,10 @@ func Setup(mgr ctrl.Manager, args core.Args) error {
appRevisionLimit: args.AppRevisionLimit,
concurrentReconciles: args.ConcurrentReconciles,
}
return reconciler.SetupWithManager(mgr)
compHandler := &ac.ComponentHandler{
Client: mgr.GetClient(),
RevisionLimit: args.RevisionLimit,
CustomRevisionHookURL: args.CustomRevisionHookURL,
}
return reconciler.SetupWithManager(mgr, compHandler)
}

View File

@@ -295,6 +295,7 @@ var _ = Describe("Test Application Controller", func() {
Expect(k8sClient.Create(ctx, appFailParse.DeepCopyObject())).Should(BeNil())
reconcileOnce(reconciler, reconcile.Request{NamespacedName: appFailParseKey})
reconcileOnce(reconciler, reconcile.Request{NamespacedName: appFailParseKey})
parseEvents, err := recorder.GetEventsWithName(appFailParse.Name)
Expect(err).Should(BeNil())
@@ -312,6 +313,7 @@ var _ = Describe("Test Application Controller", func() {
}
Expect(k8sClient.Create(ctx, appFailRender.DeepCopyObject())).Should(BeNil())
reconcileOnce(reconciler, reconcile.Request{NamespacedName: appFailRenderKey})
reconcileOnce(reconciler, reconcile.Request{NamespacedName: appFailRenderKey})
renderEvents, err := recorder.GetEventsWithName(appFailRender.Name)
Expect(err).Should(BeNil())
@@ -393,10 +395,6 @@ spec:
err = k8sClient.Get(ctx, appKey, &a)
Expect(err).Should(BeNil())
By("Check ApplicationContext Created")
var appContext v1alpha2.ApplicationContext
Expect(k8sClient.Get(ctx, appKey, &appContext)).Should(BeNil())
By("Check Component Created with the expected workload spec")
var component v1alpha2.Component
Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: ns, Name: componentName}, &component)).Should(BeNil())
@@ -431,16 +429,11 @@ spec:
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
Expect(checkApp.Status.Phase).Should(Equal(common.ApplicationRunning))
By("Check ApplicationContext Created")
appContext := &v1alpha2.ApplicationContext{}
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: appwithNoTrait.Namespace,
Name: appwithNoTrait.Name,
}, appContext)).Should(BeNil())
// check that the new appContext has the correct annotation and labels
Expect(appContext.GetAnnotations()[oam.AnnotationAppRollout]).Should(BeEmpty())
Expect(appContext.GetLabels()[oam.LabelAppRevisionHash]).ShouldNot(BeEmpty())
Expect(appContext.Spec.ApplicationRevisionName).ShouldNot(BeEmpty())
By("Check affiliated resource tracker is created")
expectRTName := fmt.Sprintf("%s-%s", checkApp.Status.LatestRevision.Name, checkApp.GetNamespace())
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: expectRTName}, &v1beta1.ResourceTracker{})
}, 10*time.Second, 500*time.Millisecond).Should(Succeed())
By("Check Component Created with the expected workload spec")
var component v1alpha2.Component
@@ -488,12 +481,11 @@ spec:
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
Expect(checkApp.Status.Phase).Should(Equal(common.ApplicationRunning))
By("Check ApplicationContext Created")
appContext := &v1alpha2.ApplicationContext{}
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: app.Namespace,
Name: app.Name,
}, appContext)).Should(BeNil())
By("Check affiliated resource tracker is created")
expectRTName := fmt.Sprintf("%s-%s", checkApp.Status.LatestRevision.Name, checkApp.GetNamespace())
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: expectRTName}, &v1beta1.ResourceTracker{})
}, 10*time.Second, 500*time.Millisecond).Should(Succeed())
By("Check Component Created with the expected workload spec")
component := &v1alpha2.Component{}
@@ -534,18 +526,18 @@ spec:
Expect(k8sClient.Get(ctx, appKey, curApp)).Should(BeNil())
Expect(curApp.Status.Phase).Should(Equal(common.ApplicationRunning))
By("Check ApplicationContext and trait created as expected")
appContext := &v1alpha2.ApplicationContext{}
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: app.Namespace,
Name: app.Name,
}, appContext)).Should(BeNil())
appRevision := &v1beta1.ApplicationRevision{}
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: app.Namespace,
Name: curApp.Status.LatestRevision.Name,
}, appRevision)).Should(BeNil())
By("Check affiliated resource tracker is created")
expectRTName := fmt.Sprintf("%s-%s", appRevision.GetName(), appRevision.GetNamespace())
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: expectRTName}, &v1beta1.ResourceTracker{})
}, 10*time.Second, 500*time.Millisecond).Should(Succeed())
gotTrait := unstructured.Unstructured{}
ac, err := util.RawExtension2AppConfig(appRevision.Spec.ApplicationConfiguration)
@@ -608,19 +600,17 @@ spec:
Expect(k8sClient.Get(ctx, appKey, curApp)).Should(BeNil())
Expect(curApp.Status.Phase).Should(Equal(common.ApplicationRunning))
By("Check AppConfig and trait created as expected")
appContext := &v1alpha2.ApplicationContext{}
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: app.Namespace,
Name: app.Name,
}, appContext)).Should(BeNil())
appRevision := &v1beta1.ApplicationRevision{}
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: app.Namespace,
Name: curApp.Status.LatestRevision.Name,
}, appRevision)).Should(BeNil())
Expect(appContext.Spec.ApplicationRevisionName).Should(Equal(appRevision.Name))
By("Check affiliated resource tracker is created")
expectRTName := fmt.Sprintf("%s-%s", appRevision.GetName(), appRevision.GetNamespace())
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: expectRTName}, &v1beta1.ResourceTracker{})
}, 10*time.Second, 500*time.Millisecond).Should(Succeed())
ac, err := util.RawExtension2AppConfig(appRevision.Spec.ApplicationConfiguration)
Expect(err).Should(BeNil())
@@ -717,19 +707,17 @@ spec:
Expect(scopes[0].Kind).Should(BeEquivalentTo("HealthScope"))
Expect(scopes[0].Name).Should(BeEquivalentTo("appWithTraitAndScope-default-health"))
By("Check AppConfig and trait created as expected")
appContext := &v1alpha2.ApplicationContext{}
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: app.Namespace,
Name: app.Name,
}, appContext)).Should(BeNil())
appRevision := &v1beta1.ApplicationRevision{}
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: app.Namespace,
Name: curApp.Status.LatestRevision.Name,
}, appRevision)).Should(BeNil())
Expect(appContext.Spec.ApplicationRevisionName).Should(Equal(appRevision.Name))
Expect(appContext.GetAnnotations()[oam.AnnotationInplaceUpgrade]).Should(Equal("true"))
By("Check affiliated resource tracker is created")
expectRTName := fmt.Sprintf("%s-%s", appRevision.GetName(), appRevision.GetNamespace())
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: expectRTName}, &v1beta1.ResourceTracker{})
}, 10*time.Second, 500*time.Millisecond).Should(Succeed())
gotTrait := unstructured.Unstructured{}
ac, err := util.RawExtension2AppConfig(appRevision.Spec.ApplicationConfiguration)
@@ -788,19 +776,17 @@ spec:
Expect(k8sClient.Get(ctx, appKey, curApp)).Should(BeNil())
Expect(curApp.Status.Phase).Should(Equal(common.ApplicationRunning))
By("Check AppConfig and trait created as expected")
appContext := &v1alpha2.ApplicationContext{}
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: app.Namespace,
Name: app.Name,
}, appContext)).Should(BeNil())
appRevision := &v1beta1.ApplicationRevision{}
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: app.Namespace,
Name: curApp.Status.LatestRevision.Name,
}, appRevision)).Should(BeNil())
Expect(appContext.Spec.ApplicationRevisionName).Should(Equal(appRevision.Name))
Expect(appContext.GetAnnotations()[oam.AnnotationInplaceUpgrade]).Should(Equal("true"))
By("Check affiliated resource tracker is created")
expectRTName := fmt.Sprintf("%s-%s", appRevision.GetName(), appRevision.GetNamespace())
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: expectRTName}, &v1beta1.ResourceTracker{})
}, 10*time.Second, 500*time.Millisecond).Should(Succeed())
gotTrait := unstructured.Unstructured{}
ac, err := util.RawExtension2AppConfig(appRevision.Spec.ApplicationConfiguration)
@@ -866,21 +852,19 @@ spec:
Expect(k8sClient.Get(ctx, appKey, curApp)).Should(BeNil())
Expect(curApp.Status.Phase).Should(Equal(common.ApplicationRunning))
By("check AC and Component updated")
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: app.Namespace,
Name: app.Name,
}, appContext)).Should(BeNil())
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: app.Namespace,
Name: curApp.Status.LatestRevision.Name,
}, appRevision)).Should(BeNil())
Expect(appContext.Spec.ApplicationRevisionName).Should(Equal(appRevision.Name))
Expect(appContext.GetAnnotations()[oam.AnnotationInplaceUpgrade]).Should(Equal("true"))
Expect(json.Unmarshal(ac.Spec.Components[0].Traits[0].Trait.Raw, &gotTrait)).Should(BeNil())
Expect(gotTrait).Should(BeEquivalentTo(expectScalerTrait("myweb5", app.Name)))
By("Check affiliated resource tracker is upgraded")
expectRTName = fmt.Sprintf("%s-%s", appRevision.GetName(), appRevision.GetNamespace())
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: expectRTName}, &v1beta1.ResourceTracker{})
}, 10*time.Second, 500*time.Millisecond).Should(Succeed())
Expect(ac.Spec.Components[0].Scopes[0].ScopeReference).Should(BeEquivalentTo(v1alpha1.TypedReference{
APIVersion: "core.oam.dev/v1alpha2",
Kind: "HealthScope",
@@ -952,18 +936,11 @@ spec:
Expect(k8sClient.Get(ctx, appKey, curApp)).Should(BeNil())
Expect(curApp.Status.Phase).Should(Equal(common.ApplicationRunning))
By("Check AppConfig and trait created as expected")
appContext := &v1alpha2.ApplicationContext{}
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: app.Namespace,
Name: app.Name,
}, appContext)).Should(BeNil())
appRevision := &v1beta1.ApplicationRevision{}
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: app.Namespace,
Name: curApp.Status.LatestRevision.Name,
}, appRevision)).Should(BeNil())
Expect(appContext.Spec.ApplicationRevisionName).Should(Equal(appRevision.Name))
gotTrait := unstructured.Unstructured{}
ac, err := util.RawExtension2AppConfig(appRevision.Spec.ApplicationConfiguration)
@@ -1058,12 +1035,12 @@ spec:
By("Check App running successfully")
checkApp := &v1beta1.Application{}
Eventually(func() string {
_, err := reconciler.Reconcile(reconcile.Request{NamespacedName: appKey})
if err != nil {
return err.Error()
}
checkApp := &v1beta1.Application{}
err = k8sClient.Get(ctx, appKey, checkApp)
if err != nil {
return err.Error()
@@ -1074,6 +1051,12 @@ spec:
return string(checkApp.Status.Phase)
}(), 5*time.Second, time.Second).Should(BeEquivalentTo(common.ApplicationRunning))
By("Check affiliated resource tracker is created")
expectRTName := fmt.Sprintf("%s-%s", checkApp.Status.LatestRevision.Name, checkApp.GetNamespace())
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: expectRTName}, &v1beta1.ResourceTracker{})
}, 10*time.Second, 500*time.Millisecond).Should(Succeed())
Expect(k8sClient.Delete(ctx, app)).Should(BeNil())
})
@@ -1109,15 +1092,11 @@ spec:
Name: utils.ConstructRevisionName(rolloutApp.Name, 1),
}, appRevision)).Should(BeNil())
By("Check ApplicationContext not created")
appContext := &v1alpha2.ApplicationContext{}
// no appContext same name as app exist
Expect(k8sClient.Get(ctx, appKey, appContext)).ShouldNot(Succeed())
// no appContext same name as apprevision exist
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: rolloutApp.Namespace,
Name: utils.ConstructRevisionName(rolloutApp.Name, 1),
}, appContext)).ShouldNot(Succeed())
By("Check affiliated resource tracker is not created")
expectRTName := fmt.Sprintf("%s-%s", appRevision.GetName(), appRevision.GetNamespace())
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: expectRTName}, &v1beta1.ResourceTracker{})
}, 10*time.Second, 500*time.Millisecond).ShouldNot(Succeed())
By("Check Component Created with the expected workload spec")
var component v1alpha2.Component
@@ -1135,20 +1114,19 @@ spec:
Expect(ac.Spec.Components[0].ComponentName).Should(BeEmpty())
Expect(ac.Spec.Components[0].RevisionName).Should(Equal(component.Status.LatestRevision.Name))
By("Reconcile again to make sure we are not creating more appConfigs")
By("Reconcile again to make sure we are not creating more resource trackers")
reconcileRetry(reconciler, reconcile.Request{NamespacedName: appKey})
By("Verify that no new AppRevision created")
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: rolloutApp.Namespace,
Name: utils.ConstructRevisionName(rolloutApp.Name, 2),
}, appRevision)).ShouldNot(Succeed())
// no appContext same name as app exist
Expect(k8sClient.Get(ctx, appKey, appContext)).ShouldNot(Succeed())
// no appContext same name as apprevision exist
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: rolloutApp.Namespace,
Name: utils.ConstructRevisionName(rolloutApp.Name, 1),
}, appContext)).ShouldNot(Succeed())
By("Check no new affiliated resource tracker is created")
expectRTName = fmt.Sprintf("%s-%s", utils.ConstructRevisionName(rolloutApp.Name, 2), rolloutApp.GetNamespace())
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: expectRTName}, &v1beta1.ResourceTracker{})
}, 10*time.Second, 500*time.Millisecond).ShouldNot(Succeed())
By("Check no new Component created")
Expect(k8sClient.Get(ctx, client.ObjectKey{
@@ -1159,26 +1137,26 @@ spec:
Expect(component.Status.LatestRevision.Revision).ShouldNot(BeNil())
Expect(component.Status.LatestRevision.Revision).Should(BeEquivalentTo(1))
By("Remove rollout annotation should lead to new appContext created")
By("Remove rollout annotation should lead to new resource tracker created")
Expect(k8sClient.Get(ctx, appKey, rolloutApp)).Should(Succeed())
rolloutApp.SetAnnotations(map[string]string{
"keep": "true",
})
Expect(k8sClient.Update(ctx, rolloutApp)).Should(BeNil())
reconcileRetry(reconciler, reconcile.Request{NamespacedName: appKey})
// app should create an appContext
Expect(k8sClient.Get(ctx, appKey, appContext)).Should(Succeed())
Expect(appContext.Spec.ApplicationRevisionName).Should(Equal(utils.ConstructRevisionName(rolloutApp.Name, 1)))
By("Verify that no new AppRevision created")
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: rolloutApp.Namespace,
Name: utils.ConstructRevisionName(rolloutApp.Name, 2),
}, appRevision)).ShouldNot(Succeed())
// no appContext same name as apprevision exist
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: rolloutApp.Namespace,
Name: utils.ConstructRevisionName(rolloutApp.Name, 1),
}, appContext)).ShouldNot(Succeed())
By("Check no new affiliated resource tracker is created")
expectRTName = fmt.Sprintf("%s-%s", utils.ConstructRevisionName(rolloutApp.Name, 2), rolloutApp.GetNamespace())
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: expectRTName}, &v1beta1.ResourceTracker{})
}, 10*time.Second, 500*time.Millisecond).ShouldNot(Succeed())
By("Delete Application, clean the resource")
Expect(k8sClient.Delete(ctx, rolloutApp)).Should(BeNil())
})
@@ -1210,7 +1188,7 @@ spec:
app := appWithTraitHealthStatus.DeepCopy()
app.Spec.Components[0].Name = compName
app.Spec.Components[0].Type = "nworker"
app.Spec.Components[0].Properties = runtime.RawExtension{Raw: []byte(`{"cmd":["sleep","1000"],"image":"busybox3","lives":"3","enemies":"alain"}`)}
app.Spec.Components[0].Properties = runtime.RawExtension{Raw: []byte(`{"cmd":["sleep","1000"],"image":"busybox3","lives":"3","enemies":"alien"}`)}
app.Spec.Components[0].Traits[0].Type = "ingress"
app.Spec.Components[0].Traits[0].Properties = runtime.RawExtension{Raw: []byte(`{"domain":"example.com","http":{"/":80}}`)}
@@ -1385,12 +1363,11 @@ spec:
Name: curApp.Status.LatestRevision.Name,
}, appRevision)).Should(BeNil())
By("Check ApplicationContext created")
appContext := &v1alpha2.ApplicationContext{}
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: curApp.Namespace,
Name: curApp.Name,
}, appContext)).Should(BeNil())
By("Check affiliated resource tracker is created")
expectRTName := fmt.Sprintf("%s-%s", appRevision.GetName(), appRevision.GetNamespace())
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: expectRTName}, &v1beta1.ResourceTracker{})
}, 10*time.Second, 500*time.Millisecond).Should(Succeed())
})
It("app with two components and one component refer to an existing WorkloadDefinition", func() {
@@ -1434,13 +1411,6 @@ spec:
Namespace: curApp.Namespace,
Name: curApp.Status.LatestRevision.Name,
}, appRevision)).Should(BeNil())
By("Check ApplicationContext created")
appContext := &v1alpha2.ApplicationContext{}
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: curApp.Namespace,
Name: curApp.Name,
}, appContext)).Should(BeNil())
})
It("app-import-pkg will create workload by imported kube package", func() {
@@ -1511,15 +1481,11 @@ spec:
Backend: v1beta12.IngressBackend{ServiceName: "myweb", ServicePort: intstr.FromInt(80)}}}}}}},
}})).Should(BeEquivalentTo(""))
By("Check ApplicationContext created")
appContext := &v1alpha2.ApplicationContext{}
Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: curApp.Namespace,
Name: curApp.Name,
}, appContext)).Should(BeNil())
// check that the new appContext has the correct annotation and labels
Expect(appContext.GetAnnotations()[oam.AnnotationAppRollout]).Should(BeEmpty())
Expect(appContext.GetLabels()[oam.LabelAppRevisionHash]).ShouldNot(BeEmpty())
By("Check affiliated resource tracker is created")
expectRTName := fmt.Sprintf("%s-%s", appRevision.GetName(), appRevision.GetNamespace())
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: expectRTName}, &v1beta1.ResourceTracker{})
}, 10*time.Second, 500*time.Millisecond).Should(Succeed())
By("Check Component Created with the expected workload spec")
var component v1alpha2.Component
@@ -1597,6 +1563,28 @@ spec:
})
func reconcileRetry(r reconcile.Reconciler, req reconcile.Request) {
// 1st and 2nd time reconcile to add finalizer
Eventually(func() error {
result, err := r.Reconcile(req)
if err != nil {
By(fmt.Sprintf("reconcile err: %+v ", err))
} else if result.Requeue || result.RequeueAfter > 0 {
By("reconcile timeout as it still needs to requeue")
return fmt.Errorf("reconcile timeout as it still needs to requeue")
}
return err
}, 3*time.Second, time.Second).Should(BeNil())
Eventually(func() error {
result, err := r.Reconcile(req)
if err != nil {
By(fmt.Sprintf("reconcile err: %+v ", err))
} else if result.Requeue || result.RequeueAfter > 0 {
By("reconcile timeout as it still needs to requeue")
return fmt.Errorf("reconcile timeout as it still needs to requeue")
}
return err
}, 3*time.Second, time.Second).Should(BeNil())
// 3rd time reconcile to process main logic of app controller
Eventually(func() error {
result, err := r.Reconcile(req)
if err != nil {
@@ -1607,7 +1595,7 @@ func reconcileRetry(r reconcile.Reconciler, req reconcile.Request) {
return fmt.Errorf("reconcile timeout as it still needs to requeue")
}
return err
}, 30*time.Second, time.Second).Should(BeNil())
}, 5*time.Second, time.Second).Should(BeNil())
}
func reconcileOnce(r reconcile.Reconciler, req reconcile.Request) {

View File

@@ -20,30 +20,29 @@ import (
"context"
"encoding/json"
"fmt"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
runtimev1alpha1 "github.com/crossplane/crossplane-runtime/apis/core/v1alpha1"
"github.com/crossplane/crossplane-runtime/pkg/meta"
"github.com/ghodss/yaml"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/oam/util"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
)
var _ = Describe("Test application controller finalizer logic", func() {
ctx := context.TODO()
namespace := "cross-ns-namespace"
namespace := "cross-namespace"
cd := &v1beta1.ComponentDefinition{}
cDDefJson, _ := yaml.YAMLToJSON([]byte(crossCompDefYaml))
@@ -74,6 +73,8 @@ var _ = Describe("Test application controller finalizer logic", func() {
AfterEach(func() {
By("[TEST] Clean up resources after an integration test")
Expect(k8sClient.DeleteAllOf(ctx, &appsv1.Deployment{}, client.InNamespace(namespace)))
Expect(k8sClient.DeleteAllOf(ctx, &v1alpha2.ManualScalerTrait{}, client.InNamespace(namespace)))
})
It("Test component have normal workload", func() {
@@ -84,14 +85,13 @@ var _ = Describe("Test application controller finalizer logic", func() {
By("Create a normal workload app")
checkApp := &v1beta1.Application{}
_, err := reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
Expect(err).Should(BeNil())
reconcileRetry(reconciler, ctrl.Request{NamespacedName: appKey})
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
Expect(checkApp.Status.Phase).Should(Equal(common.ApplicationRunning))
Expect(len(checkApp.Finalizers)).Should(BeEquivalentTo(0))
Expect(len(checkApp.Finalizers)).Should(BeEquivalentTo(1))
rt := &v1beta1.ResourceTracker{}
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name), rt)).Should(util.NotFoundMatcher{})
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name, "v1"), rt)).Should(Succeed())
By("add a cross namespace trait for application")
updateApp := checkApp.DeepCopy()
@@ -102,21 +102,16 @@ var _ = Describe("Test application controller finalizer logic", func() {
},
}
Expect(k8sClient.Update(ctx, updateApp)).Should(BeNil())
// first reconcile will create resourceTracker and set resourceTracker for app status
_, err = reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
Expect(err).Should(BeNil())
reconcileRetry(reconciler, ctrl.Request{NamespacedName: appKey})
checkApp = new(v1beta1.Application)
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name), rt)).Should(BeNil())
Expect(checkApp.Status.ResourceTracker.UID).Should(BeEquivalentTo(rt.UID))
Expect(len(checkApp.Finalizers)).Should(BeEquivalentTo(0))
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name, "v2"), rt)).Should(BeNil())
Expect(len(checkApp.Finalizers)).Should(BeEquivalentTo(1))
// second reconcile will set finalizer for app
_, err = reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
reconcileRetry(reconciler, ctrl.Request{NamespacedName: appKey})
checkApp = new(v1beta1.Application)
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name), rt)).Should(BeNil())
Expect(err).Should(BeNil())
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name, "v2"), rt)).Should(BeNil())
Expect(len(checkApp.Finalizers)).Should(BeEquivalentTo(1))
Expect(checkApp.Finalizers[0]).Should(BeEquivalentTo(resourceTrackerFinalizer))
@@ -126,11 +121,10 @@ var _ = Describe("Test application controller finalizer logic", func() {
updateApp = checkApp.DeepCopy()
updateApp.Spec.Components[0].Traits = nil
Expect(k8sClient.Update(ctx, updateApp)).Should(BeNil())
_, err = reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
Expect(err).Should(BeNil())
reconcileRetry(reconciler, ctrl.Request{NamespacedName: appKey})
checkApp = new(v1beta1.Application)
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name), rt)).Should(util.NotFoundMatcher{})
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name, "v3"), rt)).Should(Succeed())
Expect(checkApp.Status.ResourceTracker).Should(BeNil())
})
@@ -141,16 +135,14 @@ var _ = Describe("Test application controller finalizer logic", func() {
Expect(k8sClient.Create(ctx, app)).Should(BeNil())
By("Create a cross workload app")
_, err := reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
Expect(err).Should(BeNil())
reconcileRetry(reconciler, ctrl.Request{NamespacedName: appKey})
checkApp := &v1beta1.Application{}
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
Expect(checkApp.Status.Phase).Should(Equal(common.ApplicationRunning))
Expect(len(checkApp.Finalizers)).Should(BeEquivalentTo(0))
Expect(len(checkApp.Finalizers)).Should(BeEquivalentTo(1))
rt := &v1beta1.ResourceTracker{}
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name), rt)).Should(BeNil())
_, err = reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
Expect(err).Should(BeNil())
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name, "v1"), rt)).Should(BeNil())
reconcileRetry(reconciler, ctrl.Request{NamespacedName: appKey})
checkApp = new(v1beta1.Application)
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
Expect(len(checkApp.Finalizers)).Should(BeEquivalentTo(1))
@@ -159,12 +151,11 @@ var _ = Describe("Test application controller finalizer logic", func() {
Expect(k8sClient.Delete(ctx, checkApp)).Should(BeNil())
By("delete app will delete resourceTracker")
// reconcile will delete resourceTracker and unset app's finalizer
_, err = reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
Expect(err).Should(BeNil())
reconcileRetry(reconciler, ctrl.Request{NamespacedName: appKey})
checkApp = new(v1beta1.Application)
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(util.NotFoundMatcher{})
checkRt := new(v1beta1.ResourceTracker)
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name), checkRt)).Should(util.NotFoundMatcher{})
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name, "v1"), checkRt)).Should(util.NotFoundMatcher{})
})
It("Test cross namespace workload, then update the app to change the namespace", func() {
@@ -174,34 +165,29 @@ var _ = Describe("Test application controller finalizer logic", func() {
Expect(k8sClient.Create(ctx, app)).Should(BeNil())
By("Create a cross workload app")
_, err := reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
Expect(err).Should(BeNil())
reconcileRetry(reconciler, ctrl.Request{NamespacedName: appKey})
checkApp := &v1beta1.Application{}
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
Expect(checkApp.Status.Phase).Should(Equal(common.ApplicationRunning))
Expect(len(checkApp.Finalizers)).Should(BeEquivalentTo(0))
Expect(len(checkApp.Finalizers)).Should(BeEquivalentTo(1))
rt := &v1beta1.ResourceTracker{}
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name), rt)).Should(BeNil())
_, err = reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
Expect(err).Should(BeNil())
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name, "v1"), rt)).Should(BeNil())
reconcileRetry(reconciler, ctrl.Request{NamespacedName: appKey})
checkApp = new(v1beta1.Application)
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
Expect(len(checkApp.Finalizers)).Should(BeEquivalentTo(1))
Expect(checkApp.Finalizers[0]).Should(BeEquivalentTo(resourceTrackerFinalizer))
Expect(checkApp.Status.ResourceTracker.UID).Should(BeEquivalentTo(rt.UID))
Expect(len(rt.Status.TrackedResources)).Should(BeEquivalentTo(1))
By("Update the app, set type to normal-worker")
checkApp.Spec.Components[0].Type = "normal-worker"
Expect(k8sClient.Update(ctx, checkApp)).Should(BeNil())
_, err = reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
Expect(err).Should(BeNil())
reconcileRetry(reconciler, ctrl.Request{NamespacedName: appKey})
checkApp = new(v1beta1.Application)
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
Expect(checkApp.Status.ResourceTracker).Should(BeNil())
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name), rt)).Should(util.NotFoundMatcher{})
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name, "v2"), rt)).Should(Succeed())
Expect(k8sClient.Delete(ctx, checkApp)).Should(BeNil())
_, err = reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
Expect(err).Should(BeNil())
reconcileRetry(reconciler, ctrl.Request{NamespacedName: appKey})
})
It("Test cross namespace workload and trait, then update the app to delete trait ", func() {
@@ -216,99 +202,31 @@ var _ = Describe("Test application controller finalizer logic", func() {
}
Expect(k8sClient.Create(ctx, app)).Should(BeNil())
By("Create a cross workload trait app")
_, err := reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
Expect(err).Should(BeNil())
reconcileRetry(reconciler, ctrl.Request{NamespacedName: appKey})
checkApp := &v1beta1.Application{}
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
Expect(checkApp.Status.Phase).Should(Equal(common.ApplicationRunning))
Expect(len(checkApp.Finalizers)).Should(BeEquivalentTo(0))
Expect(len(checkApp.Finalizers)).Should(BeEquivalentTo(1))
rt := &v1beta1.ResourceTracker{}
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name), rt)).Should(BeNil())
_, err = reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
Expect(err).Should(BeNil())
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name, "v1"), rt)).Should(BeNil())
reconcileRetry(reconciler, ctrl.Request{NamespacedName: appKey})
checkApp = new(v1beta1.Application)
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
Expect(len(checkApp.Finalizers)).Should(BeEquivalentTo(1))
Expect(checkApp.Finalizers[0]).Should(BeEquivalentTo(resourceTrackerFinalizer))
Expect(checkApp.Status.ResourceTracker.UID).Should(BeEquivalentTo(rt.UID))
Expect(len(rt.Status.TrackedResources)).Should(BeEquivalentTo(2))
By("Update the app, set type to normal-worker")
checkApp.Spec.Components[0].Traits = nil
Expect(k8sClient.Update(ctx, checkApp)).Should(BeNil())
_, err = reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
Expect(err).Should(BeNil())
reconcileRetry(reconciler, ctrl.Request{NamespacedName: appKey})
rt = &v1beta1.ResourceTracker{}
checkApp = new(v1beta1.Application)
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name), rt)).Should(BeNil())
Expect(checkApp.Status.ResourceTracker.UID).Should(BeEquivalentTo(rt.UID))
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name, "v2"), rt)).Should(BeNil())
Expect(len(rt.Status.TrackedResources)).Should(BeEquivalentTo(1))
Expect(k8sClient.Delete(ctx, checkApp)).Should(BeNil())
_, err = reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
Expect(err).Should(BeNil())
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name), rt)).Should(util.NotFoundMatcher{})
})
})
var _ = Describe("Test finalizer related func", func() {
ctx := context.TODO()
namespace := "cross-ns-namespace"
var handler appHandler
BeforeEach(func() {
ns := v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: namespace,
},
}
Expect(k8sClient.Create(ctx, &ns)).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
})
AfterEach(func() {
By("[TEST] Clean up resources after an integration test")
})
It("Test finalizeResourceTracker func with need update ", func() {
app := getApp("app-3", namespace, "worker")
rt := &v1beta1.ResourceTracker{
ObjectMeta: metav1.ObjectMeta{
Name: namespace + "-" + app.GetName(),
},
}
Expect(k8sClient.Create(ctx, rt)).Should(BeNil())
app.Status.ResourceTracker = &runtimev1alpha1.TypedReference{
Name: rt.Name,
Kind: v1beta1.ResourceTrackerGroupKind,
APIVersion: v1beta1.ResourceTrackerKindAPIVersion,
UID: rt.UID}
meta.AddFinalizer(&app.ObjectMeta, resourceTrackerFinalizer)
handler = appHandler{
r: reconciler,
app: app,
}
need, err := handler.removeResourceTracker(ctx)
Expect(err).Should(BeNil())
Expect(need).Should(BeEquivalentTo(true))
Eventually(func() error {
err := k8sClient.Get(ctx, getTrackerKey(namespace, app.Name), rt)
if err == nil || !apierrors.IsNotFound(err) {
return fmt.Errorf("resourceTracker still exsit")
}
return nil
}, time.Second*60, time.Microsecond*300).Should(BeNil())
Expect(app.Status.ResourceTracker).Should(BeNil())
Expect(meta.FinalizerExists(app, resourceTrackerFinalizer)).Should(BeEquivalentTo(false))
})
It("Test finalizeResourceTracker func without need ", func() {
app := getApp("app-4", namespace, "worker")
handler = appHandler{
r: reconciler,
app: app,
}
need, err := handler.removeResourceTracker(ctx)
Expect(err).Should(BeNil())
Expect(need).Should(BeEquivalentTo(false))
reconcileRetry(reconciler, ctrl.Request{NamespacedName: appKey})
Expect(k8sClient.Get(ctx, getTrackerKey(checkApp.Namespace, checkApp.Name, "v2"), rt)).Should(util.NotFoundMatcher{})
})
})
@@ -334,8 +252,8 @@ func getApp(appName, namespace, comptype string) *v1beta1.Application {
}
}
func getTrackerKey(namespace, name string) types.NamespacedName {
return types.NamespacedName{Name: fmt.Sprintf("%s-%s", namespace, name)}
func getTrackerKey(namespace, name, revision string) types.NamespacedName {
return types.NamespacedName{Name: fmt.Sprintf("%s-%s-%s", name, revision, namespace)}
}
const (

View File

@@ -19,20 +19,15 @@ package application
import (
"context"
"fmt"
"strconv"
"strings"
"time"
runtimev1alpha1 "github.com/crossplane/crossplane-runtime/apis/core/v1alpha1"
"github.com/crossplane/crossplane-runtime/pkg/meta"
terraformtypes "github.com/oam-dev/terraform-controller/api/types"
terraformapi "github.com/oam-dev/terraform-controller/api/v1beta1"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
ctypes "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/v2"
@@ -46,12 +41,12 @@ import (
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/appfile"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/applicationconfiguration"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/application/assemble"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/application/dispatch"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/applicationrollout"
"github.com/oam-dev/kubevela/pkg/controller/utils"
"github.com/oam-dev/kubevela/pkg/cue/process"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/discoverymapper"
oamutil "github.com/oam-dev/kubevela/pkg/oam/util"
)
@@ -75,20 +70,13 @@ func readyCondition(tpy string) runtimev1alpha1.Condition {
}
type appHandler struct {
r *Reconciler
app *v1beta1.Application
appfile *appfile.Appfile
inplace bool
isNewRevision bool
revisionHash string
acrossNamespaceResources []v1beta1.TypedReference
resourceTracker *v1beta1.ResourceTracker
autodetect bool
}
// setInplace will mark if the application should upgrade the workload within the same instance(name never changed)
func (h *appHandler) setInplace(isInplace bool) {
h.inplace = isInplace
r *Reconciler
app *v1beta1.Application
appfile *appfile.Appfile
previousRevisionName string
isNewRevision bool
revisionHash string
autodetect bool
}
func (h *appHandler) handleErr(err error) (ctrl.Result, error) {
@@ -104,12 +92,13 @@ func (h *appHandler) handleErr(err error) (ctrl.Result, error) {
}, nil
}
// apply will
// 1. set ownerReference for ApplicationConfiguration and Components
// 2. update AC's components using the component revision name
// 3. update or create the AC with new revision and remember it in the application status
// 4. garbage collect unused components
func (h *appHandler) apply(ctx context.Context, appRev *v1beta1.ApplicationRevision, ac *v1alpha2.ApplicationConfiguration, comps []*v1alpha2.Component) error {
// don't create components if revision-only annotation is set
if ac.Annotations[oam.AnnotationAppRevisionOnly] == "true" {
h.FinalizeAppRevision(appRev, ac, comps)
return h.createOrUpdateAppRevision(ctx, appRev)
}
owners := []metav1.OwnerReference{{
APIVersion: v1beta1.SchemeGroupVersion.String(),
Kind: v1beta1.ApplicationKind,
@@ -117,36 +106,17 @@ func (h *appHandler) apply(ctx context.Context, appRev *v1beta1.ApplicationRevis
UID: h.app.UID,
Controller: pointer.BoolPtr(true),
}}
if _, exist := h.app.GetAnnotations()[oam.AnnotationAppRollout]; !exist && h.app.Spec.RolloutPlan == nil {
h.setInplace(true)
} else {
h.setInplace(false)
}
// don't create components and AC if revision-only annotation is set
if ac.Annotations[oam.AnnotationAppRevisionOnly] == "true" {
h.FinalizeAppRevision(appRev, ac, comps)
return h.createOrUpdateAppRevision(ctx, appRev)
}
var needTracker bool
ac.SetOwnerReferences(owners)
var err error
for _, comp := range comps {
comp.SetOwnerReferences(owners)
// If the helm mode component doesn't specify the workload
// we just install a helm chart resources
if h.checkAutoDetect(comp) {
if h.isNewRevision && h.checkAutoDetect(comp) {
if err = h.applyHelmModuleResources(ctx, comp, owners); err != nil {
return errors.Wrap(err, "cannot apply Helm module resources")
}
continue
}
needTracker, err = h.checkAndSetResourceTracker(&comp.Spec.Workload)
if err != nil {
return err
}
newComp := comp.DeepCopy()
// newComp will be updated and return the revision name instead of the component name
@@ -154,21 +124,12 @@ func (h *appHandler) apply(ctx context.Context, appRev *v1beta1.ApplicationRevis
if err != nil {
return err
}
if needTracker {
if err := h.recodeTrackedWorkload(comp, revisionName); err != nil {
return err
}
}
// find the ACC that contains this component
for i := 0; i < len(ac.Spec.Components); i++ {
// update the AC using the component revision instead of component name
// we have to make AC immutable including the component it's pointing to
if ac.Spec.Components[i].ComponentName == newComp.Name {
ac.Spec.Components[i].RevisionName = revisionName
ac.Spec.Components[i].ComponentName = ""
if err := h.checkResourceTrackerForTrait(ctx, ac.Spec.Components[i], newComp.Name); err != nil {
return err
}
}
}
// isNewRevision indicates app's newly created or spec has changed
@@ -179,7 +140,6 @@ func (h *appHandler) apply(ctx context.Context, appRev *v1beta1.ApplicationRevis
}
}
}
ac.SetOwnerReferences(owners)
h.FinalizeAppRevision(appRev, ac, comps)
if h.autodetect {
@@ -191,14 +151,21 @@ func (h *appHandler) apply(ctx context.Context, appRev *v1beta1.ApplicationRevis
return err
}
// `h.inplace`: the rollout will create AppContext which will launch the real K8s resources.
// Otherwise, we should create/update the appContext here when there if no rollout controller to take care of new versions
// In this case, the workload should update with the annotation `app.oam.dev/inplace-upgrade=true`
// `!h.autodetect`: If the workload type of the helm mode component is not clear, an autodetect type workload will be specified by default
// In this case, the traits attached to the helm mode component will fail to generate,
// so we only call applyHelmModuleResources to create the helm resource, don't generate ApplicationContext.
if h.inplace && !h.autodetect {
return h.createOrUpdateAppContext(ctx, owners)
if !appWillReleaseByRollout(h.app) && !h.autodetect {
a := assemble.NewAppManifests(appRev).WithWorkloadOption(assemble.DiscoveryHelmBasedWorkload(ctx, h.r.Client))
manifests, err := a.AssembledManifests()
if err != nil {
return errors.WithMessage(err, "cannot assemble resources' manifests")
}
d := dispatch.NewAppManifestsDispatcher(h.r.Client, appRev)
if len(h.previousRevisionName) != 0 {
latestTracker := &v1beta1.ResourceTracker{}
latestTracker.SetName(dispatch.ConstructResourceTrackerName(h.previousRevisionName, h.app.Namespace))
d = d.EnableUpgradeAndGC(latestTracker)
}
if _, err := d.Dispatch(ctx, manifests); err != nil {
return errors.WithMessage(err, "cannot dispatch resources' manifests")
}
}
return nil
}
@@ -397,55 +364,6 @@ func (h *appHandler) createOrUpdateComponent(ctx context.Context, comp *v1alpha2
return curRevisionName, nil
}
// createOrUpdateAppContext makes sure the appContext points to the latest
// application revision. It is only called when there is no rollout; in that
// case the appContext name is the same as the application name.
func (h *appHandler) createOrUpdateAppContext(ctx context.Context, owners []metav1.OwnerReference) error {
	var curAppContext v1alpha2.ApplicationContext
	// AC name is the same as the app name if there is no rollout
	appContext := v1alpha2.ApplicationContext{
		ObjectMeta: metav1.ObjectMeta{
			Name:      h.app.Name,
			Namespace: h.app.Namespace,
		},
		Spec: v1alpha2.ApplicationContextSpec{
			// a new AC always points to the latest app revision
			ApplicationRevisionName: h.app.Status.LatestRevision.Name,
		},
	}
	appContext.SetOwnerReferences(owners)
	// copy the application's labels/annotations before adding entries so we
	// never mutate the application object's own maps in place (the original
	// code wrote straight into the maps returned by GetLabels/GetAnnotations)
	appLabel := make(map[string]string, len(h.app.GetLabels())+1)
	for k, v := range h.app.GetLabels() {
		appLabel[k] = v
	}
	appLabel[oam.LabelAppRevisionHash] = h.app.Status.LatestRevision.RevisionHash
	appContext.SetLabels(appLabel)
	appAnnotation := make(map[string]string, len(h.app.GetAnnotations())+1)
	for k, v := range h.app.GetAnnotations() {
		appAnnotation[k] = v
	}
	appAnnotation[oam.AnnotationInplaceUpgrade] = strconv.FormatBool(h.inplace)
	appContext.SetAnnotations(appAnnotation)
	key := ctypes.NamespacedName{Name: appContext.Name, Namespace: appContext.Namespace}
	if err := h.r.Get(ctx, key, &curAppContext); err != nil {
		if !apierrors.IsNotFound(err) {
			return err
		}
		klog.InfoS("Create a new appContext", "application name",
			appContext.GetName(), "revision it points to", appContext.Spec.ApplicationRevisionName)
		return h.r.Create(ctx, &appContext)
	}
	// the appContext already exists: update it in place rather than creating another one
	klog.InfoS("Replace the existing appContext", "appContext", klog.KObj(&appContext),
		"revision it points to", appContext.Spec.ApplicationRevisionName)
	appContext.ResourceVersion = curAppContext.ResourceVersion
	return h.r.Update(ctx, &appContext)
}
func (h *appHandler) applyHelmModuleResources(ctx context.Context, comp *v1alpha2.Component, owners []metav1.OwnerReference) error {
klog.Info("Process a Helm module component")
repo, err := oamutil.RawExtension2Unstructured(&comp.Spec.Helm.Repository)
@@ -471,30 +389,9 @@ func (h *appHandler) applyHelmModuleResources(ctx context.Context, comp *v1alpha
return nil
}
// checkAndSetResourceTracker reports whether the given raw resource lives
// outside the application's namespace (or is cluster-scoped). When it does,
// the resource is rewritten in place so that the resourceTracker becomes its
// ownerReference.
func (h *appHandler) checkAndSetResourceTracker(resource *runtime.RawExtension) (bool, error) {
	obj, err := oamutil.RawExtension2Unstructured(resource)
	if err != nil {
		return false, err
	}
	crossNS, err := h.checkCrossNamespace(obj)
	if err != nil {
		return false, err
	}
	if !crossNS {
		// same-namespace resource: nothing to track, leave it untouched
		return false, nil
	}
	// set the resourceTracker as the ownerReference of the workload/trait
	owner := h.genResourceTrackerOwnerReference()
	obj.SetOwnerReferences([]metav1.OwnerReference{*owner})
	*resource = oamutil.Object2RawExtension(obj)
	return true, nil
}
// checkAutoDetect judge whether the workload type of a helm mode component is not clear, an autodetect type workload
// will be specified by default In this case, the traits attached to the helm mode component will fail to generate, so
// we only call applyHelmModuleResources to create the helm resource, don't generate other K8s resources.
func (h *appHandler) checkAutoDetect(component *v1alpha2.Component) bool {
if len(component.Spec.Workload.Raw) == 0 && component.Spec.Workload.Object == nil && component.Spec.Helm != nil {
h.autodetect = true
@@ -503,166 +400,12 @@ func (h *appHandler) checkAutoDetect(component *v1alpha2.Component) bool {
return false
}
// genResourceTrackerOwnerReference returns an ownerReference (with controller
// set) that points at the handler's resourceTracker.
// NOTE(review): this does not create or look up the resourceTracker itself;
// h.resourceTracker is assumed to be populated by the caller — confirm.
func (h *appHandler) genResourceTrackerOwnerReference() *metav1.OwnerReference {
	return metav1.NewControllerRef(h.resourceTracker, v1beta1.ResourceTrackerKindVersionKind)
}
// generateResourceTrackerName builds the cluster-scoped resourceTracker name
// for this application as "<namespace>-<name>".
func (h *appHandler) generateResourceTrackerName() string {
	return h.app.Namespace + "-" + h.app.Name
}
// checkCrossNamespace reports whether the resource must be treated as
// cross-namespace: true when it is cluster-scoped, or when it carries a
// non-empty namespace different from the application's.
func (h *appHandler) checkCrossNamespace(u *unstructured.Unstructured) (bool, error) {
	groupKind := u.GetObjectKind().GroupVersionKind().GroupKind()
	namespaced, err := discoverymapper.IsNamespacedScope(h.r.dm, groupKind)
	if err != nil {
		return false, err
	}
	if !namespaced {
		// cluster-scoped resources always need tracking
		return true, nil
	}
	// an empty namespace is filled with the application's namespace later on,
	// so only a non-empty, different namespace counts as cross-namespace here
	ns := u.GetNamespace()
	return ns != "" && ns != h.app.Namespace, nil
}
// removeResourceTracker deletes the application's resourceTracker (if one
// exists) and clears the tracker finalizer and status reference. The returned
// bool tells the caller whether the application object was modified and must
// be updated.
func (h *appHandler) removeResourceTracker(ctx context.Context) (bool, error) {
	// note: do NOT name a local "client" here — it would shadow the imported
	// controller-runtime "client" package
	rt := new(v1beta1.ResourceTracker)
	trackerName := h.generateResourceTrackerName()
	key := ctypes.NamespacedName{Name: trackerName}
	if err := h.r.Client.Get(ctx, key, rt); err != nil {
		if apierrors.IsNotFound(err) {
			// the resourceTracker may already be gone while the finalizer lingers
			if meta.FinalizerExists(h.app, resourceTrackerFinalizer) {
				meta.RemoveFinalizer(h.app, resourceTrackerFinalizer)
				return true, nil
			}
			// the informer cache may not have synced the resourceTracker yet;
			// returning an error triggers another reconcile
			if h.app.Status.ResourceTracker != nil {
				return false, fmt.Errorf("application status has resourceTracker but cannot get it from k8s")
			}
			return false, nil
		}
		return false, err
	}
	// delete the tracker we just fetched instead of rebuilding a stub object
	if err := h.r.Client.Delete(ctx, rt); err != nil {
		return false, err
	}
	klog.Info("Delete application resourceTracker")
	meta.RemoveFinalizer(h.app, resourceTrackerFinalizer)
	h.app.Status.ResourceTracker = nil
	return true, nil
}
// recodeTrackedWorkload records the component's workload — named the way the
// appConfig would name it — into the handler's cross-namespace resource list.
func (h *appHandler) recodeTrackedWorkload(comp *v1alpha2.Component, compRevisionName string) error {
	name, err := h.getWorkloadName(comp.Spec.Workload, comp.Name, compRevisionName)
	if err != nil {
		return err
	}
	return h.recodeTrackedResource(name, comp.Spec.Workload)
}
// checkResourceTrackerForTrait inspects every trait of the given appConfig
// component. Any trait living outside the application's namespace gets the
// resourceTracker set as its ownerReference and is recorded in the handler's
// acrossNamespaceResources list.
func (h *appHandler) checkResourceTrackerForTrait(ctx context.Context, comp v1alpha2.ApplicationConfigurationComponent, compName string) error {
	for i, ct := range comp.Traits {
		// mutates comp.Traits[i].Trait in place (sets ownerReferences); note
		// that ct still holds the pre-mutation copy taken at loop entry, and
		// the calls below deliberately use that pre-mutation copy
		needTracker, err := h.checkAndSetResourceTracker(&comp.Traits[i].Trait)
		if err != nil {
			return err
		}
		if needTracker {
			// derive the trait's generated name the same way appConfig would
			traitName, err := h.getTraitName(ctx, compName, comp.Traits[i].DeepCopy(), &ct.Trait)
			if err != nil {
				return err
			}
			if err = h.recodeTrackedResource(traitName, ct.Trait); err != nil {
				return err
			}
		}
	}
	return nil
}
// getWorkloadName computes the name a cross-namespace workload would receive
// from the applicationContext, mirroring appConfig's workload-naming logic.
func (h *appHandler) getWorkloadName(w runtime.RawExtension, componentName string, revisionName string) (string, error) {
	workload, err := oamutil.RawExtension2Unstructured(&w)
	if err != nil {
		return "", err
	}
	revision := 0
	if revisionName != "" {
		if revision, err = utils.ExtractRevision(revisionName); err != nil {
			return "", err
		}
	}
	applicationconfiguration.SetAppWorkloadInstanceName(componentName, workload, revision, strconv.FormatBool(h.inplace))
	return workload.GetName(), nil
}
// getTraitName generates a trait's name. By default the trait name is generated by the
// applicationContext; this helper lets the application controller compute the name of a
// cross-namespace trait using the same logic appConfig uses.
func (h *appHandler) getTraitName(ctx context.Context, componentName string, ct *v1alpha2.ComponentTrait, t *runtime.RawExtension) (string, error) {
	trait, err := oamutil.RawExtension2Unstructured(t)
	if err != nil {
		return "", err
	}
	traitDef, err := oamutil.FetchTraitDefinition(ctx, h.r, h.r.dm, trait)
	if err != nil {
		if !apierrors.IsNotFound(err) {
			return "", errors.Wrapf(err, "cannot find trait definition %q %q %q", trait.GetAPIVersion(), trait.GetKind(), trait.GetName())
		}
		// No definition registered for this trait kind: fall back to a dummy definition.
		traitDef = oamutil.GetDummyTraitDefinition(trait)
	}
	// Only the part of the definition name before the first '.' is used as the trait type.
	traitType := traitDef.Name
	if idx := strings.Index(traitType, "."); idx >= 0 {
		traitType = traitType[:idx]
	}
	return oamutil.GenTraitName(componentName, ct, traitType), nil
}
// recodeTrackedResource appends a cross-namespace resource reference to the handler's
// acrossNamespaceResources field, extracting namespace/apiVersion/kind from the raw resource.
func (h *appHandler) recodeTrackedResource(resourceName string, resource runtime.RawExtension) error {
	u, err := oamutil.RawExtension2Unstructured(&resource)
	if err != nil {
		return err
	}
	h.acrossNamespaceResources = append(h.acrossNamespaceResources, v1beta1.TypedReference{
		Name:       resourceName,
		Namespace:  u.GetNamespace(),
		APIVersion: u.GetAPIVersion(),
		Kind:       u.GetKind(),
	})
	return nil
}
type garbageCollectFunc func(ctx context.Context, h *appHandler) error
// garbageCollection executes the garbage collection functions, including:
// 1. collect useless across-namespace resources
// 2. clean up legacy app revisions
func garbageCollection(ctx context.Context, h *appHandler) error {
collectFuncs := []garbageCollectFunc{
garbageCollectFunc(gcAcrossNamespaceResource),
garbageCollectFunc(cleanUpApplicationRevision),
}
for _, collectFunc := range collectFuncs {
@@ -673,122 +416,6 @@ func garbageCollection(ctx context.Context, h *appHandler) error {
return nil
}
// Now if workloads or traits are in the same namespace as the application, applicationContext will take over GC of workloads and traits.
// Here we cover the case in which a cross-namespace component, or one of its cross-namespace traits, is removed from an application.
func gcAcrossNamespaceResource(ctx context.Context, h *appHandler) error {
rt := new(v1beta1.ResourceTracker)
err := h.r.Get(ctx, ctypes.NamespacedName{Name: h.generateResourceTrackerName()}, rt)
if err != nil {
if apierrors.IsNotFound(err) {
// No tracker exists, so there is nothing to GC; just keep app status consistent.
h.app.Status.ResourceTracker = nil
return nil
}
return err
}
applied := map[v1beta1.TypedReference]bool{}
// No cross-namespace resources remain: delete the whole tracker and clear app status.
if len(h.acrossNamespaceResources) == 0 {
h.app.Status.ResourceTracker = nil
if err := h.r.Delete(ctx, rt); err != nil {
return client.IgnoreNotFound(err)
}
return nil
}
// Build a set of the resources applied in this reconcile round.
for _, resource := range h.acrossNamespaceResources {
applied[resource] = true
}
// Delete every previously tracked resource that was not applied this round.
for _, ref := range rt.Status.TrackedResources {
if !applied[ref] {
resource := new(unstructured.Unstructured)
resource.SetAPIVersion(ref.APIVersion)
resource.SetKind(ref.Kind)
resource.SetNamespace(ref.Namespace)
resource.SetName(ref.Name)
err := h.r.Delete(ctx, resource)
if err != nil {
// Already gone — treat as successfully collected.
if apierrors.IsNotFound(err) {
continue
}
return err
}
}
}
// Update resourceTracker status: record the currently applied across-namespace resources.
rt.Status.TrackedResources = h.acrossNamespaceResources
if err := h.r.Status().Update(ctx, rt); err != nil {
return err
}
// Point app status at the (still live) tracker.
h.app.Status.ResourceTracker = &runtimev1alpha1.TypedReference{
Name: rt.Name,
Kind: v1beta1.ResourceTrackerGroupKind,
APIVersion: v1beta1.ResourceTrackerKindAPIVersion,
UID: rt.UID}
return nil
}
// handleResourceTracker checks the namespace of all workloads and traits; if any resource
// is cross-namespace it ensures a resourceTracker exists (creating one if necessary) and
// stores it in the appHandler's resourceTracker field.
func (h *appHandler) handleResourceTracker(ctx context.Context, components []*v1alpha2.Component, ac *v1alpha2.ApplicationConfiguration) error {
	needTracker := false
	// First pass: does any component workload live outside the app's namespace?
	for _, comp := range components {
		if h.checkAutoDetect(comp) {
			continue
		}
		wl, err := oamutil.RawExtension2Unstructured(&comp.Spec.Workload)
		if err != nil {
			return err
		}
		crossNS, err := h.checkCrossNamespace(wl)
		if err != nil {
			return err
		}
		if crossNS {
			needTracker = true
			break
		}
	}
	// Second pass: does any trait live outside the app's namespace?
	// Runs even if a workload already triggered tracking, so malformed traits still error out.
	for _, acComp := range ac.Spec.Components {
		foundCrossNS := false
		for _, tr := range acComp.Traits {
			tu, err := oamutil.RawExtension2Unstructured(&tr.Trait)
			if err != nil {
				return err
			}
			crossNS, err := h.checkCrossNamespace(tu)
			if err != nil {
				return err
			}
			if crossNS {
				foundCrossNS = true
				break
			}
		}
		if foundCrossNS {
			needTracker = true
			break
		}
	}
	if !needTracker {
		return nil
	}
	// Get-or-create the tracker associated with this application.
	rt := new(v1beta1.ResourceTracker)
	err := h.r.Get(ctx, ctypes.NamespacedName{Name: h.generateResourceTrackerName()}, rt)
	switch {
	case err == nil:
		h.resourceTracker = rt
		return nil
	case apierrors.IsNotFound(err):
		rt = &v1beta1.ResourceTracker{
			ObjectMeta: metav1.ObjectMeta{
				Name: h.generateResourceTrackerName(),
			},
		}
		if err := h.r.Client.Create(ctx, rt); err != nil {
			return err
		}
		h.resourceTracker = rt
		return nil
	default:
		return err
	}
}
func (h *appHandler) handleRollout(ctx context.Context) (reconcile.Result, error) {
var comps []string
for _, component := range h.app.Spec.Components {

View File

@@ -94,6 +94,10 @@ func (am *AppManifests) WithWorkloadOption(wo WorkloadOption) *AppManifests {
}
// AssembledManifests do assemble and merge all assembled resources(except referenced scopes) into one array
// The result guarantee the order of resources as defined in application originally.
// If it contains more than one component, the resources are well-orderred and also grouped.
// For example, if app = comp1 (wl1 + trait1 + trait2) + comp2 (wl2 + trait3 +trait4),
// the result is [wl1, trait1, trait2, wl2, trait3, trait4]
func (am *AppManifests) AssembledManifests() ([]*unstructured.Unstructured, error) {
if !am.finalized {
am.assemble()
@@ -102,10 +106,9 @@ func (am *AppManifests) AssembledManifests() ([]*unstructured.Unstructured, erro
return nil, am.err
}
r := make([]*unstructured.Unstructured, 0)
for _, wl := range am.assembledWorkloads {
for compName, wl := range am.assembledWorkloads {
r = append(r, wl.DeepCopy())
}
for _, ts := range am.assembledTraits {
ts := am.assembledTraits[compName]
for _, t := range ts {
r = append(r, t.DeepCopy())
}
@@ -250,11 +253,12 @@ func (am *AppManifests) complete() {
func (am *AppManifests) finalizeAssemble(err error) {
am.finalized = true
if err != nil {
klog.ErrorS(err, "Failed assembling manifests for application", "name", am.appName, "revision", am.AppRevision.GetName())
am.err = errors.WithMessagef(err, "cannot assemble resources' manifests for application %q", am.appName)
if err == nil {
klog.InfoS("Successfully assemble manifests for application", "name", am.appName, "revision", am.AppRevision.GetName(), "namespace", am.appNamespace)
return
}
klog.InfoS("Successfully assemble manifests for application", "name", am.appName, "revision", am.AppRevision.GetName(), "namespace", am.appNamespace)
klog.ErrorS(err, "Failed assembling manifests for application", "name", am.appName, "revision", am.AppRevision.GetName())
am.err = errors.WithMessagef(err, "cannot assemble resources' manifests for application %q", am.appName)
}
// AssembleOptions is highly coulped with AppRevision, should check the AppRevision provides all info
@@ -302,10 +306,6 @@ func (am *AppManifests) setNamespace(obj *unstructured.Unstructured) {
}
}
func (am *AppManifests) setOwnerReference(obj *unstructured.Unstructured) {
obj.SetOwnerReferences([]metav1.OwnerReference{*am.appOwnerRef})
}
func (am *AppManifests) assembleWorkload(comp *v1alpha2.Component, labels map[string]string) (*unstructured.Unstructured, error) {
compName := comp.Name
wl, err := util.RawExtension2Unstructured(&comp.Spec.Workload)
@@ -318,7 +318,6 @@ func (am *AppManifests) assembleWorkload(comp *v1alpha2.Component, labels map[st
am.setWorkloadLabels(wl, labels)
am.setAnnotations(wl)
am.setNamespace(wl)
am.setOwnerReference(wl)
workloadType := wl.GetLabels()[oam.WorkloadTypeLabel]
compDefinition := am.AppRevision.Spec.ComponentDefinitions[workloadType]
@@ -364,7 +363,6 @@ func (am *AppManifests) assembleTrait(compTrait v1alpha2.ComponentTrait, compNam
am.setTraitLabels(trait, labels)
am.setAnnotations(trait)
am.setNamespace(trait)
am.setOwnerReference(trait)
klog.InfoS("Successfully assemble a trait", "trait", klog.KObj(trait), "APIVersion", trait.GetAPIVersion(), "Kind", trait.GetKind())
return trait, nil
}

View File

@@ -22,7 +22,6 @@ import (
runtimev1alpha1 "github.com/crossplane/crossplane-runtime/apis/core/v1alpha1"
"github.com/ghodss/yaml"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
@@ -103,8 +102,6 @@ var _ = Describe("Test Assemble Options", func() {
oam.WorkloadTypeLabel,
oam.LabelOAMResourceType))
Expect(len(wl.GetAnnotations())).Should(Equal(1))
ownerRef := metav1.GetControllerOf(wl)
Expect(ownerRef.Kind).Should(Equal("Application"))
By("Verify trait metadata (name, namespace, labels, annotations, ownerRef)")
trait := traits[compName][0]
@@ -124,8 +121,6 @@ var _ = Describe("Test Assemble Options", func() {
oam.TraitTypeLabel,
oam.LabelOAMResourceType))
Expect(len(wl.GetAnnotations())).Should(Equal(1))
ownerRef = metav1.GetControllerOf(trait)
Expect(ownerRef.Kind).Should(Equal("Application"))
By("Verify set workload reference to trait")
scaler := traits[compName][2]

View File

@@ -26,10 +26,8 @@ import (
kruisev1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1"
"github.com/pkg/errors"
appsv1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/klog/v2"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
@@ -85,7 +83,7 @@ func discoverHelmModuleWorkload(ctx context.Context, c client.Reader, assembledW
}
}
workloadByHelm := &unstructured.Unstructured{}
workloadByHelm := assembledWorkload.DeepCopy()
if err := c.Get(ctx, client.ObjectKey{Namespace: ns, Name: qualifiedWorkloadName}, workloadByHelm); err != nil {
return err
}
@@ -102,7 +100,7 @@ func discoverHelmModuleWorkload(ctx context.Context, c client.Reader, assembledW
"annotations", annots, "labels", labels)
return err
}
*assembledWorkload = *workloadByHelm
assembledWorkload.SetName(qualifiedWorkloadName)
return nil
}
@@ -126,12 +124,7 @@ func PrepareWorkloadForRollout() WorkloadOption {
advancedStatefulSetDisablePath = "spec.updateStrategy.rollingUpdate.paused"
deploymentDisablePath = "spec.paused"
)
// change the ownerReference and rollout controller will take it over
ownerRef := metav1.GetControllerOf(assembledWorkload)
ownerRef.Controller = pointer.BoolPtr(false)
pv := fieldpath.Pave(assembledWorkload.UnstructuredContent())
// TODO: we can get the workloadDefinition name from workload.GetLabels()["oam.WorkloadTypeLabel"]
// and use a special field like "disablePath" in the definition to allow configurable behavior

View File

@@ -256,8 +256,13 @@ var _ = Describe("Test WorkloadOption", func() {
helm: &common.Helm{
Release: runtime.RawExtension{Raw: releaseRaw},
},
wantWorkload: wl.DeepCopy(),
wantErr: nil,
wantWorkload: func() *unstructured.Unstructured {
r := &unstructured.Unstructured{}
r.SetNamespace(ns)
r.SetName("test-rls-test-chart")
return r
}(),
wantErr: nil,
}),
)
})

View File

@@ -24,12 +24,14 @@ import (
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/retry"
"k8s.io/klog/v2"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/utils/apply"
)
@@ -51,32 +53,33 @@ type AppManifestsDispatcher struct {
applicator apply.Applicator
gcHandler GarbageCollector
appRev *v1beta1.ApplicationRevision
oldRT *v1beta1.ResourceTracker
skipGC bool
appRev *v1beta1.ApplicationRevision
previousRT *v1beta1.ResourceTracker
skipGC bool
appRevName string
namespace string
newRTName string
newRT *v1beta1.ResourceTracker
appRevName string
namespace string
currentRTName string
currentRT *v1beta1.ResourceTracker
}
// EnableGC return an AppManifestsDispatcher that always do GC after dispatching resources.
// GC will calculate diff between the dispatched resouces and ones recorded in the given resource tracker.
func (a *AppManifestsDispatcher) EnableGC(rt *v1beta1.ResourceTracker) *AppManifestsDispatcher {
// EnableUpgradeAndGC return an AppManifestsDispatcher that always do GC after dispatching resources.
// For resources exists in two revision, dispatcher will update their owner to the new resource tracker.
// GC will calculate diff between the dispatched resources and ones recorded in the given resource tracker.
func (a *AppManifestsDispatcher) EnableUpgradeAndGC(rt *v1beta1.ResourceTracker) *AppManifestsDispatcher {
if rt != nil {
a.oldRT = rt.DeepCopy()
a.previousRT = rt.DeepCopy()
}
return a
}
// EnableUpgradeAndSkipGC return an AppManifestsDispatcher that skips GC after dispatching resources.
// For the unchanged resources, dispatcher will update their owner to the newly created resource tracker.
// For resources exists in two revision, dispatcher will update their owner to the new resource tracker.
// It's helpful in a rollout scenario where new revision is going to create a new workload while the old one should not
// be deleted before rollout is terminated.
func (a *AppManifestsDispatcher) EnableUpgradeAndSkipGC(rt *v1beta1.ResourceTracker) *AppManifestsDispatcher {
if rt != nil {
a.oldRT = rt.DeepCopy()
a.previousRT = rt.DeepCopy()
a.skipGC = true
}
return a
@@ -86,7 +89,7 @@ func (a *AppManifestsDispatcher) EnableUpgradeAndSkipGC(rt *v1beta1.ResourceTrac
// If GC is enabled, it will do GC after applying.
// If 'UpgradeAndSkipGC' is enabled, it will:
// - create new resources if not exist before
// - update unchanged resources' owner from the old resource tracker to the new one
// - update unchanged resources' owner from the previous resource tracker to the new one
// - skip deleting(GC) any resources
func (a *AppManifestsDispatcher) Dispatch(ctx context.Context, manifests []*unstructured.Unstructured) (*v1beta1.ResourceTracker, error) {
if err := a.validateAndComplete(ctx); err != nil {
@@ -98,12 +101,12 @@ func (a *AppManifestsDispatcher) Dispatch(ctx context.Context, manifests []*unst
if err := a.applyAndRecordManifests(ctx, manifests); err != nil {
return nil, err
}
if !a.skipGC && a.oldRT != nil && a.oldRT.Name != a.newRTName {
if err := a.gcHandler.GarbageCollect(ctx, a.oldRT, a.newRT); err != nil {
return nil, errors.WithMessagef(err, "cannot do GC based on resource trackers %q and %q", a.oldRT.Name, a.newRTName)
if !a.skipGC && a.previousRT != nil && a.previousRT.Name != a.currentRTName {
if err := a.gcHandler.GarbageCollect(ctx, a.previousRT, a.currentRT); err != nil {
return nil, errors.WithMessagef(err, "cannot do GC based on resource trackers %q and %q", a.previousRT.Name, a.currentRTName)
}
}
return a.newRT.DeepCopy(), nil
return a.currentRT.DeepCopy(), nil
}
// ReferenceScopes add workload reference to scopes' workloadRefPath
@@ -127,61 +130,65 @@ func (a *AppManifestsDispatcher) validateAndComplete(ctx context.Context) error
}
a.appRevName = a.appRev.Name
a.namespace = a.appRev.Namespace
a.newRTName = ConstructResourceTrackerName(a.appRevName, a.namespace)
a.currentRTName = ConstructResourceTrackerName(a.appRevName, a.namespace)
// no matter GC or UpgradeAndSkipGC, it requires a valid and existing resource tracker
if a.oldRT != nil {
existingOldRT := &v1beta1.ResourceTracker{}
if err := a.c.Get(ctx, client.ObjectKey{Name: a.oldRT.Name}, existingOldRT); err != nil {
return errors.Errorf("given resource tracker %q doesn't exist", a.oldRT.Name)
// if upgrade is enbabled (no matter GC or skip GC), it requires a valid existing resource tracker
if a.previousRT != nil && a.previousRT.Name != a.currentRTName {
klog.InfoS("Validate previous resource tracker exists", "previous", klog.KObj(a.previousRT))
gotPreviousRT := &v1beta1.ResourceTracker{}
if err := a.c.Get(ctx, client.ObjectKey{Name: a.previousRT.Name}, gotPreviousRT); err != nil {
return errors.Errorf("given resource tracker %q doesn't exist", a.previousRT.Name)
}
a.oldRT = existingOldRT
a.previousRT = gotPreviousRT
}
klog.InfoS("Given old resource tracker is nil, so skip GC", "appRevision", klog.KObj(a.appRev))
klog.InfoS("Given previous resource tracker is nil or same as current one, so skip GC", "appRevision", klog.KObj(a.appRev))
return nil
}
func (a *AppManifestsDispatcher) createOrGetResourceTracker(ctx context.Context) error {
rt := &v1beta1.ResourceTracker{}
err := a.c.Get(ctx, client.ObjectKey{Name: a.newRTName}, rt)
err := a.c.Get(ctx, client.ObjectKey{Name: a.currentRTName}, rt)
if err == nil {
klog.InfoS("Found a resource tracker matching current app revision", "resourceTracker", rt.Name)
// already exists, no need to update
// because we assume the manifests' references from a specific application revision never change
a.newRT = rt
a.currentRT = rt
return nil
}
if !kerrors.IsNotFound(err) {
return errors.Wrap(err, "cannot get resource tracker")
}
klog.InfoS("Going to create a resource tracker", "resourceTracker", a.newRTName)
rt.SetName(a.newRTName)
klog.InfoS("Going to create a resource tracker", "resourceTracker", a.currentRTName)
rt.SetName(a.currentRTName)
// these labels can help to list resource trackers of a specific application
rt.SetLabels(map[string]string{
oam.LabelAppName: ExtractAppName(a.currentRTName, a.namespace),
oam.LabelAppNamespace: a.namespace,
})
if err := a.c.Create(ctx, rt); err != nil {
klog.ErrorS(err, "Failed to create a resource tracker", "resourceTracker", a.newRTName)
klog.ErrorS(err, "Failed to create a resource tracker", "resourceTracker", a.currentRTName)
return errors.Wrap(err, "cannot create resource tracker")
}
a.newRT = rt
a.currentRT = rt
return nil
}
func (a *AppManifestsDispatcher) applyAndRecordManifests(ctx context.Context, manifests []*unstructured.Unstructured) error {
applyOpts := []apply.ApplyOption{}
if a.oldRT != nil && a.oldRT.Name != a.newRTName {
klog.InfoS("Going to apply and upgrade resources", "from", a.oldRT.Name, "to", a.newRTName)
ctrlUIDs := []types.UID{a.currentRT.UID}
if a.previousRT != nil && a.previousRT.Name != a.currentRTName {
klog.InfoS("Going to apply or upgrade resources", "from", a.previousRT.Name, "to", a.currentRTName)
// if two RT's names are different, it means dispatching operation happens in an upgrade or rollout scenario
// in such two scenarios, for those unchanged manifests, we will
// - check existing resources are controlled by the old resource tracker
// - make sure existing resources are controlled by any of these two resource trackers
// - set new resource tracker as their controller owner
applyOpts = append(applyOpts, apply.MustBeControllableBy(a.oldRT.UID))
} else {
applyOpts = append(applyOpts, apply.MustBeControllableBy(a.newRT.UID))
ctrlUIDs = append(ctrlUIDs, a.previousRT.UID)
}
applyOpts := []apply.ApplyOption{apply.MustBeControllableByAny(ctrlUIDs)}
ownerRef := metav1.OwnerReference{
APIVersion: v1beta1.SchemeGroupVersion.String(),
Kind: reflect.TypeOf(v1beta1.ResourceTracker{}).Name(),
Name: a.newRT.Name,
UID: a.newRT.UID,
Name: a.currentRT.Name,
UID: a.currentRT.UID,
Controller: pointer.BoolPtr(true),
BlockOwnerDeletion: pointer.BoolPtr(true),
}
@@ -201,45 +208,45 @@ func (a *AppManifestsDispatcher) applyAndRecordManifests(ctx context.Context, ma
}
func (a *AppManifestsDispatcher) updateResourceTrackerStatus(ctx context.Context, appliedManifests []*unstructured.Unstructured) error {
// merge applied resources and already recorded ones
trackedResources := []v1beta1.TypedReference{}
// merge applied resources and already tracked ones
if a.currentRT.Status.TrackedResources == nil {
a.currentRT.Status.TrackedResources = make([]v1beta1.TypedReference, 0)
}
for _, rsc := range appliedManifests {
ref := v1beta1.TypedReference{
appliedRef := v1beta1.TypedReference{
APIVersion: rsc.GetAPIVersion(),
Kind: rsc.GetKind(),
Name: rsc.GetName(),
Namespace: rsc.GetNamespace(),
}
alreadyTracked := false
for _, existing := range a.newRT.Status.TrackedResources {
if existing.APIVersion == ref.APIVersion && existing.Kind == ref.Kind &&
existing.Name == ref.Name && existing.Namespace == ref.Namespace {
for _, tracked := range a.currentRT.Status.TrackedResources {
if tracked.APIVersion == appliedRef.APIVersion && tracked.Kind == appliedRef.Kind &&
tracked.Name == appliedRef.Name && tracked.Namespace == appliedRef.Namespace {
alreadyTracked = true
break
}
}
if alreadyTracked {
continue
if !alreadyTracked {
a.currentRT.Status.TrackedResources = append(a.currentRT.Status.TrackedResources, appliedRef)
}
trackedResources = append(trackedResources, ref)
}
a.newRT.Status.TrackedResources = trackedResources
// TODO move TrackedResources from status to spec
// update status with retry
copyRT := a.newRT.DeepCopy()
copyRT := a.currentRT.DeepCopy()
sts := copyRT.Status
if err := retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
if err = a.c.Get(ctx, client.ObjectKey{Name: a.newRTName}, copyRT); err != nil {
if err = a.c.Get(ctx, client.ObjectKey{Name: a.currentRTName}, copyRT); err != nil {
return
}
copyRT.Status = sts
return a.c.Status().Update(ctx, copyRT)
}); err != nil {
klog.ErrorS(err, "Failed to update resource tracker status", "resourceTracker", a.newRTName)
klog.ErrorS(err, "Failed to update resource tracker status", "resourceTracker", a.currentRTName)
return errors.Wrap(err, "cannot update resource tracker status")
}
klog.InfoS("Successfully update resource tracker status", "resourceTracker", a.newRTName)
klog.InfoS("Successfully update resource tracker status", "resourceTracker", a.currentRTName)
return nil
}

View File

@@ -229,7 +229,7 @@ var _ = Describe("Test AppManifestsDispatcher", func() {
Expect(k8sClient.Get(ctx, client.ObjectKey{Name: pvName1}, &corev1.PersistentVolume{})).Should(Succeed())
By("Dispatch application revision 1 again with v1 as latest RT")
dp = NewAppManifestsDispatcher(k8sClient, appRev1).EnableGC(rtForAppV1)
dp = NewAppManifestsDispatcher(k8sClient, appRev1).EnableUpgradeAndGC(rtForAppV1)
_, err = dp.Dispatch(ctx, []*unstructured.Unstructured{deploy1, svc1, pv1})
Expect(err).Should(BeNil())
By("Verify resources still exist")
@@ -250,7 +250,7 @@ var _ = Describe("Test AppManifestsDispatcher", func() {
Expect(k8sClient.Get(ctx, client.ObjectKey{Name: pvName1}, &corev1.PersistentVolume{})).Should(Succeed())
By("Dispatch application revision 2 with v1 as latest RT")
dp2 := NewAppManifestsDispatcher(k8sClient, appRev2).EnableGC(rtForAppV1)
dp2 := NewAppManifestsDispatcher(k8sClient, appRev2).EnableUpgradeAndGC(rtForAppV1)
_, err = dp2.Dispatch(ctx, []*unstructured.Unstructured{deploy2, svc2, pv2})
Expect(err).Should(BeNil())
By("Verify v2 resources are applied successfully")
@@ -284,7 +284,7 @@ var _ = Describe("Test AppManifestsDispatcher", func() {
Expect(owner.Name).Should(Equal(rtForAppV1.Name))
By("Dispatch application revision 2 with v1 as latest RT")
dp2 := NewAppManifestsDispatcher(k8sClient, appRev2).EnableGC(rtForAppV1)
dp2 := NewAppManifestsDispatcher(k8sClient, appRev2).EnableUpgradeAndGC(rtForAppV1)
rtForAppV2, err := dp2.Dispatch(ctx, []*unstructured.Unstructured{deploy2, svc1, pv2}) // manifests have 'svc1'
Expect(err).Should(BeNil())

View File

@@ -93,8 +93,8 @@ func (h *GCHandler) validate() error {
oldRTName := h.oldRT.Name
newRTName := h.newRT.Name
if strings.HasSuffix(oldRTName, h.namespace) && strings.HasSuffix(newRTName, h.namespace) {
if extractAppNameFromResourceTrackerName(oldRTName, h.namespace) ==
extractAppNameFromResourceTrackerName(newRTName, h.namespace) {
if ExtractAppName(oldRTName, h.namespace) ==
ExtractAppName(newRTName, h.namespace) {
return nil
}
}

View File

@@ -28,7 +28,13 @@ func ConstructResourceTrackerName(appRevName, ns string) string {
return fmt.Sprintf("%s-%s", appRevName, ns)
}
func extractAppNameFromResourceTrackerName(name, ns string) string {
splits := strings.Split(strings.TrimSuffix(name, "-"+ns), "-")
// ExtractAppName gets the application name from a resource tracker name.
// A tracker is named "<appName>-<revisionSuffix>-<namespace>"; this strips the
// namespace suffix, then drops the trailing revision segment. Returns "" when
// no revision segment can be found.
func ExtractAppName(resourceTrackerName, ns string) string {
	trimmed := strings.TrimSuffix(resourceTrackerName, "-"+ns)
	idx := strings.LastIndex(trimmed, "-")
	if idx < 0 {
		return ""
	}
	return trimmed[:idx]
}
// ExtractAppRevisionName gets the application revision name from a resource tracker
// name by removing the trailing "-<namespace>" suffix; the name is returned unchanged
// when that suffix is absent.
func ExtractAppRevisionName(resourceTrackerName, ns string) string {
	suffix := "-" + ns
	if strings.HasSuffix(resourceTrackerName, suffix) {
		return resourceTrackerName[:len(resourceTrackerName)-len(suffix)]
	}
	return resourceTrackerName
}

View File

@@ -41,12 +41,16 @@ func TestExtractAppNameFromResourceTrackerName(t *testing.T) {
for _, tc := range testcases {
gotRTName := ConstructResourceTrackerName(tc.appRevName, tc.ns)
gotAppName := extractAppNameFromResourceTrackerName(gotRTName, tc.ns)
gotAppName := ExtractAppName(gotRTName, tc.ns)
gotAppRevName := ExtractAppRevisionName(gotRTName, tc.ns)
if gotRTName != tc.wantRTName {
t.Fatalf("expect resource tracker name %q but got %q", tc.wantRTName, gotRTName)
}
if gotAppName != tc.wantAppName {
t.Fatalf("expect app name %q but got %q", tc.wantAppName, gotAppName)
}
if gotAppRevName != tc.appRevName {
t.Fatalf("expect app revision name %q but got %q", tc.appRevName, gotAppRevName)
}
}
}

View File

@@ -31,6 +31,7 @@ import (
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/application/dispatch"
"github.com/oam-dev/kubevela/pkg/controller/utils"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/util"
@@ -339,21 +340,23 @@ func cleanUpApplicationRevision(ctx context.Context, h *appHandler) error {
// gatherUsingAppRevision get all using appRevisions include app's status pointing to and appContext point to
func gatherUsingAppRevision(ctx context.Context, h *appHandler) (map[string]bool, error) {
ns := h.app.Namespace
listOpts := []client.ListOption{
client.InNamespace(h.app.Namespace),
client.MatchingLabels{oam.LabelAppName: h.app.Name},
}
client.MatchingLabels{
oam.LabelAppName: h.app.Name,
oam.LabelAppNamespace: ns,
}}
usingRevision := map[string]bool{}
if h.app.Status.LatestRevision != nil && len(h.app.Status.LatestRevision.Name) != 0 {
usingRevision[h.app.Status.LatestRevision.Name] = true
}
appContextList := new(v1alpha2.ApplicationContextList)
err := h.r.List(ctx, appContextList, listOpts...)
if err != nil {
rtList := &v1beta1.ResourceTrackerList{}
if err := h.r.List(ctx, rtList, listOpts...); err != nil {
return nil, err
}
for _, appContext := range appContextList.Items {
usingRevision[appContext.Spec.ApplicationRevisionName] = true
for _, rt := range rtList.Items {
appRev := dispatch.ExtractAppRevisionName(rt.Name, ns)
usingRevision[appRev] = true
}
appDeployUsingRevision, err := utils.CheckAppDeploymentUsingAppRevision(ctx, h.r, h.app.Namespace, h.app.Name)
if err != nil {

View File

@@ -22,8 +22,6 @@ import (
"fmt"
"time"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -37,7 +35,7 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/util"
@@ -77,11 +75,8 @@ var _ = Describe("Test application controller clean up ", func() {
property := fmt.Sprintf(`{"cmd":["sleep","1000"],"image":"busybox:%d"}`, i)
checkApp.Spec.Components[0].Properties = runtime.RawExtension{Raw: []byte(property)}
Expect(k8sClient.Update(ctx, checkApp)).Should(BeNil())
_, err := reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
Expect(err).Should(BeNil())
reconcileRetry(reconciler, ctrl.Request{NamespacedName: appKey})
}
appContext := new(v1alpha2.ApplicationContext)
Expect(k8sClient.Get(ctx, appKey, appContext)).Should(BeNil())
listOpts := []client.ListOption{
client.InNamespace(namespace),
client.MatchingLabels{
@@ -110,6 +105,9 @@ var _ = Describe("Test application controller clean up ", func() {
deletedRevison := new(v1beta1.ApplicationRevision)
revKey := types.NamespacedName{Namespace: namespace, Name: appName + "-v1"}
Eventually(func() error {
if _, err = reconciler.Reconcile(ctrl.Request{NamespacedName: appKey}); err != nil {
return err
}
err := k8sClient.List(ctx, appRevisionList, listOpts...)
if err != nil {
return err
@@ -132,6 +130,9 @@ var _ = Describe("Test application controller clean up ", func() {
_, err = reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
Expect(err).Should(BeNil())
Eventually(func() error {
if _, err = reconciler.Reconcile(ctrl.Request{NamespacedName: appKey}); err != nil {
return err
}
err := k8sClient.List(ctx, appRevisionList, listOpts...)
if err != nil {
return err
@@ -161,11 +162,8 @@ var _ = Describe("Test application controller clean up ", func() {
property := fmt.Sprintf(`{"cmd":["sleep","1000"],"image":"busybox:%d"}`, i)
checkApp.Spec.Components[0].Properties = runtime.RawExtension{Raw: []byte(property)}
Expect(k8sClient.Update(ctx, checkApp)).Should(BeNil())
_, err := reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
Expect(err).Should(BeNil())
reconcileRetry(reconciler, ctrl.Request{NamespacedName: appKey})
}
appContext := new(v1alpha2.ApplicationContext)
Expect(k8sClient.Get(ctx, appKey, appContext)).Should(util.NotFoundMatcher{})
listOpts := []client.ListOption{
client.InNamespace(namespace),
client.MatchingLabels{
@@ -194,6 +192,9 @@ var _ = Describe("Test application controller clean up ", func() {
deletedRevison := new(v1beta1.ApplicationRevision)
revKey := types.NamespacedName{Namespace: namespace, Name: appName + "-v1"}
Eventually(func() error {
if _, err = reconciler.Reconcile(ctrl.Request{NamespacedName: appKey}); err != nil {
return err
}
err := k8sClient.List(ctx, appRevisionList, listOpts...)
if err != nil {
return err
@@ -209,7 +210,7 @@ var _ = Describe("Test application controller clean up ", func() {
return fmt.Errorf("appRevision collection mismatch")
}
return nil
}, time.Second*30, time.Microsecond*300).Should(BeNil())
}, time.Second*10, time.Second*2).Should(BeNil())
By("update app again will gc appRevision2")
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
@@ -219,6 +220,9 @@ var _ = Describe("Test application controller clean up ", func() {
_, err = reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
Expect(err).Should(BeNil())
Eventually(func() error {
if _, err = reconciler.Reconcile(ctrl.Request{NamespacedName: appKey}); err != nil {
return err
}
err := k8sClient.List(ctx, appRevisionList, listOpts...)
if err != nil {
return err
@@ -251,11 +255,8 @@ var _ = Describe("Test application controller clean up ", func() {
property := fmt.Sprintf(`{"cmd":["sleep","1000"],"image":"busybox:%d"}`, i)
checkApp.Spec.Components[0].Properties = runtime.RawExtension{Raw: []byte(property)}
Expect(k8sClient.Update(ctx, checkApp)).Should(BeNil())
_, err := reconciler.Reconcile(ctrl.Request{NamespacedName: appKey})
Expect(err).Should(BeNil())
reconcileRetry(reconciler, ctrl.Request{NamespacedName: appKey})
}
appContext := new(v1alpha2.ApplicationContext)
Expect(k8sClient.Get(ctx, appKey, appContext)).Should(util.NotFoundMatcher{})
listOpts := []client.ListOption{
client.InNamespace(namespace),
client.MatchingLabels{
@@ -284,6 +285,9 @@ var _ = Describe("Test application controller clean up ", func() {
deletedRevison := new(v1beta1.ApplicationRevision)
revKey := types.NamespacedName{Namespace: namespace, Name: appName + "-v1"}
Eventually(func() error {
if _, err = reconciler.Reconcile(ctrl.Request{NamespacedName: appKey}); err != nil {
return err
}
err := k8sClient.List(ctx, appRevisionList, listOpts...)
if err != nil {
return err
@@ -299,7 +303,7 @@ var _ = Describe("Test application controller clean up ", func() {
return fmt.Errorf("appRevision collection mismatch")
}
return nil
}, time.Second*30, time.Microsecond*300).Should(BeNil())
}, time.Second*10, time.Second*2).Should(BeNil())
By("update create appDeploy check gc logic")
appDeploy := &v1beta1.AppDeployment{
@@ -331,6 +335,9 @@ var _ = Describe("Test application controller clean up ", func() {
Expect(err).Should(BeNil())
}
Eventually(func() error {
if _, err = reconciler.Reconcile(ctrl.Request{NamespacedName: appKey}); err != nil {
return err
}
err := k8sClient.List(ctx, appRevisionList, listOpts...)
if err != nil {
return err
@@ -381,11 +388,13 @@ var _ = Describe("Test gatherUsingAppRevision func", func() {
Name: appName + "-v1",
}
Expect(k8sClient.Create(ctx, app)).Should(BeNil())
appContext := getAppContext(namespace, appName+"-ctx", appName+"-v2")
appContext.Labels = map[string]string{
oam.LabelAppName: appName,
}
Expect(k8sClient.Create(ctx, appContext)).Should(BeNil())
rt := &v1beta1.ResourceTracker{}
rt.SetName(appName + "-v2-" + namespace)
rt.SetLabels(map[string]string{
oam.LabelAppName: appName,
oam.LabelAppNamespace: namespace,
})
Expect(k8sClient.Create(ctx, rt)).Should(BeNil())
handler := appHandler{
r: reconciler,
app: app,
@@ -408,19 +417,3 @@ var _ = Describe("Test gatherUsingAppRevision func", func() {
}, time.Second*60, time.Microsecond).Should(BeNil())
})
})
// getAppContext builds an in-memory v1alpha2.ApplicationContext test fixture
// in the given namespace, with the given name, whose spec points at the
// application revision named by pointingRev.
func getAppContext(namespace, name string, pointingRev string) *v1alpha2.ApplicationContext {
	return &v1alpha2.ApplicationContext{
		TypeMeta: metav1.TypeMeta{
			APIVersion: v1alpha2.ApplicationContextKindAPIVersion,
			Kind:       v1alpha2.ApplicationContextKind,
		},
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespace,
			// BUGFIX: use the caller-supplied name. Previously this was
			// hard-coded to "app-rollout", silently ignoring the `name`
			// argument (callers pass e.g. appName+"-ctx").
			Name: name,
		},
		Spec: v1alpha2.ApplicationContextSpec{
			ApplicationRevisionName: pointingRev,
		},
	}
}

View File

@@ -29,7 +29,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
@@ -259,20 +258,8 @@ var _ = Describe("test generate revision ", func() {
Expect(len(curAppRevision.GetOwnerReferences())).Should(BeEquivalentTo(1))
Expect(curAppRevision.GetOwnerReferences()[0].Kind).Should(Equal(v1alpha2.ApplicationKind))
By("Verify that an application context is created to point to the correct appRevision")
curAC := &v1alpha2.ApplicationContext{}
Expect(handler.r.Get(ctx,
types.NamespacedName{Namespace: ns.Name, Name: app.Name},
curAC)).NotTo(HaveOccurred())
Expect(curAC.GetLabels()[oam.LabelAppRevisionHash]).Should(Equal(appHash1))
Expect(curAC.Spec.ApplicationRevisionName).Should(Equal(curApp.Status.LatestRevision.Name))
Expect(curAC.GetAnnotations()[annoKey1]).ShouldNot(BeEmpty())
Expect(curAC.GetAnnotations()[oam.AnnotationInplaceUpgrade]).Should(Equal("true"))
Expect(metav1.GetControllerOf(curAC)).ShouldNot(BeNil())
Expect(metav1.GetControllerOf(curAC).Kind).Should(Equal(v1alpha2.ApplicationKind))
By("Apply the application again without any spec change")
// there can be annotation change and appContext should have the exact label/annotation as app
handler.previousRevisionName = "revision-apply-test-v1"
annoKey2 := "testKey2"
app.SetAnnotations(map[string]string{annoKey2: "true"})
lastRevision := curApp.Status.LatestRevision.Name
@@ -301,15 +288,6 @@ var _ = Describe("test generate revision ", func() {
time.Second*5, time.Millisecond*500).Should(BeNil())
Expect(err).Should(Succeed())
Expect(curAppRevision.GetLabels()[oam.LabelAppRevisionHash]).Should(Equal(appHash1))
By("Verify that an application context is created to point to the same appRevision")
curAC = &v1alpha2.ApplicationContext{}
Expect(handler.r.Get(ctx,
types.NamespacedName{Namespace: ns.Name, Name: app.Name},
curAC)).NotTo(HaveOccurred())
Expect(curAC.GetLabels()[oam.LabelAppRevisionHash]).Should(Equal(appHash1))
Expect(curAC.Spec.ApplicationRevisionName).Should(Equal(lastRevision))
Expect(curAC.GetAnnotations()[annoKey1]).Should(BeEmpty())
Expect(curAC.GetAnnotations()[annoKey2]).ShouldNot(BeEmpty())
By("Change the application and apply again")
// bump the image tag
@@ -354,15 +332,6 @@ var _ = Describe("test generate revision ", func() {
Expect(appHash1).ShouldNot(Equal(appHash2))
Expect(curAppRevision.GetLabels()[oam.LabelAppRevisionHash]).Should(Equal(appHash2))
Expect(curApp.Status.LatestRevision.RevisionHash).Should(Equal(appHash2))
By("Verify that an application context is created to point to the right appRevision")
curAC = &v1alpha2.ApplicationContext{}
Expect(handler.r.Get(ctx,
types.NamespacedName{Namespace: ns.Name, Name: app.Name},
curAC)).NotTo(HaveOccurred())
Expect(curAC.GetLabels()[oam.LabelAppRevisionHash]).Should(Equal(appHash2))
Expect(curAC.Spec.ApplicationRevisionName).Should(Equal(curApp.Status.LatestRevision.Name))
Expect(curAC.GetAnnotations()[annoKey2]).ShouldNot(BeEmpty())
Expect(metav1.GetControllerOf(curAC).Kind).Should(Equal(v1alpha2.ApplicationKind))
})
It("Test App with rollout template", func() {
@@ -406,19 +375,7 @@ var _ = Describe("test generate revision ", func() {
Expect(len(curAppRevision.GetOwnerReferences())).Should(BeEquivalentTo(1))
Expect(curAppRevision.GetOwnerReferences()[0].Kind).Should(Equal(v1alpha2.ApplicationKind))
By("Verify that no application context is created")
curACs := &v1alpha2.ApplicationContextList{}
opts := []client.ListOption{
client.InNamespace(namespaceName),
}
Eventually(
func() error {
return handler.r.List(ctx, curACs, opts...)
}, time.Second*5, time.Microsecond*500).Should(Succeed())
Expect(len(curACs.Items)).Should(BeEquivalentTo(0))
By("Apply the application again without any spec change but remove the rollout annotation")
// there can be annotation change and appContext should have the exact label/annotation as app
annoKey2 := "testKey2"
app.SetAnnotations(map[string]string{annoKey2: "true"})
lastRevision := curApp.Status.LatestRevision.Name
@@ -448,14 +405,6 @@ var _ = Describe("test generate revision ", func() {
Expect(err).Should(Succeed())
Expect(curAppRevision.GetLabels()[oam.LabelAppRevisionHash]).Should(Equal(appHash1))
Expect(curAppRevision.GetAnnotations()[annoKey2]).ShouldNot(BeEmpty())
By("Verify that an application context is created to point to the same appRevision")
curAC := &v1alpha2.ApplicationContext{}
Expect(handler.r.Get(ctx,
types.NamespacedName{Namespace: ns.Name, Name: app.Name},
curAC)).NotTo(HaveOccurred())
Expect(curAC.GetLabels()[oam.LabelAppRevisionHash]).Should(Equal(appHash1))
Expect(curAC.Spec.ApplicationRevisionName).Should(Equal(lastRevision))
Expect(curAC.GetAnnotations()[annoKey2]).ShouldNot(BeEmpty())
By("Change the application and apply again with rollout")
// bump the image tag
@@ -503,12 +452,6 @@ var _ = Describe("test generate revision ", func() {
Expect(curApp.Status.LatestRevision.RevisionHash).Should(Equal(appHash2))
Expect(curAppRevision.GetAnnotations()[annoKey2]).Should(BeEmpty())
Expect(curAppRevision.GetAnnotations()[oam.AnnotationAppRollout]).ShouldNot(BeEmpty())
By("Verify that no more application context is created")
Eventually(
func() error {
return handler.r.List(ctx, curACs, opts...)
}, time.Second*5, time.Microsecond*500).Should(Succeed())
Expect(len(curACs.Items)).Should(BeEquivalentTo(1))
})
It("Test apply passes all label and annotation from app to appRevision", func() {
@@ -550,7 +493,6 @@ var _ = Describe("test generate revision ", func() {
Expect(appHash1).Should(Equal(curApp.Status.LatestRevision.RevisionHash))
Expect(curAppRevision.GetLabels()[labelKey1]).Should(Equal("true"))
Expect(curAppRevision.GetAnnotations()[annoKey1]).Should(Equal("true"))
// there can be annotation change and appContext should have the exact label/annotation as app
annoKey2 := "testKey2"
app.SetAnnotations(map[string]string{annoKey2: "true"})
labelKey2 := "labelKey2"

View File

@@ -18,13 +18,13 @@ package applicationrollout
import (
"context"
"strconv"
"time"
"github.com/crossplane/crossplane-runtime/pkg/event"
"github.com/crossplane/crossplane-runtime/pkg/meta"
"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/util/retry"
"k8s.io/klog/v2"
@@ -33,13 +33,10 @@ import (
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
oamv1alpha2 "github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/controller/common/rollout"
oamctrl "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/discoverymapper"
oamutil "github.com/oam-dev/kubevela/pkg/oam/util"
)
@@ -110,119 +107,86 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (res reconcile.Result, retErr e
}
// DoReconcile is real reconcile logic for appRollout.
// 1.prepare rollout info: use assemble module in application pkg to generate manifest with appRevision
// 2.determine which component is the common component between source and target AppRevision
// 3.if the target workload doesn't exist yet, template the targetAppRevision to apply the target manifest
// 4.extract target workload and source workload(if sourceAppRevision not empty)
// 5.generate a rolloutPlan controller with source and target workload and call rolloutPlan's reconcile func
// 6.handle output status
// !!! Note the AppRollout object should not be updated in this function as it could be logically used in Application reconcile loop which does not have real AppRollout object.
func (r *Reconciler) DoReconcile(ctx context.Context, appRollout *v1beta1.AppRollout) (res reconcile.Result, retErr error) {
func (r *Reconciler) DoReconcile(ctx context.Context, appRollout *v1beta1.AppRollout) (reconcile.Result, error) {
if len(appRollout.Status.RollingState) == 0 {
appRollout.Status.ResetStatus()
}
targetAppRevisionName := appRollout.Spec.TargetAppRevisionName
sourceAppRevisionName := appRollout.Spec.SourceAppRevisionName
var err error
// no need to proceed if rollout is already in a terminal state and there is no source/target change
doneReconcile := r.handleRollingTerminated(*appRollout, targetAppRevisionName, sourceAppRevisionName)
doneReconcile := r.handleRollingTerminated(*appRollout)
if doneReconcile {
return reconcile.Result{}, nil
}
h := rolloutHandler{Reconciler: r, appRollout: appRollout}
// handle rollout target/source change (only if it's not deleting already)
if isRolloutModified(*appRollout) {
klog.InfoS("rollout target changed, restart the rollout", "new source", sourceAppRevisionName,
"new target", targetAppRevisionName)
r.record.Event(appRollout, event.Normal("Rollout Restarted",
"rollout target changed, restart the rollout", "new source", sourceAppRevisionName,
"new target", targetAppRevisionName))
// we are okay to move directly to restart the rollout since we are at the terminal state
// however, we need to make sure we properly finalizing the existing rollout before restart if it's
// still in the middle of rolling out
if appRollout.Status.RollingState != v1alpha1.RolloutSucceedState &&
appRollout.Status.RollingState != v1alpha1.RolloutFailedState {
// continue to handle the previous resources until we are okay to move forward
targetAppRevisionName = appRollout.Status.LastUpgradedTargetAppRevision
sourceAppRevisionName = appRollout.Status.LastSourceAppRevision
} else {
// mark so that we don't think we are modified again
appRollout.Status.LastUpgradedTargetAppRevision = targetAppRevisionName
appRollout.Status.LastSourceAppRevision = sourceAppRevisionName
}
appRollout.Status.StateTransition(v1alpha1.RollingModifiedEvent)
h.handleRolloutModified()
} else {
// except modified in middle of one rollout, in most cases use real source/target in appRollout and revision as this round reconcile
h.sourceRevName = appRollout.Spec.SourceAppRevisionName
h.targetRevName = appRollout.Spec.TargetAppRevisionName
}
// Get the source application first
var sourceApRev, targetAppRev *oamv1alpha2.ApplicationRevision
var sourceApp, targetApp *oamv1alpha2.ApplicationContext
var err error
// call assemble func generate source and target manifest
if err = h.prepareRollout(ctx); err != nil {
return reconcile.Result{}, err
}
if appRollout.Status.RollingState == v1alpha1.RolloutDeletingState {
if sourceAppRevisionName == "" {
klog.InfoS("source app fields not filled, this is a scale operation", "appRollout", klog.KRef(appRollout.Namespace, appRollout.Name))
} else {
sourceApRev, sourceApp, err = r.getSourceAppContexts(ctx,
appRollout.Spec.ComponentList, appRollout.Status.RollingState, sourceAppRevisionName)
if err != nil && !apierrors.IsNotFound(err) {
return ctrl.Result{}, err
}
}
// Get the
targetAppRev, targetApp, err = r.getTargetApps(ctx, appRollout.Spec.ComponentList,
appRollout.Status.RollingState, targetAppRevisionName)
if err != nil && !apierrors.IsNotFound(err) {
return ctrl.Result{}, err
}
if sourceApp == nil && targetApp == nil {
// we only support rolling out one workload for now, so here we determine which component needs to be rolled out
if err = h.determineRolloutComponent(); err != nil {
return reconcile.Result{}, err
}
var sourceWorkload, targetWorkload *unstructured.Unstructured
// we should handle two special cases before call rolloutPlan Reconcile
switch h.appRollout.Status.RollingState {
case v1alpha1.RolloutDeletingState:
// application has been deleted, the related appRev haven't removed
if h.sourceAppRevision == nil && h.targetAppRevision == nil {
klog.InfoS("Both the target and the source app are gone", "appRollout",
klog.KRef(appRollout.Namespace, appRollout.Name), "rolling state", appRollout.Status.RollingState)
appRollout.Status.StateTransition(v1alpha1.RollingFinalizedEvent)
h.appRollout.Status.StateTransition(v1alpha1.RollingFinalizedEvent)
// update the appRollout status
return ctrl.Result{}, nil
}
} else {
// TODO: try to refactor this into a method with reasonable number of parameters and output
if sourceAppRevisionName == "" {
klog.Info("source app fields not filled, this is a scale operation")
} else {
sourceApRev, sourceApp, err = r.getSourceAppContexts(ctx,
appRollout.Spec.ComponentList, appRollout.Status.RollingState, sourceAppRevisionName)
if err != nil {
return ctrl.Result{}, err
}
// check if the app is templated
if sourceApp.Status.RollingStatus != types.RollingTemplated {
klog.Info("source app revision is not ready for rolling yet", "application revision", sourceAppRevisionName)
r.record.Event(appRollout, event.Normal("Rollout Paused",
"source app revision is not ready for rolling yet", "application revision", sourceApp.GetName()))
return ctrl.Result{RequeueAfter: 3 * time.Second}, nil
}
}
// Get the target application revision after the source app is templated
targetAppRev, targetApp, err = r.getTargetApps(ctx, appRollout.Spec.ComponentList,
appRollout.Status.RollingState, targetAppRevisionName)
case v1alpha1.LocatingTargetAppState:
// dispatch sourceWorkload
err = h.templateSourceManifest(ctx)
if err != nil {
return ctrl.Result{}, err
return reconcile.Result{}, err
}
// this ensures that we handle the target app init only once
appRollout.Status.StateTransition(v1alpha1.AppLocatedEvent)
// check if the app is templated
if targetApp.Status.RollingStatus != types.RollingTemplated {
r.record.Event(appRollout, event.Normal("Rollout Paused",
"target app revision is not ready for rolling yet", "application revision", targetApp.GetName()))
return ctrl.Result{RequeueAfter: 3 * time.Second}, nil
// the target manifest hasn't been templated yet, so first call dispatch to template the target manifest
err = h.templateTargetManifest(ctx)
if err != nil {
return reconcile.Result{}, err
}
// this ensures that we template workload only once
h.appRollout.Status.StateTransition(v1alpha1.AppLocatedEvent)
return reconcile.Result{RequeueAfter: 3 * time.Second}, nil
default:
// in other cases there is no need to do anything
}
// we get the real workloads from the spec of the revisions
targetWorkload, sourceWorkload, err := r.extractWorkloads(ctx, appRollout.Spec.ComponentList, targetAppRev, sourceApRev)
sourceWorkload, targetWorkload, err = h.fetchSourceAndTargetWorkload(ctx)
if err != nil {
klog.ErrorS(err, "cannot fetch the workloads to upgrade", "target application",
klog.KRef(appRollout.Namespace, targetAppRevisionName), "source application", klog.KRef(appRollout.Namespace, sourceAppRevisionName),
"commonComponent", appRollout.Spec.ComponentList)
return ctrl.Result{RequeueAfter: 5 * time.Second}, err
return reconcile.Result{}, err
}
klog.InfoS("get the target workload we need to work on", "targetWorkload", klog.KObj(targetWorkload))
if sourceWorkload != nil {
klog.InfoS("get the source workload we need to work on", "sourceWorkload", klog.KObj(sourceWorkload))
}
// reconcile the rollout part of the spec given the target and source workload
rolloutPlanController := rollout.NewRolloutPlanController(r, appRollout, r.record,
&appRollout.Spec.RolloutPlan, &appRollout.Status.RolloutStatus, targetWorkload, sourceWorkload)
@@ -234,18 +198,19 @@ func (r *Reconciler) DoReconcile(ctx context.Context, appRollout *v1beta1.AppRol
appRollout.Status.LastUpgradedTargetAppRevision = appRollout.Spec.TargetAppRevisionName
appRollout.Status.LastSourceAppRevision = appRollout.Spec.SourceAppRevisionName
}
if rolloutStatus.RollingState == v1alpha1.RolloutSucceedState {
klog.InfoS("rollout succeeded, record the source and target app revision", "source", sourceAppRevisionName,
"target", targetAppRevisionName)
if err = r.finalizeRollingSucceeded(ctx, sourceApp, targetApp); err != nil {
return ctrl.Result{}, err
err = h.finalizeRollingSucceeded(ctx)
if err != nil {
return reconcile.Result{}, err
}
klog.InfoS("rollout succeeded, record the source and target app revision", "source", appRollout.Spec.SourceAppRevisionName,
"target", appRollout.Spec.TargetAppRevisionName)
} else if rolloutStatus.RollingState == v1alpha1.RolloutFailedState {
klog.InfoS("rollout failed, record the source and target app revision", "source", sourceAppRevisionName,
"target", targetAppRevisionName, "revert on deletion", appRollout.Spec.RevertOnDelete)
klog.InfoS("rollout failed, record the source and target app revision", "source", appRollout.Spec.SourceAppRevisionName,
"target", appRollout.Spec.TargetAppRevisionName, "revert on deletion", appRollout.Spec.RevertOnDelete)
}
// update the appRollout status
return result, nil
}
@@ -289,43 +254,20 @@ func (r *Reconciler) handleFinalizer(ctx context.Context, appRollout *v1beta1.Ap
return false, reconcile.Result{}, nil
}
func (r *Reconciler) handleRollingTerminated(appRollout v1beta1.AppRollout, targetAppRevisionName string,
sourceAppRevisionName string) bool {
func (r *Reconciler) handleRollingTerminated(appRollout v1beta1.AppRollout) bool {
// handle rollout completed
if appRollout.Status.RollingState == v1alpha1.RolloutSucceedState ||
appRollout.Status.RollingState == v1alpha1.RolloutFailedState {
if appRollout.Status.LastUpgradedTargetAppRevision == targetAppRevisionName &&
appRollout.Status.LastSourceAppRevision == sourceAppRevisionName {
klog.InfoS("rollout completed, no need to reconcile", "source", sourceAppRevisionName,
"target", targetAppRevisionName)
if appRollout.Status.LastUpgradedTargetAppRevision == appRollout.Spec.TargetAppRevisionName &&
appRollout.Status.LastSourceAppRevision == appRollout.Spec.TargetAppRevisionName {
klog.InfoS("rollout completed, no need to reconcile", "source", appRollout.Spec.SourceAppRevisionName,
"target", appRollout.Spec.TargetAppRevisionName)
return true
}
}
return false
}
// finalizeRollingSucceeded flips the source and target ApplicationContexts
// into their post-rollout state once the rollout plan has fully succeeded:
// the source context is demoted to a revision-only context so it stops being
// reconciled, and the target context has its rollout annotations removed so
// the appConfig controller takes over.
// sourceApp may be nil (scale operation with no source revision).
// NOTE(review): targetApp is dereferenced unconditionally — callers appear to
// guarantee it is non-nil on success; confirm before reusing elsewhere.
func (r *Reconciler) finalizeRollingSucceeded(ctx context.Context, sourceApp *oamv1alpha2.ApplicationContext,
	targetApp *oamv1alpha2.ApplicationContext) error {
	if sourceApp != nil {
		// mark the source app as an application revision only so that it stops being reconciled
		oamutil.RemoveAnnotations(sourceApp, []string{oam.AnnotationAppRollout})
		oamutil.AddAnnotations(sourceApp, map[string]string{oam.AnnotationAppRevision: strconv.FormatBool(true)})
		if err := r.Update(ctx, sourceApp); err != nil {
			klog.ErrorS(err, "cannot add the app revision annotation", "source application",
				klog.KRef(sourceApp.Namespace, sourceApp.GetName()))
			return err
		}
	}
	// remove the rollout annotation so that the target appConfig controller can take over the rest of the work
	oamutil.RemoveAnnotations(targetApp, []string{oam.AnnotationAppRollout, oam.AnnotationRollingComponent})
	if err := r.Update(ctx, targetApp); err != nil {
		klog.ErrorS(err, "cannot remove the rollout annotation", "target application",
			klog.KRef(targetApp.Namespace, targetApp.GetName()))
		return err
	}
	return nil
}
// UpdateStatus updates v1alpha2.AppRollout's Status with retry.RetryOnConflict
func (r *Reconciler) updateStatus(ctx context.Context, appRollout *v1beta1.AppRollout) error {
status := appRollout.DeepCopy().Status

View File

@@ -1,258 +0,0 @@
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package applicationrollout
import (
"context"
"fmt"
"strconv"
"strings"
"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
ktypes "k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
"k8s.io/utils/pointer"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
"github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/controller/common"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/applicationconfiguration"
"github.com/oam-dev/kubevela/pkg/controller/utils"
"github.com/oam-dev/kubevela/pkg/oam"
oamutil "github.com/oam-dev/kubevela/pkg/oam/util"
appUtil "github.com/oam-dev/kubevela/pkg/webhook/core.oam.dev/v1alpha2/applicationrollout"
)
// getTargetApps tries to locate the target appRevision and the appContext
// responsible for the target revision.
//
// Behavior:
//   - the target ApplicationRevision must already exist; a lookup failure is fatal
//   - if the ApplicationContext (same name as the revision) is missing AND we are
//     still in LocatingTargetAppState, a new context is created for the revision
//   - on the first visit (LocatingTargetAppState) an already-templated context is
//     forced back to templating, and the context is (re)marked for rollout
func (r *Reconciler) getTargetApps(ctx context.Context, componentList []string, rollingState v1alpha1.RollingState,
	targetAppRevisionName string) (*v1alpha2.ApplicationRevision, *v1alpha2.ApplicationContext, error) {
	var appRevision v1alpha2.ApplicationRevision
	var appContext v1alpha2.ApplicationContext
	namespaceName := oamutil.GetDefinitionNamespaceWithCtx(ctx)
	if err := r.Get(ctx, ktypes.NamespacedName{Namespace: namespaceName, Name: targetAppRevisionName},
		&appRevision); err != nil {
		klog.ErrorS(err, "cannot locate target application revision", "target application revision",
			klog.KRef(namespaceName, targetAppRevisionName))
		return nil, nil, err
	}
	// the appContext shares the revision's name; create it lazily only while locating
	if err := r.Get(ctx, ktypes.NamespacedName{Namespace: namespaceName, Name: targetAppRevisionName},
		&appContext); err != nil {
		if apierrors.IsNotFound(err) && rollingState == v1alpha1.LocatingTargetAppState {
			klog.InfoS("target application context does not exist yet, create one", "target application revision",
				klog.KRef(namespaceName, targetAppRevisionName))
			appContext, err = r.createAppContext(ctx, componentList, &appRevision)
			if err != nil {
				return nil, nil, err
			}
			return &appRevision, &appContext, nil
		}
		// the appContext has to exist by now (we only create it while locating)
		klog.ErrorS(err, "cannot locate target application context", "target application name",
			klog.KRef(namespaceName, targetAppRevisionName), "rollingState", rollingState)
		return nil, nil, err
	}
	// special handling for the first time we locate the appContext
	if rollingState == v1alpha1.LocatingTargetAppState {
		if appContext.Status.RollingStatus == types.RollingTemplated {
			// force template the target app so its workload is re-rendered for this rollout
			klog.InfoS("force templating an already templated target application",
				"target application revision", klog.KRef(namespaceName, targetAppRevisionName))
			appContext.Status.RollingStatus = types.RollingTemplating
			if err := r.Status().Update(ctx, &appContext); err != nil {
				klog.ErrorS(err, "failed to force update target application context to be templating",
					"target application name", klog.KRef(namespaceName, targetAppRevisionName))
				return nil, nil, err
			}
		}
		// mark the context as rolling and record which components are rolling
		err := r.prepareAppContext(ctx, componentList, &appContext)
		if err != nil {
			return nil, nil, err
		}
	}
	return &appRevision, &appContext, nil
}
// getSourceAppContexts tries to locate the source appRevision and appContext
// that the rollout upgrades from. Unlike the target side, nothing is created
// here — both the revision and the context must already exist.
func (r *Reconciler) getSourceAppContexts(ctx context.Context, componentList []string, rollingState v1alpha1.RollingState,
	sourceAppRevisionName string) (*v1alpha2.ApplicationRevision, *v1alpha2.ApplicationContext, error) {
	var appRevision v1alpha2.ApplicationRevision
	var appContext v1alpha2.ApplicationContext
	namespaceName := oamutil.GetDefinitionNamespaceWithCtx(ctx)
	if err := r.Get(ctx, ktypes.NamespacedName{Namespace: namespaceName, Name: sourceAppRevisionName},
		&appRevision); err != nil {
		klog.ErrorS(err, "cannot locate source application revision", "source application revision",
			klog.KRef(namespaceName, sourceAppRevisionName))
		return nil, nil, err
	}
	// the source app has to exist or there is nothing for us to upgrade from
	if err := r.Get(ctx, ktypes.NamespacedName{Namespace: namespaceName, Name: sourceAppRevisionName},
		&appContext); err != nil {
		// TODO: use the app name as the source Context to upgrade from a non-rolling application to rolling
		klog.ErrorS(err, "cannot locate source application revision", "source application name",
			klog.KRef(namespaceName, sourceAppRevisionName))
		return nil, nil, err
	}
	// mark the AC as rolling if we are still at the locating state
	if rollingState == v1alpha1.LocatingTargetAppState {
		err := r.prepareAppContext(ctx, componentList, &appContext)
		if err != nil {
			return nil, nil, err
		}
	}
	return &appRevision, &appContext, nil
}
// prepareAppContext flags the given application context as participating in a
// rollout: it drops the revision-only marker, sets the rollout annotation,
// records the rolling components (when a component list is given), and then
// persists the change to the API server.
func (r *Reconciler) prepareAppContext(ctx context.Context, componentList []string,
	appContext *v1alpha2.ApplicationContext) error {
	// the context must no longer be treated as a plain revision snapshot
	oamutil.RemoveAnnotations(appContext, []string{oam.AnnotationAppRevision})
	// mark the context as rolling
	rolling := map[string]string{oam.AnnotationAppRollout: strconv.FormatBool(true)}
	oamutil.AddAnnotations(appContext, rolling)
	// when the rollout is scoped to specific components, pass them along
	if len(componentList) > 0 {
		components := strings.Join(componentList, common.RollingComponentsSep)
		oamutil.AddAnnotations(appContext, map[string]string{oam.AnnotationRollingComponent: components})
	}
	return r.Update(ctx, appContext)
}
// createAppContext creates a fresh ApplicationContext for the given target
// appRevision: it copies the revision's labels/annotations/owner references,
// points the spec at the revision, marks the context as rolling, and persists
// it. The context value is returned together with the Create error.
func (r *Reconciler) createAppContext(ctx context.Context, componentList []string,
	appRevision *v1alpha2.ApplicationRevision) (v1alpha2.ApplicationContext, error) {
	namespaceName := oamutil.GetDefinitionNamespaceWithCtx(ctx)
	appContext := v1alpha2.ApplicationContext{
		ObjectMeta: metav1.ObjectMeta{
			Name:            appRevision.GetName(),
			Namespace:       namespaceName,
			Labels:          appRevision.GetLabels(),
			Annotations:     appRevision.GetAnnotations(),
			OwnerReferences: appRevision.GetOwnerReferences(),
		},
		Spec: v1alpha2.ApplicationContextSpec{
			ApplicationRevisionName: appRevision.GetName(),
		},
	}
	// if the copied owner references carry no controller flag, promote the
	// owning Application reference(s) to controller in place
	if metav1.GetControllerOf(&appContext) == nil {
		for i, owner := range appContext.GetOwnerReferences() {
			if owner.Kind == v1alpha2.ApplicationKind {
				appContext.GetOwnerReferences()[i].Controller = pointer.BoolPtr(true)
			}
		}
	}
	// set the AC as rolling
	oamutil.AddAnnotations(&appContext, map[string]string{oam.AnnotationAppRollout: strconv.FormatBool(true)})
	// pass the rolling component list to the app
	if len(componentList) != 0 {
		oamutil.AddAnnotations(&appContext, map[string]string{
			oam.AnnotationRollingComponent: strings.Join(componentList, common.RollingComponentsSep)})
	}
	err := r.Create(ctx, &appContext)
	return appContext, err
}
// extractWorkloads extracts the workloads to upgrade from the target and
// (optional) source ApplicationRevisions. It returns (targetWorkload,
// sourceWorkload, err); sourceWorkload is nil when no source revision is
// given (a scale operation).
func (r *Reconciler) extractWorkloads(ctx context.Context, componentList []string, targetAppRevision,
	sourceAppRevision *v1alpha2.ApplicationRevision) (*unstructured.Unstructured, *unstructured.Unstructured, error) {
	var componentName string
	var sourceApp *v1alpha2.ApplicationConfiguration
	// decode the embedded ApplicationConfiguration from the target revision
	targetApp, err := oamutil.RawExtension2AppConfig(targetAppRevision.Spec.ApplicationConfiguration)
	if err != nil {
		return nil, nil, err
	}
	if sourceAppRevision != nil {
		sourceApp, err = oamutil.RawExtension2AppConfig(sourceAppRevision.Spec.ApplicationConfiguration)
		if err != nil {
			return nil, nil, err
		}
	}
	if len(componentList) == 0 {
		// we need to find a default component: exactly one component common to
		// both the target and the source (source may be nil)
		commons := appUtil.FindCommonComponent(targetApp, sourceApp)
		if len(commons) != 1 {
			return nil, nil, fmt.Errorf("cannot find a default component, too many common components: %+v", commons)
		}
		componentName = commons[0]
	} else {
		// assume that the validator webhook has already guaranteed that there is no more than one component for now
		// and the component exists in both the target and source app
		componentName = componentList[0]
	}
	// get the workload definition
	// the validator webhook has checked that source and the target are the same type
	targetWorkload, err := r.fetchWorkload(ctx, componentName, targetApp)
	if err != nil {
		return nil, nil, err
	}
	klog.InfoS("successfully get the target workload we need to work on", "targetWorkload", klog.KObj(targetWorkload))
	if sourceApp != nil {
		sourceWorkload, err := r.fetchWorkload(ctx, componentName, sourceApp)
		if err != nil {
			return nil, nil, err
		}
		klog.InfoS("successfully get the source workload we need to work on", "sourceWorkload",
			klog.KObj(sourceWorkload))
		return targetWorkload, sourceWorkload, nil
	}
	return targetWorkload, nil, nil
}
// fetchWorkload resolves the live workload object for componentName from the
// given appConfig: it finds the matching component entry, loads the Component
// for that revision, derives the workload instance name the appConfig
// controller would have assigned, and reads the object back from the API server.
func (r *Reconciler) fetchWorkload(ctx context.Context, componentName string,
	targetApp *v1alpha2.ApplicationConfiguration) (*unstructured.Unstructured, error) {
	var targetAcc *v1alpha2.ApplicationConfigurationComponent
	for _, acc := range targetApp.Spec.Components {
		if utils.ExtractComponentName(acc.RevisionName) == componentName {
			targetAcc = acc.DeepCopy()
		}
	}
	// can't happen as we just searched the appConfig
	if targetAcc == nil {
		klog.Error("The component does not belong to the application",
			"components", targetApp.Spec.Components, "component to upgrade", componentName)
		return nil, fmt.Errorf("the component %s does not belong to the application with components %+v",
			componentName, targetApp.Spec.Components)
	}
	// the revision number is encoded in the component revision name
	revision, err := utils.ExtractRevision(targetAcc.RevisionName)
	if err != nil {
		return nil, errors.Wrap(err, fmt.Sprintf("failed to get revision given revision name %s",
			targetAcc.RevisionName))
	}
	// get the component given the component revision
	component, _, err := oamutil.GetComponent(ctx, r, *targetAcc, targetApp.GetNamespace())
	if err != nil {
		return nil, errors.Wrap(err, fmt.Sprintf("failed to get component given its revision %s",
			targetAcc.RevisionName))
	}
	// get the workload template in the component
	w, err := oamutil.RawExtension2Unstructured(&component.Spec.Workload)
	if err != nil {
		return nil, errors.Wrap(err, fmt.Sprintf("failed to get component given revision %s", targetAcc.RevisionName))
	}
	// reuse the same appConfig controller logic that determines the workload name given an ACC
	// inplaceUpgrade is not used in rollout now, hence the empty last argument
	applicationconfiguration.SetAppWorkloadInstanceName(componentName, w, revision, "")
	// get the real workload object from the api-server given GVK and name
	workload, err := oamutil.GetObjectGivenGVKAndName(ctx, r, w.GroupVersionKind(), targetApp.GetNamespace(), w.GetName())
	if err != nil {
		return nil, errors.Wrap(err, fmt.Sprintf("failed to get workload %s with gvk %+v ", w.GetName(), w.GroupVersionKind()))
	}
	return workload, nil
}

View File

@@ -0,0 +1,80 @@
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package applicationrollout
import (
"reflect"
"k8s.io/utils/pointer"
"github.com/openkruise/kruise-api/apps/v1alpha1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/klog/v2"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/application/assemble"
"github.com/oam-dev/kubevela/pkg/oam"
)
// rolloutWorkloadName returns a WorkloadOption that sets the workload instance name
// according to whether the workload kind supports in-place upgrade.
func rolloutWorkloadName() assemble.WorkloadOption {
	return assemble.WorkloadOptionFn(func(w *unstructured.Unstructured, component *v1alpha2.Component, definition *v1beta1.ComponentDefinition) error {
		// we hard code the behavior depends on the workload group/kind for now. The only in-place upgradable resources
		// we support is cloneset/statefulset for now. We can easily add more later.
		gvk := w.GroupVersionKind()
		inPlaceUpgradable := gvk.Group == v1alpha1.GroupVersion.Group &&
			(w.GetKind() == reflect.TypeOf(v1alpha1.CloneSet{}).Name() ||
				w.GetKind() == reflect.TypeOf(v1alpha1.StatefulSet{}).Name())
		if inPlaceUpgradable {
			// we use the component name alone for those resources that do support in-place upgrade
			klog.InfoS("we reuse the component name for resources that support in-place upgrade",
				"GVK", gvk, "instance name", component.Name)
			w.SetName(component.Name)
			return nil
		}
		// we assume that the rest of the resources do not support in-place upgrade,
		// so the instance name carries the component revision
		revisionedName := w.GetLabels()[oam.LabelAppComponentRevision]
		w.SetName(revisionedName)
		klog.InfoS("we encountered an unknown resources, assume that it does not support in-place upgrade",
			"GVK", gvk, "instance name", revisionedName)
		return nil
	})
}
// disableControllerOwner clears the controller flag on every owner reference of the
// workload, so the appRollout can take over updating it from the previous controller
// owner (the resourceTracker). A nil workload is tolerated.
func disableControllerOwner(workload *unstructured.Unstructured) {
	if workload == nil {
		return
	}
	refs := workload.GetOwnerReferences()
	for i := range refs {
		if refs[i].Controller != nil && *refs[i].Controller {
			refs[i].Controller = pointer.BoolPtr(false)
		}
	}
	workload.SetOwnerReferences(refs)
}
// enableControllerOwner yields the controller ownership of the workload back to the
// resourceTracker once the rollout is done. Only resourceTracker owner references are
// flipped back; other owners are left untouched.
func enableControllerOwner(workload *unstructured.Unstructured) {
	// guard against nil input, symmetric with disableControllerOwner
	if workload == nil {
		return
	}
	owners := workload.GetOwnerReferences()
	for i, owner := range owners {
		if owner.Kind == v1beta1.ResourceTrackerKind && owner.Controller != nil && !*owner.Controller {
			owners[i].Controller = pointer.BoolPtr(true)
		}
	}
	workload.SetOwnerReferences(owners)
}

View File

@@ -0,0 +1,61 @@
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package applicationrollout
import (
"testing"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"gotest.tools/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/utils/pointer"
)
// TestDisableControllerOwner verifies that every controller flag is cleared while
// the owner references themselves are preserved.
func TestDisableControllerOwner(t *testing.T) {
	workload := &unstructured.Unstructured{}
	workload.SetOwnerReferences([]metav1.OwnerReference{
		{Name: "test-1", Controller: pointer.BoolPtr(false)},
		{Name: "test-2", Controller: pointer.BoolPtr(true)},
	})
	disableControllerOwner(workload)
	refs := workload.GetOwnerReferences()
	assert.Equal(t, 2, len(refs))
	for i := range refs {
		assert.Equal(t, false, *refs[i].Controller)
	}
}
// TestEnableControllerOwner verifies that only resourceTracker owner references get
// their controller flag re-enabled; other kinds stay disabled.
func TestEnableControllerOwner(t *testing.T) {
	workload := &unstructured.Unstructured{}
	workload.SetOwnerReferences([]metav1.OwnerReference{
		{Name: "test-1", Controller: pointer.BoolPtr(false), Kind: v1beta1.ResourceTrackerKind},
		{Name: "test-2", Controller: pointer.BoolPtr(false), Kind: v1alpha2.ApplicationKind},
	})
	enableControllerOwner(workload)
	refs := workload.GetOwnerReferences()
	assert.Equal(t, 2, len(refs))
	for i := range refs {
		expected := refs[i].Kind == v1beta1.ResourceTrackerKind
		assert.Equal(t, expected, *refs[i].Controller)
	}
}

View File

@@ -0,0 +1,290 @@
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package applicationrollout
import (
"context"
"fmt"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/crossplane/crossplane-runtime/pkg/event"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/application/dispatch"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/application/assemble"
oamutil "github.com/oam-dev/kubevela/pkg/oam/util"
appUtil "github.com/oam-dev/kubevela/pkg/webhook/core.oam.dev/v1alpha2/applicationrollout"
)
// rolloutHandler carries all the state one reconcile round of an AppRollout needs:
// the resolved source/target app revisions, their assembled workloads/manifests,
// and the single component being rolled out.
type rolloutHandler struct {
	*Reconciler
	appRollout *v1beta1.AppRollout
	// source/targetRevName are the source and target revisions used by this reconcile round.
	// In most cases they equal appRollout.spec.target/sourceRevName, but if we roll forward
	// or revert in the middle of a rollout, they equal the previous rollout's revisions.
	sourceRevName string
	targetRevName string
	sourceAppRevision *v1beta1.ApplicationRevision
	targetAppRevision *v1beta1.ApplicationRevision
	// sourceWorkloads/targetWorkloads are assembled from the appRevision in the assemble phase,
	// keyed by component name. Please be aware that they are not the real state in k8s; they
	// are just generated from the appRevision and carry GVK+namespace+name.
	sourceWorkloads map[string]*unstructured.Unstructured
	targetWorkloads map[string]*unstructured.Unstructured
	// targetManifests is used by the dispatch (template target revision) and handleSucceed (GC) phases
	targetManifests []*unstructured.Unstructured
	// sourceManifests is used by the dispatch (template source revision) and handleSucceed (GC) phases
	sourceManifests []*unstructured.Unstructured
	// needRollComponent is the common component found between the source and target revisions
	needRollComponent string
}
// prepareRollout fetches the target (and optional source) app revisions and calls the
// assemble funcs to prepare the manifests/workloads needed by the whole reconcile loop.
func (h *rolloutHandler) prepareRollout(ctx context.Context) error {
	var err error
	h.targetAppRevision = new(v1beta1.ApplicationRevision)
	if err := h.Get(ctx, types.NamespacedName{Namespace: h.appRollout.Namespace, Name: h.targetRevName}, h.targetAppRevision); err != nil {
		return err
	}
	// construct an assemble manifest for targetAppRevision
	targetAssemble := assemble.NewAppManifests(h.targetAppRevision).
		WithWorkloadOption(rolloutWorkloadName()).
		WithWorkloadOption(assemble.PrepareWorkloadForRollout())
	// in the template phase, we should use targetManifests including target workloads/traits
	h.targetManifests, err = targetAssemble.AssembledManifests()
	if err != nil {
		// klog.Error does not understand key/value pairs; use the structured ErrorS variant
		klog.ErrorS(err, "appRollout targetAppRevision failed to assemble manifest", "appRollout", klog.KRef(h.appRollout.Namespace, h.appRollout.Name))
		return err
	}
	// we only use workloads grouped by component name to find common workloads in source and target revision
	h.targetWorkloads, _, _, err = targetAssemble.GroupAssembledManifests()
	if err != nil {
		klog.ErrorS(err, "appRollout targetAppRevision failed to assemble target workload", "appRollout", klog.KRef(h.appRollout.Namespace, h.appRollout.Name))
		return err
	}
	if len(h.sourceRevName) != 0 {
		h.sourceAppRevision = new(v1beta1.ApplicationRevision)
		if err := h.Get(ctx, types.NamespacedName{Namespace: h.appRollout.Namespace, Name: h.sourceRevName}, h.sourceAppRevision); err != nil {
			return err
		}
		// construct an assemble manifest for sourceAppRevision
		sourceAssemble := assemble.NewAppManifests(h.sourceAppRevision).
			WithWorkloadOption(assemble.PrepareWorkloadForRollout()).
			WithWorkloadOption(rolloutWorkloadName())
		h.sourceWorkloads, _, _, err = sourceAssemble.GroupAssembledManifests()
		if err != nil {
			klog.ErrorS(err, "appRollout sourceAppRevision failed to assemble workloads", "appRollout", klog.KRef(h.appRollout.Namespace, h.appRollout.Name))
			return err
		}
	}
	return nil
}
// determineRolloutComponent decides which component needs to be rolled out. We only
// support one workload now: either the component explicitly listed in the AppRollout
// spec, or the single common component shared by the source and target revisions.
func (h *rolloutHandler) determineRolloutComponent() error {
	if components := h.appRollout.Spec.ComponentList; len(components) > 0 {
		// assume that the validator webhook has already guaranteed that there is no more than one component for now
		// and the component exists in both the target and source app
		h.needRollComponent = components[0]
		return nil
	}
	// the user did not set ComponentList, so find a default common component
	// between the source and target revisions
	commons := appUtil.FindCommonComponentWithManifest(h.targetWorkloads, h.sourceWorkloads)
	if len(commons) != 1 {
		return fmt.Errorf("cannot find a default component, too many common components: %+v", commons)
	}
	h.needRollComponent = commons[0]
	return nil
}
// fetchSourceAndTargetWorkload reads the live source and target workloads from the
// cluster. The returned source workload is nil when no source revision is set
// (a scale operation).
func (h *rolloutHandler) fetchSourceAndTargetWorkload(ctx context.Context) (*unstructured.Unstructured, *unstructured.Unstructured, error) {
	var sourceWorkload *unstructured.Unstructured
	if len(h.sourceRevName) == 0 {
		klog.Info("source app fields not filled, this is a scale operation")
	} else {
		wl, err := h.extractWorkload(ctx, *h.sourceWorkloads[h.needRollComponent])
		if err != nil {
			klog.Errorf("specified sourceRevName but cannot fetch source workload %s: %v",
				h.appRollout.Spec.SourceAppRevisionName, err)
			return nil, nil, err
		}
		sourceWorkload = wl
	}
	targetWorkload, err := h.extractWorkload(ctx, *h.targetWorkloads[h.needRollComponent])
	if err != nil {
		klog.Errorf("cannot fetch target workload %s: %v", h.appRollout.Spec.TargetAppRevisionName, err)
		return nil, nil, err
	}
	return sourceWorkload, targetWorkload, nil
}
// extractWorkload uses the GVK, namespace and name of an assembled workload to fetch
// the real workload object from the cluster.
func (h *rolloutHandler) extractWorkload(ctx context.Context, workload unstructured.Unstructured) (*unstructured.Unstructured, error) {
	return oamutil.GetObjectGivenGVKAndName(ctx, h, workload.GroupVersionKind(), workload.GetNamespace(), workload.GetName())
}
// handleRolloutModified restarts the rollout when its target changed. If the previous
// rollout is still in the middle of rolling out, we keep using its source/target
// revisions until it is properly finalized; otherwise we adopt the new spec revisions.
func (h *rolloutHandler) handleRolloutModified() {
	klog.InfoS("rollout target changed, restart the rollout", "new source", h.appRollout.Spec.SourceAppRevisionName,
		"new target", h.appRollout.Spec.TargetAppRevisionName)
	h.record.Event(h.appRollout, event.Normal("Rollout Restarted",
		"rollout target changed, restart the rollout", "new source", h.appRollout.Spec.SourceAppRevisionName,
		"new target", h.appRollout.Spec.TargetAppRevisionName))
	// we are okay to move directly to restart the rollout since we are at the terminal state
	// however, we need to make sure we properly finalizing the existing rollout before restart if it's
	// still in the middle of rolling out
	state := h.appRollout.Status.RollingState
	if state == v1alpha1.RolloutSucceedState || state == v1alpha1.RolloutFailedState {
		// previous rollout has finished, go ahead using the new source/target revisions
		h.targetRevName = h.appRollout.Spec.TargetAppRevisionName
		h.sourceRevName = h.appRollout.Spec.SourceAppRevisionName
		// mark so that we don't think we are modified again
		h.appRollout.Status.LastUpgradedTargetAppRevision = h.appRollout.Spec.TargetAppRevisionName
		h.appRollout.Status.LastSourceAppRevision = h.appRollout.Spec.SourceAppRevisionName
	} else {
		// happens when rolling forward or reverting in the middle of a rollout:
		// the previous rollout hasn't finished, continue to handle the previous
		// resources until we are okay to move forward
		h.targetRevName = h.appRollout.Status.LastUpgradedTargetAppRevision
		h.sourceRevName = h.appRollout.Status.LastSourceAppRevision
	}
	h.appRollout.Status.StateTransition(v1alpha1.RollingModifiedEvent)
}
// templateTargetManifest calls dispatch to template the target app revision's manifests
// into the cluster, then makes sure the rolled-out workload is no longer controller-owned
// by the resourceTracker so the rollout can drive its updates.
func (h *rolloutHandler) templateTargetManifest(ctx context.Context) error {
	var rt *v1beta1.ResourceTracker
	// if sourceAppRevision is not nil, we should upgrade existing resources which are also needed by target app
	// revision
	if h.sourceAppRevision != nil {
		rt = new(v1beta1.ResourceTracker)
		err := h.Get(ctx, types.NamespacedName{Name: dispatch.ConstructResourceTrackerName(h.appRollout.Spec.SourceAppRevisionName, h.appRollout.Namespace)}, rt)
		if err != nil {
			klog.Errorf("specified sourceAppRevisionName %s but cannot fetch the sourceResourceTracker %v",
				h.appRollout.Spec.SourceAppRevisionName, err)
			return err
		}
	}
	// use the source resourceTracker to handle ownership transfer of shared resources;
	// GC is skipped here since the source revision's resources are still in use
	dispatcher := dispatch.NewAppManifestsDispatcher(h, h.targetAppRevision).EnableUpgradeAndSkipGC(rt)
	_, err := dispatcher.Dispatch(ctx, h.targetManifests)
	if err != nil {
		klog.Errorf("dispatch targetRevision error %s:%v", h.appRollout.Spec.TargetAppRevisionName, err)
		return err
	}
	// re-fetch the live workload that was just dispatched
	workload, err := h.extractWorkload(ctx, *h.targetWorkloads[h.needRollComponent])
	if err != nil {
		return err
	}
	ref := metav1.GetControllerOfNoCopy(workload)
	if ref != nil && ref.Kind == v1beta1.ResourceTrackerKind {
		wlPatch := client.MergeFrom(workload.DeepCopy())
		// guarantee resourceTracker isn't controller owner of workload
		disableControllerOwner(workload)
		if err = h.Client.Patch(ctx, workload, wlPatch, client.FieldOwner(h.appRollout.UID)); err != nil {
			return err
		}
	}
	return nil
}
// templateSourceManifest calls dispatch to template the source app revision's manifests
// into the cluster, then makes sure the source workload is no longer controller-owned by
// the resourceTracker. It is a no-op when there is no source revision (scale operation).
func (h *rolloutHandler) templateSourceManifest(ctx context.Context) error {
	// only when sourceAppRevision is not nil do we need to template the source revision
	if h.sourceAppRevision == nil {
		return nil
	}
	dispatcher := dispatch.NewAppManifestsDispatcher(h, h.sourceAppRevision)
	_, err := dispatcher.Dispatch(ctx, h.sourceManifests)
	if err != nil {
		// log the source revision name (previously this mistakenly logged the target one)
		klog.Errorf("dispatch sourceRevision error %s:%v", h.appRollout.Spec.SourceAppRevisionName, err)
		return err
	}
	// re-fetch the live workload that was just dispatched
	workload, err := h.extractWorkload(ctx, *h.sourceWorkloads[h.needRollComponent])
	if err != nil {
		return err
	}
	ref := metav1.GetControllerOfNoCopy(workload)
	if ref != nil && ref.Kind == v1beta1.ResourceTrackerKind {
		wlPatch := client.MergeFrom(workload.DeepCopy())
		// guarantee resourceTracker isn't controller owner of workload
		disableControllerOwner(workload)
		if err = h.Client.Patch(ctx, workload, wlPatch, client.FieldOwner(h.appRollout.UID)); err != nil {
			return err
		}
	}
	return nil
}
// finalizeRollingSucceeded handles the work left after a rollout succeeds: yield the
// controller ownership of the target workload back to the resourceTracker, and garbage
// collect the source revision's resources (if a source revision exists).
func (h *rolloutHandler) finalizeRollingSucceeded(ctx context.Context) error {
	// yield controller owner back to resourceTracker
	workload, err := h.extractWorkload(ctx, *h.targetWorkloads[h.needRollComponent])
	if err != nil {
		return err
	}
	wlPatch := client.MergeFrom(workload.DeepCopy())
	enableControllerOwner(workload)
	if err = h.Client.Patch(ctx, workload, wlPatch, client.FieldOwner(h.appRollout.UID)); err != nil {
		return err
	}
	// only when sourceAppRevision is not nil do we need to GC the old revision's resources
	if h.sourceAppRevision != nil {
		oldRT := &v1beta1.ResourceTracker{}
		err := h.Client.Get(ctx, client.ObjectKey{
			Name: dispatch.ConstructResourceTrackerName(h.sourceAppRevision.Name, h.sourceAppRevision.Namespace)}, oldRT)
		if err != nil && apierrors.IsNotFound(err) {
			// end finalizing if source revision's tracker is already gone
			// this guarantees finalizeRollingSucceeded will only GC once
			return nil
		}
		if err != nil {
			return err
		}
		d := dispatch.NewAppManifestsDispatcher(h.Client, h.targetAppRevision).
			EnableUpgradeAndGC(oldRT)
		// no need to dispatch manifests again (pass nil), just let the dispatcher GC
		// everything tracked by the old resourceTracker that the target no longer uses
		if _, err := d.Dispatch(ctx, nil); err != nil {
			return err
		}
	}
	return nil
}

View File

@@ -23,7 +23,6 @@ import (
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/appdeployment"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/application"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/applicationconfiguration"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/applicationcontext"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/applicationrollout"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/core/components/componentdefinition"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/core/policies/policydefinition"
@@ -38,7 +37,7 @@ import (
func Setup(mgr ctrl.Manager, args controller.Args) error {
for _, setup := range []func(ctrl.Manager, controller.Args) error{
containerizedworkload.Setup, manualscalertrait.Setup, healthscope.Setup,
application.Setup, applicationrollout.Setup, applicationcontext.Setup, appdeployment.Setup,
application.Setup, applicationrollout.Setup, appdeployment.Setup,
traitdefinition.Setup, componentdefinition.Setup, policydefinition.Setup, workflowstepdefinition.Setup,
} {
if err := setup(mgr, args); err != nil {