Mirror of https://github.com/kubevela/kubevela.git (synced 2026-02-25 07:14:15 +00:00)

Compare commits: v1.4.12 ... release-1. (4 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 35a84e9cbf |  |
|  | bf251d5039 |  |
|  | 31f0b28d96 |  |
|  | 810c47545e |  |
.github/workflows/apiserver-test.yaml (vendored, 2 changes)

@@ -55,7 +55,7 @@ jobs:

   apiserver-unit-tests:
-    runs-on: aliyun
+    runs-on: aliyun-legacy
     needs: [ detect-noop,set-k8s-matrix ]
     if: needs.detect-noop.outputs.noop != 'true'
     strategy:
.github/workflows/e2e-multicluster-test.yml (vendored, 6 changes)

@@ -53,7 +53,7 @@ jobs:

   e2e-multi-cluster-tests:
-    runs-on: aliyun
+    runs-on: aliyun-legacy
     needs: [ detect-noop,set-k8s-matrix ]
     if: needs.detect-noop.outputs.noop != 'true'
     strategy:

@@ -97,7 +97,9 @@ jobs:
           kubectl cluster-info

       - name: Load Image to kind cluster (Hub)
-        run: make kind-load
+        run: |
+          make kind-load
+          make kind-load-runtime-cluster

       - name: Cleanup for e2e tests
         run: |
.github/workflows/e2e-rollout-test.yml (vendored, 2 changes)

@@ -52,7 +52,7 @@ jobs:
           fi

   e2e-rollout-tests:
-    runs-on: aliyun
+    runs-on: aliyun-legacy
     needs: [ detect-noop,set-k8s-matrix ]
     if: needs.detect-noop.outputs.noop != 'true'
     strategy:
.github/workflows/e2e-test.yml (vendored, 2 changes)

@@ -52,7 +52,7 @@ jobs:
           fi

   e2e-tests:
-    runs-on: aliyun
+    runs-on: aliyun-legacy
     needs: [ detect-noop,set-k8s-matrix ]
     if: needs.detect-noop.outputs.noop != 'true'
     strategy:
.github/workflows/go.yml (vendored, 2 changes)

@@ -98,7 +98,7 @@ jobs:
           version: ${{ env.GOLANGCI_VERSION }}

   check-diff:
-    runs-on: aliyun
+    runs-on: aliyun-legacy
     needs: detect-noop
     if: needs.detect-noop.outputs.noop != 'true'

.github/workflows/timed-task.yml (vendored, 2 changes)

@@ -4,7 +4,7 @@ on:
     - cron: '* * * * *'
 jobs:
   clean-image:
-    runs-on: aliyun
+    runs-on: aliyun-legacy
     steps:
       - name: Cleanup image
        run: docker image prune -f
Makefile (6 changes)

@@ -83,15 +83,17 @@ endif

 # load docker image to the kind cluster
-kind-load: kind-load-runtime-cluster
+kind-load: kind-load-rollout
 	docker build -t $(VELA_CORE_TEST_IMAGE) -f Dockerfile.e2e .
 	kind load docker-image $(VELA_CORE_TEST_IMAGE) || { echo >&2 "kind not installed or error loading image: $(VELA_CORE_TEST_IMAGE)"; exit 1; }

-kind-load-runtime-cluster:
+kind-load-rollout:
 	/bin/sh hack/e2e/build_runtime_rollout.sh
 	docker build -t $(VELA_RUNTIME_ROLLOUT_TEST_IMAGE) -f runtime/rollout/e2e/Dockerfile.e2e runtime/rollout/e2e/
 	rm -rf runtime/rollout/e2e/tmp
 	kind load docker-image $(VELA_RUNTIME_ROLLOUT_TEST_IMAGE) || { echo >&2 "kind not installed or error loading image: $(VELA_RUNTIME_ROLLOUT_TEST_IMAGE)"; exit 1; }
+
+kind-load-runtime-cluster:
+	kind load docker-image $(VELA_RUNTIME_ROLLOUT_TEST_IMAGE) --name=$(RUNTIME_CLUSTER_NAME) || { echo >&2 "kind not installed or error loading image: $(VELA_RUNTIME_ROLLOUT_TEST_IMAGE)"; exit 1; }

 # Run tests
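With this split, `make kind-load` builds the core and rollout test images and loads both into the default kind cluster, while the rebuilt `kind-load-runtime-cluster` target only loads the already-built rollout image into a second kind cluster selected via kind's `--name` flag (`$(RUNTIME_CLUSTER_NAME)`, presumably the runtime cluster provisioned by the e2e setup). The multicluster e2e workflow above now invokes both targets explicitly.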
@@ -77,7 +77,7 @@ ifeq (, $(shell which readme-generator))
 	@{ \
 	set -e ;\
 	echo 'installing readme-generator-for-helm' ;\
-	npm install -g readme-generator-for-helm ;\
+	npm install -g @bitnami/readme-generator-for-helm ;\
 	}
 else
 	@$(OK) readme-generator-for-helm is already installed
@@ -86,7 +86,6 @@ var _ = Describe("test FindWholeAddonPackagesFromRegistry", func() {
 			Expect(res).To(HaveLen(1))
 			Expect(res[0].Name).To(Equal("velaux"))
 			Expect(res[0].InstallPackage).ToNot(BeNil())
-			Expect(res[0].APISchema).ToNot(BeNil())
 		})

 		It("should return one valid result, matching one registry", func() {
 			res, err := FindWholeAddonPackagesFromRegistry(context.Background(), k8sClient, []string{"velaux"}, []string{"KubeVela"})
@@ -94,7 +93,6 @@ var _ = Describe("test FindWholeAddonPackagesFromRegistry", func() {
 			Expect(res).To(HaveLen(1))
 			Expect(res[0].Name).To(Equal("velaux"))
 			Expect(res[0].InstallPackage).ToNot(BeNil())
-			Expect(res[0].APISchema).ToNot(BeNil())
 		})
 	})

@@ -113,10 +111,8 @@ var _ = Describe("test FindWholeAddonPackagesFromRegistry", func() {
 			Expect(res).To(HaveLen(2))
 			Expect(res[0].Name).To(Equal("velaux"))
 			Expect(res[0].InstallPackage).ToNot(BeNil())
-			Expect(res[0].APISchema).ToNot(BeNil())
 			Expect(res[1].Name).To(Equal("traefik"))
 			Expect(res[1].InstallPackage).ToNot(BeNil())
-			Expect(res[1].APISchema).ToNot(BeNil())
 		})
 	})

@@ -127,7 +123,6 @@ var _ = Describe("test FindWholeAddonPackagesFromRegistry", func() {
 			Expect(res).To(HaveLen(1))
 			Expect(res[0].Name).To(Equal("velaux"))
 			Expect(res[0].InstallPackage).ToNot(BeNil())
-			Expect(res[0].APISchema).ToNot(BeNil())
 		})
 	})
 })
@@ -82,8 +82,8 @@ func (c Condition) Validate() error {
 	if c.JSONKey == "" {
 		return fmt.Errorf("the json key of the condition can not be empty")
 	}
-	if c.Action != "enable" && c.Action != "disable" {
-		return fmt.Errorf("the action of the condition must be enable or disable")
+	if c.Action != "enable" && c.Action != "disable" && c.Action != "" {
+		return fmt.Errorf("the action of the condition only supports enable, disable or leave it empty")
 	}
 	if c.Op != "" && !StringsContain([]string{"==", "!=", "in"}, c.Op) {
 		return fmt.Errorf("the op of the condition must be `==` 、`!=` and `in`")
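For context, here is a minimal standalone sketch of the relaxed validation above. The struct shape and the `stringsContain` helper are simplified stand-ins for the real addon types and the `StringsContain` utility referenced in the hunk, so treat it as illustrative rather than the exact upstream code:

```go
package main

import "fmt"

// Condition mirrors only the fields exercised by the diff above; the
// real type in the addon package carries more fields.
type Condition struct {
    JSONKey string
    Action  string // "enable", "disable", or "" (empty now passes validation)
    Op      string // one of "==", "!=", "in", or ""
}

// stringsContain is a local stand-in for the StringsContain helper.
func stringsContain(values []string, target string) bool {
    for _, v := range values {
        if v == target {
            return true
        }
    }
    return false
}

// Validate reproduces the post-change logic: an empty Action is now
// accepted instead of being rejected.
func (c Condition) Validate() error {
    if c.JSONKey == "" {
        return fmt.Errorf("the json key of the condition can not be empty")
    }
    if c.Action != "enable" && c.Action != "disable" && c.Action != "" {
        return fmt.Errorf("the action of the condition only supports enable, disable or leave it empty")
    }
    if c.Op != "" && !stringsContain([]string{"==", "!=", "in"}, c.Op) {
        return fmt.Errorf("the op of the condition must be `==`, `!=` or `in`")
    }
    return nil
}

func main() {
    // Before this change an empty Action failed validation; now it passes.
    fmt.Println(Condition{JSONKey: "enabled"}.Validate())              // <nil>
    fmt.Println(Condition{JSONKey: "enabled", Action: "x"}.Validate()) // error
}
```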
@@ -307,6 +307,11 @@ func (r *Reconciler) gcResourceTrackers(logCtx monitorContext.Context, handler *
 	}))
 	defer subCtx.Commit("finish gc resourceTrackers")

+	statusUpdater := r.updateStatus
+	if isPatch {
+		statusUpdater = r.patchStatus
+	}
+
 	var options []resourcekeeper.GCOption
 	if !gcOutdated {
 		options = append(options, resourcekeeper.DisableMarkStageGCOption{}, resourcekeeper.DisableGCComponentRevisionOption{}, resourcekeeper.DisableLegacyGCOption{})
@@ -314,8 +319,10 @@ func (r *Reconciler) gcResourceTrackers(logCtx monitorContext.Context, handler *
 	finished, waiting, err := handler.resourceKeeper.GarbageCollect(logCtx, options...)
 	if err != nil {
 		logCtx.Error(err, "Failed to gc resourcetrackers")
 		r.Recorder.Event(handler.app, event.Warning(velatypes.ReasonFailedGC, err))
-		return r.endWithNegativeCondition(logCtx, handler.app, condition.ReconcileError(err), phase)
+		cond := condition.Deleting()
+		cond.Message = fmt.Sprintf("error encountered during garbage collection: %s", err.Error())
+		handler.app.Status.SetConditions(cond)
+		return r.result(statusUpdater(logCtx, handler.app, phase)).ret()
 	}
 	if !finished {
 		logCtx.Info("GarbageCollecting resourcetrackers unfinished")
@@ -324,13 +331,13 @@ func (r *Reconciler) gcResourceTrackers(logCtx monitorContext.Context, handler *
 			cond.Message = fmt.Sprintf("Waiting for %s to delete. (At least %d resources are deleting.)", waiting[0].DisplayName(), len(waiting))
 		}
 		handler.app.Status.SetConditions(cond)
-		return r.result(r.patchStatus(logCtx, handler.app, phase)).requeue(baseGCBackoffWaitTime).ret()
+		return r.result(statusUpdater(logCtx, handler.app, phase)).requeue(baseGCBackoffWaitTime).ret()
 	}
 	logCtx.Info("GarbageCollected resourcetrackers")
 	if !isPatch {
-		return r.result(r.updateStatus(logCtx, handler.app, common.ApplicationRunningWorkflow)).ret()
+		phase = common.ApplicationRunningWorkflow
 	}
-	return r.result(r.patchStatus(logCtx, handler.app, phase)).ret()
+	return r.result(statusUpdater(logCtx, handler.app, phase)).ret()
 }

 type reconcileResult struct {
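The refactor above replaces hard-coded `r.patchStatus` / `r.updateStatus` calls with a single `statusUpdater` function value chosen once at the top, so every exit path, including the new GC-failure path, reports status through the same updater. A minimal sketch of that pattern, with hypothetical types standing in for the reconciler internals:

```go
package main

import "fmt"

// app and phase are hypothetical stand-ins for the controller's
// *v1beta1.Application and common.ApplicationPhase.
type app struct{ name string }
type phase string

// updater has the shape shared by updateStatus and patchStatus in the hunk.
type updater func(a *app, p phase) error

func updateStatus(a *app, p phase) error {
    fmt.Printf("update %s -> %s\n", a.name, p)
    return nil
}

func patchStatus(a *app, p phase) error {
    fmt.Printf("patch %s -> %s\n", a.name, p)
    return nil
}

// gc mimics the control flow of gcResourceTrackers: pick the updater once,
// then use it on every exit path instead of branching at each return.
func gc(a *app, p phase, isPatch, gcFailed bool) error {
    statusUpdater := updater(updateStatus)
    if isPatch {
        statusUpdater = patchStatus
    }
    if gcFailed {
        // GC failed: record a condition and still report status, rather
        // than aborting the reconcile outright as the old code did.
        return statusUpdater(a, p)
    }
    if !isPatch {
        p = "RunningWorkflow"
    }
    return statusUpdater(a, p)
}

func main() {
    _ = gc(&app{name: "demo"}, "Deleting", false, true)  // update path on failure
    _ = gc(&app{name: "demo"}, "Deleting", true, false)  // patch path on success
}
```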
@@ -165,7 +165,7 @@ var _ = Describe("Addon status or info", func() {
 		Expect(ds.DeleteRegistry(context.Background(), "KubeVela")).To(Succeed())
 	})

-	It("should display addon name and disabled status, registry name, available versions, dependencies, and parameters(optional)", func() {
+	PIt("should display addon name and disabled status, registry name, available versions, dependencies, and parameters(optional)", func() {
 		addonName := "velaux"
 		res, _, err := generateAddonInfo(k8sClient, addonName)
 		Expect(err).Should(BeNil())
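Ginkgo's `PIt` marks the spec as pending: the body is kept in the tree but skipped at run time, so the test is parked rather than deleted.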
@@ -233,14 +233,17 @@ func prepareToForceDeleteTerraformComponents(ctx context.Context, k8sClient clie
 	for _, c := range app.Spec.Components {
 		var def corev1beta1.ComponentDefinition
 		if err := k8sClient.Get(ctx, client.ObjectKey{Name: c.Type, Namespace: types.DefaultKubeVelaNS}, &def); err != nil {
-			return err
+			if !apierrors.IsNotFound(err) {
+				return err
+			}
+			if err := k8sClient.Get(ctx, client.ObjectKey{Name: c.Type, Namespace: namespace}, &def); err != nil {
+				return err
+			}
 		}
 		if def.Spec.Schematic != nil && def.Spec.Schematic.Terraform != nil {
 			var conf terraformapi.Configuration
 			if err := k8sClient.Get(ctx, client.ObjectKey{Name: c.Name, Namespace: namespace}, &conf); err != nil {
-				if !apierrors.IsNotFound(err) {
-					return err
-				}
+				return err
 			}
 			conf.Spec.ForceDelete = &forceDelete
 			if err := k8sClient.Update(ctx, &conf); err != nil {
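The new lookup order, system namespace first and the application namespace only when the definition is not found there, is a small fallback pattern around `apierrors.IsNotFound`. A self-contained sketch of just that pattern, with `vela-system` hard-coded where the real code uses `types.DefaultKubeVelaNS` and a ConfigMap standing in for the ComponentDefinition:

```go
package main

import (
    "context"
    "fmt"

    corev1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/client/fake"
)

// getWithFallback tries the system namespace first and falls back to the
// application's namespace only when the object is missing there; any other
// error (RBAC, connectivity, ...) is surfaced immediately.
func getWithFallback(ctx context.Context, c client.Client, name, appNS string, obj client.Object) error {
    err := c.Get(ctx, client.ObjectKey{Name: name, Namespace: "vela-system"}, obj)
    if err == nil {
        return nil
    }
    if !apierrors.IsNotFound(err) {
        return err
    }
    return c.Get(ctx, client.ObjectKey{Name: name, Namespace: appNS}, obj)
}

func main() {
    // Demo with controller-runtime's fake client (also used by the tests
    // below): an object that exists only in the app namespace is still found.
    s := runtime.NewScheme()
    _ = corev1.AddToScheme(s)
    cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "d1", Namespace: "app-ns"}}
    c := fake.NewClientBuilder().WithScheme(s).WithObjects(cm).Build()

    out := &corev1.ConfigMap{}
    fmt.Println(getWithFallback(context.Background(), c, "d1", "app-ns", out)) // <nil>
}
```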
@@ -19,7 +19,7 @@ import (
 	"context"
 	"testing"

-	terraformapi "github.com/oam-dev/terraform-controller/api/v1beta1"
+	terraformapi "github.com/oam-dev/terraform-controller/api/v1beta2"
 	"github.com/stretchr/testify/assert"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -55,7 +55,7 @@ func TestPrepareToForceDeleteTerraformComponents(t *testing.T) {
 	def1 := &v1beta1.ComponentDefinition{
 		TypeMeta: metav1.TypeMeta{
 			Kind:       "ComponentDefinition",
-			APIVersion: "core.oam.dev/v1beta1",
+			APIVersion: "core.oam.dev/v1beta2",
 		},
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "d1",
@@ -75,6 +75,16 @@ func TestPrepareToForceDeleteTerraformComponents(t *testing.T) {
 			Namespace: "default",
 		},
 	}
+
+	userNamespace := "another-namespace"
+	def2 := def1.DeepCopy()
+	def2.SetNamespace(userNamespace)
+	app2 := app1.DeepCopy()
+	app2.SetNamespace(userNamespace)
+	app2.SetName("app2")
+	conf2 := conf1.DeepCopy()
+	conf2.SetNamespace(userNamespace)
+
 	k8sClient1 := fake.NewClientBuilder().WithScheme(s).WithObjects(app1, def1, conf1).Build()

 	k8sClient2 := fake.NewClientBuilder().Build()
@@ -83,6 +93,7 @@ func TestPrepareToForceDeleteTerraformComponents(t *testing.T) {

 	k8sClient4 := fake.NewClientBuilder().WithScheme(s).WithObjects(app1, def1).Build()

+	k8sClient5 := fake.NewClientBuilder().WithScheme(s).WithObjects(app2, def2, conf2).Build()
 	type args struct {
 		k8sClient client.Client
 		namespace string
@@ -141,16 +152,27 @@ func TestPrepareToForceDeleteTerraformComponents(t *testing.T) {
 				"app1",
 			},
 			want: want{
-				errMsg: "no kind is registered for the type",
+				errMsg: "configurations.terraform.core.oam.dev \"c1\" not found",
 			},
 		},
+		"can read definition from application namespace": {
+			args: args{
+				k8sClient5,
+				userNamespace,
+				"app2",
+			},
+			want: want{},
+		},
 	}

 	for name, tc := range testcases {
 		t.Run(name, func(t *testing.T) {
 			err := prepareToForceDeleteTerraformComponents(ctx, tc.args.k8sClient, tc.args.namespace, tc.args.name)
 			if err != nil || tc.want.errMsg != "" {
 				if err != nil {
 					assert.NotEmpty(t, tc.want.errMsg)
 					assert.Contains(t, err.Error(), tc.want.errMsg)
 				} else {
 					assert.Empty(t, tc.want.errMsg)
 				}
 			}
 		})
 	}
@@ -28,6 +28,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"

 	"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
+	kubevelatypes "github.com/oam-dev/kubevela/apis/types"
 	"github.com/oam-dev/kubevela/pkg/oam"
 	"github.com/oam-dev/kubevela/pkg/utils"

@@ -501,5 +502,95 @@ var _ = Describe("Test multicluster scenario", func() {
 			g.Expect(kerrors.IsNotFound(err)).Should(BeTrue())
 		}, 2*time.Minute).Should(Succeed())
 	})

+	It("Test application with failed gc and restart workflow", func() {
+		By("duplicate cluster")
+		secret := &corev1.Secret{}
+		const secretName = "disconnection-test"
+		Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: kubevelatypes.DefaultKubeVelaNS, Name: WorkerClusterName}, secret)).Should(Succeed())
+		secret.SetName(secretName)
+		secret.SetResourceVersion("")
+		Expect(k8sClient.Create(hubCtx, secret)).Should(Succeed())
+		defer func() {
+			_ = k8sClient.Delete(hubCtx, secret)
+		}()
+
+		By("create cluster normally")
+		bs, err := os.ReadFile("./testdata/app/app-disconnection-test.yaml")
+		Expect(err).Should(Succeed())
+		app := &v1beta1.Application{}
+		Expect(yaml.Unmarshal(bs, app)).Should(Succeed())
+		app.SetNamespace(namespace)
+		Expect(k8sClient.Create(hubCtx, app)).Should(Succeed())
+		key := client.ObjectKeyFromObject(app)
+		Eventually(func(g Gomega) {
+			g.Expect(k8sClient.Get(hubCtx, key, app)).Should(Succeed())
+			g.Expect(app.Status.Phase).Should(Equal(common.ApplicationRunning))
+		}).WithTimeout(30 * time.Second).WithPolling(2 * time.Second).Should(Succeed())
+
+		By("disconnect cluster")
+		Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: kubevelatypes.DefaultKubeVelaNS, Name: secretName}, secret)).Should(Succeed())
+		secret.Data["tls.crt"] = []byte("-")
+		Expect(k8sClient.Update(hubCtx, secret)).Should(Succeed())
+
+		By("update application")
+		Expect(k8sClient.Get(hubCtx, key, app)).Should(Succeed())
+		app.Spec.Policies = nil
+		Expect(k8sClient.Update(hubCtx, app)).Should(Succeed())
+		Eventually(func(g Gomega) {
+			g.Expect(k8sClient.Get(hubCtx, key, app)).Should(Succeed())
+			g.Expect(app.Status.ObservedGeneration).Should(Equal(app.Generation))
+			g.Expect(app.Status.Phase).Should(Equal(common.ApplicationRunning))
+			rts := &v1beta1.ResourceTrackerList{}
+			g.Expect(k8sClient.List(hubCtx, rts, client.MatchingLabels{oam.LabelAppName: key.Name, oam.LabelAppNamespace: key.Namespace})).Should(Succeed())
+			cnt := 0
+			for _, item := range rts.Items {
+				if item.Spec.Type == v1beta1.ResourceTrackerTypeVersioned {
+					cnt++
+				}
+			}
+			g.Expect(cnt).Should(Equal(2))
+		}).WithTimeout(30 * time.Second).WithPolling(2 * time.Second).Should(Succeed())
+
+		By("try update application again")
+		Expect(k8sClient.Get(hubCtx, key, app)).Should(Succeed())
+		if app.Annotations == nil {
+			app.Annotations = map[string]string{}
+		}
+		app.Annotations[oam.AnnotationPublishVersion] = "test"
+		Expect(k8sClient.Update(hubCtx, app)).Should(Succeed())
+		Eventually(func(g Gomega) {
+			g.Expect(k8sClient.Get(hubCtx, key, app)).Should(Succeed())
+			g.Expect(app.Status.LatestRevision).ShouldNot(BeNil())
+			g.Expect(app.Status.LatestRevision.Revision).Should(Equal(int64(3)))
+			g.Expect(app.Status.ObservedGeneration).Should(Equal(app.Generation))
+			g.Expect(app.Status.Phase).Should(Equal(common.ApplicationRunning))
+		}).WithTimeout(1 * time.Minute).WithPolling(2 * time.Second).Should(Succeed())
+
+		By("clear disconnection cluster secret")
+		Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: kubevelatypes.DefaultKubeVelaNS, Name: secretName}, secret)).Should(Succeed())
+		Expect(k8sClient.Delete(hubCtx, secret)).Should(Succeed())
+
+		By("update application again")
+		Eventually(func(g Gomega) {
+			g.Expect(k8sClient.Get(hubCtx, key, app)).Should(Succeed())
+			app.Annotations[oam.AnnotationPublishVersion] = "test2"
+			g.Expect(k8sClient.Update(hubCtx, app)).Should(Succeed())
+		}).WithTimeout(10 * time.Second).WithPolling(2 * time.Second).Should(Succeed())
+
+		By("wait gc application completed")
+		Eventually(func(g Gomega) {
+			rts := &v1beta1.ResourceTrackerList{}
+			g.Expect(k8sClient.List(hubCtx, rts, client.MatchingLabels{oam.LabelAppName: key.Name, oam.LabelAppNamespace: key.Namespace})).Should(Succeed())
+			cnt := 0
+			for _, item := range rts.Items {
+				if item.Spec.Type == v1beta1.ResourceTrackerTypeVersioned {
+					cnt++
+				}
+			}
+			g.Expect(cnt).Should(Equal(1))
+		}).WithTimeout(3 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())
+	})
+
 	})
 })
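Read alongside the `gcResourceTrackers` hunks above, this case appears to exercise the new failure path end to end: garbage collection against the duplicated cluster secret with a corrupted `tls.crt` fails, the application surfaces a condition instead of aborting the reconcile, publishing a new version still restarts the workflow, and once the broken secret is deleted the stale versioned ResourceTracker is collected (the count drops from 2 to 1).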
test/e2e-multicluster-test/testdata/app/app-disconnection-test.yaml (vendored, new file, 17 lines)

@@ -0,0 +1,17 @@
+apiVersion: core.oam.dev/v1beta1
+kind: Application
+metadata:
+  name: app-disconnection-test
+spec:
+  components:
+    - type: k8s-objects
+      name: app-dis-cm
+      properties:
+        objects:
+          - apiVersion: v1
+            kind: ConfigMap
+  policies:
+    - type: topology
+      name: disconnection-test
+      properties:
+        clusters: ["disconnection-test"]