mirror of
https://github.com/kubevela/kubevela.git
synced 2026-02-14 18:10:21 +00:00
Chore: delete useless test (#2984)
* delete useless test Signed-off-by: wangyike <wangyike_wyk@163.com> * delete healthscope related test Signed-off-by: wangyike <wangyike_wyk@163.com> * small fix Signed-off-by: wangyike <wangyike_wyk@163.com> * remove useless report Signed-off-by: wangyike <wangyike_wyk@163.com>
This commit is contained in:
4
.github/workflows/e2e-rollout-test.yml
vendored
4
.github/workflows/e2e-rollout-test.yml
vendored
@@ -90,12 +90,12 @@ jobs:
|
||||
uses: codecov/codecov-action@v1
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
files: /tmp/e2e-profile.out,/tmp/oam-e2e-profile.out
|
||||
files: /tmp/e2e-profile.out
|
||||
flags: e2e-rollout-tests
|
||||
name: codecov-umbrella
|
||||
|
||||
- name: Clean e2e profile
|
||||
run: rm /tmp/e2e-profile.out /tmp/oam-e2e-profile.out
|
||||
run: rm /tmp/e2e-profile.out
|
||||
|
||||
- name: Cleanup image
|
||||
if: ${{ always() }}
|
||||
|
||||
4
.github/workflows/e2e-test.yml
vendored
4
.github/workflows/e2e-test.yml
vendored
@@ -96,12 +96,12 @@ jobs:
|
||||
uses: codecov/codecov-action@v1
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
files: /tmp/e2e-profile.out,/tmp/oam-e2e-profile.out
|
||||
files: /tmp/e2e-profile.out
|
||||
flags: e2etests
|
||||
name: codecov-umbrella
|
||||
|
||||
- name: Clean e2e profile
|
||||
run: rm /tmp/e2e-profile.out /tmp/oam-e2e-profile.out
|
||||
run: rm /tmp/e2e-profile.out
|
||||
|
||||
- name: Cleanup image
|
||||
if: ${{ always() }}
|
||||
|
||||
46
.github/workflows/go.yml
vendored
46
.github/workflows/go.yml
vendored
@@ -33,52 +33,6 @@ jobs:
|
||||
do_not_skip: '["workflow_dispatch", "schedule", "push"]'
|
||||
concurrent_skipping: false
|
||||
|
||||
compatibility-test:
|
||||
runs-on: ubuntu-20.04
|
||||
needs: detect-noop
|
||||
if: needs.detect-noop.outputs.noop != 'true'
|
||||
|
||||
steps:
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v1
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
id: go
|
||||
|
||||
- name: Check out code into the Go module directory
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
submodules: true
|
||||
|
||||
- name: Cache Go Dependencies
|
||||
uses: actions/cache@v2
|
||||
with:
|
||||
path: .work/pkg
|
||||
key: ${{ runner.os }}-pkg-${{ hashFiles('**/go.sum') }}
|
||||
restore-keys: ${{ runner.os }}-pkg-
|
||||
|
||||
- name: Install ginkgo
|
||||
run: |
|
||||
sudo apt-get install -y golang-ginkgo-dev
|
||||
|
||||
- name: Setup Kind Cluster
|
||||
uses: engineerd/setup-kind@v0.5.0
|
||||
with:
|
||||
version: ${{ env.KIND_VERSION }}
|
||||
|
||||
- name: install Kubebuilder
|
||||
uses: RyanSiu1995/kubebuilder-action@v1.2
|
||||
with:
|
||||
version: 3.1.0
|
||||
kubebuilderOnly: false
|
||||
kubernetesVersion: v1.21.2
|
||||
|
||||
- name: Run Make compatibility-test
|
||||
run: make compatibility-test
|
||||
|
||||
- name: Clean up testdata
|
||||
run: make compatibility-testdata-cleanup
|
||||
|
||||
staticcheck:
|
||||
runs-on: ubuntu-20.04
|
||||
needs: detect-noop
|
||||
|
||||
13
Makefile
13
Makefile
@@ -165,7 +165,6 @@ e2e-setup:
|
||||
helm install kruise https://github.com/openkruise/kruise/releases/download/v0.9.0/kruise-chart.tgz --set featureGates="PreDownloadImageForInPlaceUpdate=true"
|
||||
sh ./hack/e2e/modify_charts.sh
|
||||
helm upgrade --install --create-namespace --namespace vela-system --set image.pullPolicy=IfNotPresent --set image.repository=vela-core-test --set applicationRevisionLimit=5 --set dependCheckWait=10s --set image.tag=$(GIT_COMMIT) --wait kubevela ./charts/vela-core
|
||||
helm upgrade --install --create-namespace --namespace oam-runtime-system --set image.pullPolicy=IfNotPresent --set image.repository=vela-core-test --set dependCheckWait=10s --set image.tag=$(GIT_COMMIT) --wait oam-runtime ./charts/oam-runtime
|
||||
go run ./e2e/addon/mock &
|
||||
bin/vela addon enable fluxcd
|
||||
bin/vela addon enable terraform
|
||||
@@ -208,18 +207,6 @@ e2e-multicluster-test:
|
||||
go test -v -coverpkg=./... -coverprofile=/tmp/e2e_multicluster_test.out ./test/e2e-multicluster-test
|
||||
@$(OK) tests pass
|
||||
|
||||
compatibility-test: vet lint staticcheck generate-compatibility-testdata
|
||||
# Run compatibility test with old crd
|
||||
COMPATIBILITY_TEST=TRUE go test -race $(shell go list ./pkg/... | grep -v apiserver)
|
||||
@$(OK) compatibility-test pass
|
||||
|
||||
generate-compatibility-testdata:
|
||||
mkdir -p ./test/compatibility-test/testdata
|
||||
go run ./test/compatibility-test/convert/main.go ./charts/vela-core/crds ./test/compatibility-test/testdata
|
||||
|
||||
compatibility-testdata-cleanup:
|
||||
rm -f ./test/compatibility-test/testdata/*
|
||||
|
||||
e2e-cleanup:
|
||||
# Clean up
|
||||
rm -rf ~/.vela
|
||||
|
||||
@@ -1,65 +0,0 @@
|
||||
/*
|
||||
Copyright 2021 The KubeVela Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main // #nosec
|
||||
|
||||
// generate compatibility testdata
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func main() {
|
||||
var srcdir, dstdir string
|
||||
if len(os.Args) > 1 {
|
||||
srcdir = os.Args[1]
|
||||
dstdir = os.Args[2]
|
||||
}
|
||||
err := filepath.Walk(srcdir, func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
}
|
||||
if info.IsDir() {
|
||||
return nil
|
||||
}
|
||||
/* #nosec */
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, "failed to read file", err)
|
||||
return err
|
||||
}
|
||||
fileName := info.Name()
|
||||
var newdata string
|
||||
if fileName == "core.oam.dev_workloaddefinitions.yaml" || fileName == "core.oam.dev_traitdefinitions.yaml" || fileName == "core.oam.dev_scopedefinitions.yaml" {
|
||||
newdata = strings.ReplaceAll(string(data), "scope: Namespaced", "scope: Cluster")
|
||||
} else {
|
||||
newdata = string(data)
|
||||
}
|
||||
dstpath := dstdir + "/" + fileName
|
||||
/* #nosec */
|
||||
if err = os.WriteFile(dstpath, []byte(newdata), 0644); err != nil {
|
||||
fmt.Fprintln(os.Stderr, "failed to write file:", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
@@ -1,558 +0,0 @@
|
||||
/*
|
||||
Copyright 2021 The KubeVela Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package controllers_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
kruise "github.com/openkruise/kruise-api/apps/v1alpha1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
ctypes "k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/utils/pointer"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
apicommon "github.com/oam-dev/kubevela/apis/core.oam.dev/common"
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
|
||||
"github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
|
||||
"github.com/oam-dev/kubevela/pkg/controller/utils"
|
||||
"github.com/oam-dev/kubevela/pkg/oam/util"
|
||||
"github.com/oam-dev/kubevela/pkg/utils/common"
|
||||
)
|
||||
|
||||
var _ = PDescribe("rollout related e2e-test,Cloneset based app embed rollout tests", func() {
|
||||
ctx := context.Background()
|
||||
var namespaceName string
|
||||
var ns corev1.Namespace
|
||||
var kc kruise.CloneSet
|
||||
var app v1beta1.Application
|
||||
var appName string
|
||||
initialProperty := `{"cmd":["./podinfo","stress-cpu=1"],"image":"stefanprodan/podinfo:4.0.3","port":8080,"replicas":6}`
|
||||
|
||||
createNamespace := func() {
|
||||
ns = corev1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: namespaceName,
|
||||
},
|
||||
}
|
||||
// delete the namespaceName with all its resources
|
||||
Eventually(
|
||||
func() error {
|
||||
return k8sClient.Delete(ctx, &ns, client.PropagationPolicy(metav1.DeletePropagationForeground))
|
||||
},
|
||||
time.Second*120, time.Millisecond*500).Should(SatisfyAny(BeNil(), &util.NotFoundMatcher{}))
|
||||
By("make sure all the resources are removed")
|
||||
objectKey := client.ObjectKey{
|
||||
Name: namespaceName,
|
||||
}
|
||||
res := &corev1.Namespace{}
|
||||
Eventually(
|
||||
func() error {
|
||||
return k8sClient.Get(ctx, objectKey, res)
|
||||
},
|
||||
time.Second*120, time.Millisecond*500).Should(&util.NotFoundMatcher{})
|
||||
Eventually(
|
||||
func() error {
|
||||
return k8sClient.Create(ctx, &ns)
|
||||
},
|
||||
time.Second*3, time.Millisecond*300).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
|
||||
}
|
||||
|
||||
CreateClonesetDef := func() {
|
||||
By("Install CloneSet based componentDefinition")
|
||||
var cd v1beta1.ComponentDefinition
|
||||
Expect(common.ReadYamlToObject("testdata/rollout/cloneset/clonesetDefinition.yaml", &cd)).Should(BeNil())
|
||||
// create the componentDefinition if not exist
|
||||
Eventually(
|
||||
func() error {
|
||||
return k8sClient.Create(ctx, &cd)
|
||||
},
|
||||
time.Second*3, time.Millisecond*300).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
|
||||
}
|
||||
|
||||
CreateIngressDef := func() {
|
||||
By("Install Ingress trait definition")
|
||||
var td v1beta1.TraitDefinition
|
||||
Expect(common.ReadYamlToObject("testdata/rollout/cloneset/ingressDefinition.yaml", &td)).Should(BeNil())
|
||||
// create the traitDefinition if not exist
|
||||
Eventually(
|
||||
func() error {
|
||||
return k8sClient.Create(ctx, &td)
|
||||
},
|
||||
time.Second*3, time.Millisecond*300).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
|
||||
}
|
||||
|
||||
generateNewApp := func(appName, namespace, compType string, plan *v1alpha1.RolloutPlan) *v1beta1.Application {
|
||||
return &v1beta1.Application{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Application",
|
||||
APIVersion: "core.oam.dev/v1beta1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: appName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1beta1.ApplicationSpec{
|
||||
Components: []apicommon.ApplicationComponent{
|
||||
{
|
||||
Name: appName,
|
||||
Type: compType,
|
||||
Properties: &runtime.RawExtension{
|
||||
Raw: []byte(initialProperty),
|
||||
},
|
||||
},
|
||||
},
|
||||
RolloutPlan: plan,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
BeforeEach(func() {
|
||||
By("Start to run a test, clean up previous resources")
|
||||
namespaceName = randomNamespaceName("app-rollout-e2e-test")
|
||||
createNamespace()
|
||||
CreateClonesetDef()
|
||||
CreateIngressDef()
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
By("Clean up resources after a test")
|
||||
k8sClient.DeleteAllOf(ctx, &v1beta1.Application{}, client.InNamespace(namespaceName))
|
||||
k8sClient.DeleteAllOf(ctx, &v1beta1.ComponentDefinition{}, client.InNamespace(namespaceName))
|
||||
k8sClient.DeleteAllOf(ctx, &v1beta1.WorkloadDefinition{}, client.InNamespace(namespaceName))
|
||||
k8sClient.DeleteAllOf(ctx, &v1beta1.TraitDefinition{}, client.InNamespace(namespaceName))
|
||||
|
||||
By(fmt.Sprintf("Delete the entire namespaceName %s", ns.Name))
|
||||
// delete the namespaceName with all its resources
|
||||
Expect(k8sClient.Delete(ctx, &ns, client.PropagationPolicy(metav1.DeletePropagationBackground))).Should(BeNil())
|
||||
})
|
||||
|
||||
verifyRolloutSucceeded := func(targetAppRevisionName string, cpu string) {
|
||||
By("verify application status")
|
||||
Eventually(
|
||||
func() error {
|
||||
app = v1beta1.Application{}
|
||||
if err := k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: appName}, &app); err != nil {
|
||||
return err
|
||||
}
|
||||
if app.Status.Rollout == nil {
|
||||
return fmt.Errorf("application is under creating, app status rollout is nil, %v", app.Status)
|
||||
}
|
||||
if app.Status.Rollout.LastUpgradedTargetAppRevision != app.Status.LatestRevision.Name {
|
||||
return fmt.Errorf("rollout controller haven't handle this change, targetRevision isn't right")
|
||||
}
|
||||
if app.Status.Rollout.RollingState != v1alpha1.RolloutSucceedState {
|
||||
return fmt.Errorf("app status rollingStatus not succeed acctually %s", app.Status.Rollout.RollingState)
|
||||
}
|
||||
if app.Status.Phase != apicommon.ApplicationRunning {
|
||||
return fmt.Errorf("app status not running acctually %s", app.Status.Phase)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
time.Second*120, time.Second).Should(BeNil())
|
||||
Expect(app.Status.Rollout.UpgradedReadyReplicas).Should(BeEquivalentTo(app.Status.Rollout.RolloutTargetSize))
|
||||
Expect(app.Status.Rollout.UpgradedReplicas).Should(BeEquivalentTo(app.Status.Rollout.RolloutTargetSize))
|
||||
clonesetName := app.Spec.Components[0].Name
|
||||
|
||||
By("Verify cloneset status")
|
||||
var clonesetOwner *metav1.OwnerReference
|
||||
Eventually(
|
||||
func() error {
|
||||
if err := k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: clonesetName}, &kc); err != nil {
|
||||
return err
|
||||
}
|
||||
clonesetOwner = metav1.GetControllerOf(&kc)
|
||||
if clonesetOwner == nil {
|
||||
return fmt.Errorf("cloneset don't have any controller owner")
|
||||
}
|
||||
if clonesetOwner.Kind != v1beta1.ResourceTrackerKind {
|
||||
return fmt.Errorf("cloneset owner mismatch wants %s actually %s", v1beta1.ResourceTrackerKind, clonesetOwner.Kind)
|
||||
}
|
||||
if kc.Status.UpdatedReplicas != *kc.Spec.Replicas {
|
||||
return fmt.Errorf("upgraded pod number error")
|
||||
}
|
||||
resourceTrackerName := fmt.Sprintf("%s-%s", targetAppRevisionName, app.Namespace)
|
||||
if clonesetOwner.Name != resourceTrackerName {
|
||||
return fmt.Errorf("resourceTracker haven't take back controller owner")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
time.Second*30, time.Millisecond*500).Should(BeNil())
|
||||
By("Verify pod status")
|
||||
Eventually(func() error {
|
||||
podList := corev1.PodList{}
|
||||
if err := k8sClient.List(ctx, &podList, client.MatchingLabels(kc.Spec.Template.Labels), client.InNamespace(namespaceName)); err != nil {
|
||||
return err
|
||||
}
|
||||
if len(podList.Items) != int(*kc.Spec.Replicas) {
|
||||
return fmt.Errorf("pod number error")
|
||||
}
|
||||
for _, pod := range podList.Items {
|
||||
if pod.Status.Phase != corev1.PodRunning {
|
||||
return fmt.Errorf("pod status error %s", pod.Status.Phase)
|
||||
}
|
||||
if pod.Spec.Containers[0].Command[1] != fmt.Sprintf("stress-cpu=%s", cpu) {
|
||||
return fmt.Errorf("pod cmmond haven't updated")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}, time.Second*120, time.Microsecond).Should(BeNil())
|
||||
}
|
||||
|
||||
updateAppWithCpuAndPlan := func(app *v1beta1.Application, cpu string, plan *v1alpha1.RolloutPlan) {
|
||||
Eventually(func() error {
|
||||
checkApp := new(v1beta1.Application)
|
||||
if err := k8sClient.Get(ctx, ctypes.NamespacedName{Namespace: namespaceName, Name: app.Name}, checkApp); err != nil {
|
||||
return err
|
||||
}
|
||||
updateProperty := fmt.Sprintf(`{"cmd":["./podinfo","stress-cpu=%s"],"image":"stefanprodan/podinfo:4.0.3","port":8080,"replicas":6}`, cpu)
|
||||
checkApp.Spec.Components[0].Properties.Raw = []byte(updateProperty)
|
||||
checkApp.Spec.RolloutPlan = plan
|
||||
if err := k8sClient.Update(ctx, checkApp); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}, time.Second*30, time.Microsecond*300).Should(BeNil())
|
||||
}
|
||||
|
||||
It("Test upgrade application", func() {
|
||||
plan := &v1alpha1.RolloutPlan{
|
||||
RolloutStrategy: v1alpha1.IncreaseFirstRolloutStrategyType,
|
||||
RolloutBatches: []v1alpha1.RolloutBatch{
|
||||
{
|
||||
Replicas: intstr.FromString("50%"),
|
||||
},
|
||||
{
|
||||
Replicas: intstr.FromString("50%"),
|
||||
},
|
||||
},
|
||||
TargetSize: pointer.Int32Ptr(6),
|
||||
}
|
||||
appName = "app-rollout-1"
|
||||
app := generateNewApp(appName, namespaceName, "clonesetservice", plan)
|
||||
Expect(k8sClient.Create(ctx, app)).Should(BeNil())
|
||||
verifyRolloutSucceeded(utils.ConstructRevisionName(appName, 1), "1")
|
||||
updateAppWithCpuAndPlan(app, "2", plan)
|
||||
verifyRolloutSucceeded(utils.ConstructRevisionName(appName, 2), "2")
|
||||
updateAppWithCpuAndPlan(app, "3", plan)
|
||||
verifyRolloutSucceeded(utils.ConstructRevisionName(appName, 3), "3")
|
||||
})
|
||||
|
||||
It("Test application only upgrade batchPartition", func() {
|
||||
plan := &v1alpha1.RolloutPlan{
|
||||
RolloutStrategy: v1alpha1.IncreaseFirstRolloutStrategyType,
|
||||
RolloutBatches: []v1alpha1.RolloutBatch{
|
||||
{
|
||||
Replicas: intstr.FromString("50%"),
|
||||
},
|
||||
{
|
||||
Replicas: intstr.FromString("50%"),
|
||||
},
|
||||
},
|
||||
TargetSize: pointer.Int32Ptr(6),
|
||||
}
|
||||
appName = "app-roll-out-2"
|
||||
app := generateNewApp(appName, namespaceName, "clonesetservice", plan)
|
||||
Expect(k8sClient.Create(ctx, app)).Should(BeNil())
|
||||
verifyRolloutSucceeded(utils.ConstructRevisionName(appName, 1), "1")
|
||||
app.Spec.RolloutPlan.BatchPartition = pointer.Int32Ptr(0)
|
||||
plan = &v1alpha1.RolloutPlan{
|
||||
RolloutStrategy: v1alpha1.IncreaseFirstRolloutStrategyType,
|
||||
RolloutBatches: []v1alpha1.RolloutBatch{
|
||||
{
|
||||
Replicas: intstr.FromString("50%"),
|
||||
},
|
||||
{
|
||||
Replicas: intstr.FromString("50%"),
|
||||
},
|
||||
},
|
||||
TargetSize: pointer.Int32Ptr(6),
|
||||
BatchPartition: pointer.Int32Ptr(0),
|
||||
}
|
||||
updateAppWithCpuAndPlan(app, "2", plan)
|
||||
|
||||
By("upgrade first batch partition, verify the middle state")
|
||||
// give controller some time to upgrade one batch
|
||||
time.Sleep(15 * time.Second)
|
||||
Eventually(func() error {
|
||||
checkApp := new(v1beta1.Application)
|
||||
if err := k8sClient.Get(ctx, ctypes.NamespacedName{Name: appName, Namespace: namespaceName}, checkApp); err != nil {
|
||||
return err
|
||||
}
|
||||
if checkApp.Status.Rollout.LastUpgradedTargetAppRevision != utils.ConstructRevisionName(appName, 2) {
|
||||
return fmt.Errorf("app status lastTargetRevision mismatch")
|
||||
}
|
||||
if checkApp.Status.Rollout.LastSourceAppRevision != utils.ConstructRevisionName(appName, 1) {
|
||||
return fmt.Errorf("app status lastSourceRevision mismatch")
|
||||
}
|
||||
if checkApp.Status.Rollout.RollingState != v1alpha1.RollingInBatchesState {
|
||||
return fmt.Errorf("app status rolling state mismatch")
|
||||
}
|
||||
if checkApp.Status.Rollout.UpgradedReplicas != 3 || checkApp.Status.Rollout.UpgradedReadyReplicas != 3 {
|
||||
return fmt.Errorf("app status upgraded status error")
|
||||
}
|
||||
if checkApp.Status.Phase != apicommon.ApplicationRollingOut {
|
||||
return fmt.Errorf("app status phase error")
|
||||
}
|
||||
return nil
|
||||
}, time.Second*120, time.Microsecond*300).Should(BeNil())
|
||||
clonesetName := app.Spec.Components[0].Name
|
||||
Eventually(
|
||||
func() error {
|
||||
if err := k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: clonesetName}, &kc); err != nil {
|
||||
return err
|
||||
}
|
||||
if kc.Status.UpdatedReplicas != 3 {
|
||||
return fmt.Errorf("upgraded pod number error")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
time.Second*120, time.Millisecond*500).Should(BeNil())
|
||||
By("Verify rollout first batch pod status")
|
||||
Eventually(func() error {
|
||||
podList := corev1.PodList{}
|
||||
if err := k8sClient.List(ctx, &podList, client.MatchingLabels(kc.Spec.Template.Labels), client.InNamespace(namespaceName)); err != nil {
|
||||
return err
|
||||
}
|
||||
if len(podList.Items) != int(*kc.Spec.Replicas) {
|
||||
return fmt.Errorf("pod number error %d", len(podList.Items))
|
||||
}
|
||||
middlePodRes := map[string]int{}
|
||||
for _, pod := range podList.Items {
|
||||
if pod.Spec.Containers[0].Command[1] == fmt.Sprintf("stress-cpu=%d", 1) {
|
||||
middlePodRes[utils.ConstructRevisionName(appName, 1)]++
|
||||
}
|
||||
if pod.Spec.Containers[0].Command[1] == fmt.Sprintf("stress-cpu=%d", 1) {
|
||||
middlePodRes[utils.ConstructRevisionName(appName, 2)]++
|
||||
}
|
||||
Expect(pod.Status.Phase).Should(Equal(corev1.PodRunning))
|
||||
}
|
||||
if middlePodRes[utils.ConstructRevisionName(appName, 1)] != 3 {
|
||||
return fmt.Errorf("revison-1 pod number error ")
|
||||
}
|
||||
if middlePodRes[utils.ConstructRevisionName(appName, 2)] != 3 {
|
||||
return fmt.Errorf("revison-2 pod number error")
|
||||
}
|
||||
return nil
|
||||
}, time.Second*30, time.Microsecond*300).Should(BeNil())
|
||||
|
||||
By("continue rollout next partition and verify status")
|
||||
checkApp := new(v1beta1.Application)
|
||||
Expect(k8sClient.Get(ctx, ctypes.NamespacedName{Namespace: namespaceName, Name: appName}, checkApp)).Should(BeNil())
|
||||
plan = checkApp.Spec.RolloutPlan
|
||||
plan.BatchPartition = pointer.Int32Ptr(1)
|
||||
updateAppWithCpuAndPlan(app, "2", plan)
|
||||
verifyRolloutSucceeded(utils.ConstructRevisionName(appName, 2), "2")
|
||||
By("update again continue rollout to revision-3")
|
||||
updateAppWithCpuAndPlan(app, "3", plan)
|
||||
verifyRolloutSucceeded(utils.ConstructRevisionName(appName, 3), "3")
|
||||
})
|
||||
|
||||
It("Test upgrade application in middle of rolling out", func() {
|
||||
plan := &v1alpha1.RolloutPlan{
|
||||
RolloutStrategy: v1alpha1.IncreaseFirstRolloutStrategyType,
|
||||
RolloutBatches: []v1alpha1.RolloutBatch{
|
||||
{
|
||||
Replicas: intstr.FromString("50%"),
|
||||
},
|
||||
{
|
||||
Replicas: intstr.FromString("50%"),
|
||||
},
|
||||
},
|
||||
TargetSize: pointer.Int32Ptr(6),
|
||||
}
|
||||
appName = "app-rollout-3"
|
||||
app := generateNewApp(appName, namespaceName, "clonesetservice", plan)
|
||||
Expect(k8sClient.Create(ctx, app)).Should(BeNil())
|
||||
verifyRolloutSucceeded(utils.ConstructRevisionName(appName, 1), "1")
|
||||
updateAppWithCpuAndPlan(app, "2", plan)
|
||||
|
||||
By("Wait for the rollout phase change to rolling in batches")
|
||||
Eventually(func() error {
|
||||
checkApp := new(v1beta1.Application)
|
||||
if err := k8sClient.Get(ctx, ctypes.NamespacedName{Name: appName, Namespace: namespaceName}, checkApp); err != nil {
|
||||
return err
|
||||
}
|
||||
if checkApp.Status.Rollout.LastUpgradedTargetAppRevision != utils.ConstructRevisionName(appName, 2) {
|
||||
return fmt.Errorf("app status lastTargetRevision mismatch actually %s ", checkApp.Status.Rollout.LastUpgradedTargetAppRevision)
|
||||
}
|
||||
if checkApp.Status.Rollout.LastSourceAppRevision != utils.ConstructRevisionName(appName, 1) {
|
||||
return fmt.Errorf("app status lastSourceRevision mismatch actually %s ", checkApp.Status.Rollout.LastSourceAppRevision)
|
||||
}
|
||||
if checkApp.Status.Rollout.RollingState != v1alpha1.RollingInBatchesState {
|
||||
return fmt.Errorf("app status rolling state mismatch")
|
||||
}
|
||||
return nil
|
||||
}, time.Second*60, time.Microsecond*300).Should(BeNil())
|
||||
|
||||
By("update app in middle of rollout and verify status")
|
||||
updateAppWithCpuAndPlan(app, "3", plan)
|
||||
verifyRolloutSucceeded(utils.ConstructRevisionName(appName, 3), "3")
|
||||
})
|
||||
|
||||
It("Test pause in middle of embed app rolling out", func() {
|
||||
plan := &v1alpha1.RolloutPlan{
|
||||
RolloutStrategy: v1alpha1.IncreaseFirstRolloutStrategyType,
|
||||
RolloutBatches: []v1alpha1.RolloutBatch{
|
||||
{
|
||||
Replicas: intstr.FromString("50%"),
|
||||
},
|
||||
{
|
||||
Replicas: intstr.FromString("50%"),
|
||||
},
|
||||
},
|
||||
TargetSize: pointer.Int32Ptr(6),
|
||||
}
|
||||
appName = "app-rollout-4"
|
||||
app := generateNewApp(appName, namespaceName, "clonesetservice", plan)
|
||||
Expect(k8sClient.Create(ctx, app)).Should(BeNil())
|
||||
verifyRolloutSucceeded(utils.ConstructRevisionName(appName, 1), "1")
|
||||
updateAppWithCpuAndPlan(app, "2", plan)
|
||||
|
||||
By("Wait for rollout phase change to rolling in batches")
|
||||
checkApp := new(v1beta1.Application)
|
||||
Eventually(func() error {
|
||||
if err := k8sClient.Get(ctx, ctypes.NamespacedName{Name: appName, Namespace: namespaceName}, checkApp); err != nil {
|
||||
return err
|
||||
}
|
||||
if checkApp.Status.Rollout.LastUpgradedTargetAppRevision != utils.ConstructRevisionName(appName, 2) {
|
||||
return fmt.Errorf("app status lastTargetRevision mismatch actually %s ", checkApp.Status.Rollout.LastUpgradedTargetAppRevision)
|
||||
}
|
||||
if checkApp.Status.Rollout.LastSourceAppRevision != utils.ConstructRevisionName(appName, 1) {
|
||||
return fmt.Errorf("app status lastSourceRevision mismatch actually %s ", checkApp.Status.Rollout.LastSourceAppRevision)
|
||||
}
|
||||
if checkApp.Status.Rollout.RollingState != v1alpha1.RollingInBatchesState {
|
||||
return fmt.Errorf("app status rolling state mismatch")
|
||||
}
|
||||
return nil
|
||||
}, time.Second*60, time.Microsecond*300).Should(BeNil())
|
||||
|
||||
By("pause app in middle of rollout and verify status")
|
||||
plan.Paused = true
|
||||
updateAppWithCpuAndPlan(app, "2", plan)
|
||||
By("verify update rolloutPlan shouldn't create new revision")
|
||||
Expect(k8sClient.Get(ctx, ctypes.NamespacedName{Name: appName, Namespace: namespaceName}, checkApp)).Should(BeNil())
|
||||
Expect(checkApp.Status.LatestRevision.Name).Should(BeEquivalentTo(utils.ConstructRevisionName(appName, 2)))
|
||||
By("Verify that the app rollout pauses")
|
||||
Eventually(func() error {
|
||||
if err := k8sClient.Get(ctx, ctypes.NamespacedName{Name: appName, Namespace: namespaceName}, checkApp); err != nil {
|
||||
return err
|
||||
}
|
||||
if checkApp.Status.Rollout.GetCondition(v1alpha1.BatchPaused).Status != corev1.ConditionTrue {
|
||||
return fmt.Errorf("rollout status not paused")
|
||||
}
|
||||
return nil
|
||||
}, time.Second*30, time.Microsecond*300).Should(BeNil())
|
||||
preBatch := checkApp.Status.Rollout.CurrentBatch
|
||||
sleepTime := 10 * time.Second
|
||||
time.Sleep(sleepTime)
|
||||
Expect(k8sClient.Get(ctx, ctypes.NamespacedName{Name: appName, Namespace: namespaceName}, checkApp)).Should(BeNil())
|
||||
Expect(checkApp.Status.Rollout.RollingState).Should(BeEquivalentTo(v1alpha1.RollingInBatchesState))
|
||||
Expect(checkApp.Status.Rollout.CurrentBatch).Should(BeEquivalentTo(preBatch))
|
||||
transitTime := checkApp.Status.Rollout.GetCondition(v1alpha1.BatchPaused).LastTransitionTime
|
||||
beforeSleep := metav1.Time{
|
||||
Time: time.Now().Add(sleepTime),
|
||||
}
|
||||
Expect(transitTime.Before(&beforeSleep)).Should(BeTrue())
|
||||
By("continue rollout and verify status ")
|
||||
plan.Paused = false
|
||||
updateAppWithCpuAndPlan(app, "2", plan)
|
||||
By("verify update rolloutPlan shouldn't create new revision")
|
||||
Expect(k8sClient.Get(ctx, ctypes.NamespacedName{Name: appName, Namespace: namespaceName}, checkApp)).Should(BeNil())
|
||||
Expect(checkApp.Status.LatestRevision.Name).Should(BeEquivalentTo(utils.ConstructRevisionName(appName, 2)))
|
||||
verifyRolloutSucceeded(utils.ConstructRevisionName(appName, 2), "2")
|
||||
})
|
||||
|
||||
It("Test rollout with trait", func() {
|
||||
plan := &v1alpha1.RolloutPlan{
|
||||
RolloutStrategy: v1alpha1.IncreaseFirstRolloutStrategyType,
|
||||
RolloutBatches: []v1alpha1.RolloutBatch{
|
||||
{
|
||||
Replicas: intstr.FromString("50%"),
|
||||
},
|
||||
{
|
||||
Replicas: intstr.FromString("50%"),
|
||||
},
|
||||
},
|
||||
TargetSize: pointer.Int32Ptr(6),
|
||||
}
|
||||
appName = "app-rollout-5"
|
||||
app := generateNewApp(appName, namespaceName, "clonesetservice", plan)
|
||||
ingressProperties := `{"domain":"test-1.example.com","http":{"/":8080}}`
|
||||
app.Spec.Components[0].Traits = []apicommon.ApplicationTrait{{Type: "ingress", Properties: &runtime.RawExtension{Raw: []byte(ingressProperties)}}}
|
||||
Expect(k8sClient.Create(ctx, app)).Should(BeNil())
|
||||
verifyRolloutSucceeded(utils.ConstructRevisionName(appName, 1), "1")
|
||||
updateAppWithCpuAndPlan(app, "2", plan)
|
||||
By("rollout to v2")
|
||||
checkApp := new(v1beta1.Application)
|
||||
verifyRolloutSucceeded(utils.ConstructRevisionName(appName, 2), "2")
|
||||
Expect(k8sClient.Get(ctx, ctypes.NamespacedName{Name: appName, Namespace: namespaceName}, checkApp)).Should(BeNil())
|
||||
Expect(checkApp.Status.LatestRevision.Name).Should(BeEquivalentTo(utils.ConstructRevisionName(appName, 2)))
|
||||
updateAppWithCpuAndPlan(app, "3", plan)
|
||||
By("rollout to v3")
|
||||
verifyRolloutSucceeded(utils.ConstructRevisionName(appName, 3), "3")
|
||||
Expect(k8sClient.Get(ctx, ctypes.NamespacedName{Name: appName, Namespace: namespaceName}, checkApp)).Should(BeNil())
|
||||
Expect(checkApp.Status.LatestRevision.Name).Should(BeEquivalentTo(utils.ConstructRevisionName(appName, 3)))
|
||||
})
|
||||
|
||||
It("Test rollout with another component only rollout first component", func() {
|
||||
plan := &v1alpha1.RolloutPlan{
|
||||
RolloutStrategy: v1alpha1.IncreaseFirstRolloutStrategyType,
|
||||
RolloutBatches: []v1alpha1.RolloutBatch{
|
||||
{
|
||||
Replicas: intstr.FromString("50%"),
|
||||
},
|
||||
{
|
||||
Replicas: intstr.FromString("50%"),
|
||||
},
|
||||
},
|
||||
TargetSize: pointer.Int32Ptr(6),
|
||||
}
|
||||
appName = "app-rollout-6"
|
||||
app := generateNewApp(appName, namespaceName, "clonesetservice", plan)
|
||||
annotherComp := apicommon.ApplicationComponent{
|
||||
Name: "another-comp",
|
||||
Type: "clonesetservice",
|
||||
Properties: &runtime.RawExtension{
|
||||
Raw: []byte(initialProperty),
|
||||
},
|
||||
}
|
||||
app.Spec.Components = append(app.Spec.Components, annotherComp)
|
||||
Expect(k8sClient.Create(ctx, app)).Should(BeNil())
|
||||
verifyRolloutSucceeded(utils.ConstructRevisionName(appName, 1), "1")
|
||||
updateAppWithCpuAndPlan(app, "2", plan)
|
||||
|
||||
checkApp := new(v1beta1.Application)
|
||||
By("verify update rolloutPlan shouldn't create new revision")
|
||||
verifyRolloutSucceeded(utils.ConstructRevisionName(appName, 2), "2")
|
||||
Expect(k8sClient.Get(ctx, ctypes.NamespacedName{Name: appName, Namespace: namespaceName}, checkApp)).Should(BeNil())
|
||||
Expect(checkApp.Status.LatestRevision.Name).Should(BeEquivalentTo(utils.ConstructRevisionName(appName, 2)))
|
||||
updateAppWithCpuAndPlan(app, "3", plan)
|
||||
verifyRolloutSucceeded(utils.ConstructRevisionName(appName, 3), "3")
|
||||
Expect(k8sClient.Get(ctx, ctypes.NamespacedName{Name: appName, Namespace: namespaceName}, checkApp)).Should(BeNil())
|
||||
Expect(checkApp.Status.LatestRevision.Name).Should(BeEquivalentTo(utils.ConstructRevisionName(appName, 3)))
|
||||
})
|
||||
|
||||
// TODO add more corner case tests
|
||||
// update application by clean rolloutPlan strategy in the middle of rollout process
|
||||
})
|
||||
@@ -1,245 +0,0 @@
|
||||
/*
|
||||
Copyright 2021 The KubeVela Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package controllers_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
||||
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
|
||||
"github.com/oam-dev/kubevela/pkg/oam/util"
|
||||
)
|
||||
|
||||
var (
|
||||
workloadScopeFinalizer = "scope.finalizer.core.oam.dev"
|
||||
)
|
||||
|
||||
var _ = PDescribe("Finalizer for HealthScope in ApplicationConfiguration", func() {
|
||||
ctx := context.Background()
|
||||
namespace := "finalizer-test"
|
||||
ns := corev1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: namespace,
|
||||
},
|
||||
}
|
||||
var component v1alpha2.Component
|
||||
var appConfig v1alpha2.ApplicationConfiguration
|
||||
componentName := "example-component"
|
||||
appConfigName := "example-appconfig"
|
||||
healthScopeName := "example-health-scope"
|
||||
|
||||
BeforeEach(func() {
|
||||
logf.Log.Info("Start to run a test, clean up previous resources")
|
||||
ns = corev1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: namespace,
|
||||
},
|
||||
}
|
||||
component = v1alpha2.Component{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: "core.oam.dev/v1alpha2",
|
||||
Kind: "Component",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: componentName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1alpha2.ComponentSpec{
|
||||
Workload: runtime.RawExtension{
|
||||
Object: &appsv1.Deployment{
|
||||
Spec: appsv1.DeploymentSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"app": "nginx",
|
||||
},
|
||||
},
|
||||
Template: corev1.PodTemplateSpec{
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Image: "nginx:v3",
|
||||
Name: "nginx",
|
||||
},
|
||||
},
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "nginx"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
appConfig = v1alpha2.ApplicationConfiguration{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: appConfigName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
}
|
||||
// delete the namespace with all its resources
|
||||
Expect(k8sClient.Delete(ctx, &ns, client.PropagationPolicy(metav1.DeletePropagationForeground))).
|
||||
Should(SatisfyAny(BeNil(), &util.NotFoundMatcher{}))
|
||||
logf.Log.Info("make sure all the resources are removed")
|
||||
objectKey := client.ObjectKey{
|
||||
Name: namespace,
|
||||
}
|
||||
res := &corev1.Namespace{}
|
||||
Eventually(
|
||||
// gomega has a bug that can't take nil as the actual input, so has to make it a func
|
||||
func() error {
|
||||
return k8sClient.Get(ctx, objectKey, res)
|
||||
},
|
||||
time.Second*120, time.Millisecond*500).Should(SatisfyAny(BeNil(), &util.NotFoundMatcher{}))
|
||||
Eventually(
|
||||
func() error {
|
||||
return k8sClient.Create(ctx, &ns)
|
||||
},
|
||||
time.Second*3, time.Millisecond*300).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
|
||||
|
||||
// create Component definition
|
||||
By("Create Component definition")
|
||||
Expect(k8sClient.Create(ctx, &component)).Should(Succeed())
|
||||
|
||||
})
|
||||
AfterEach(func() {
|
||||
logf.Log.Info("Clean up resources")
|
||||
// delete the namespace with all its resources
|
||||
Expect(k8sClient.Delete(ctx, &ns, client.PropagationPolicy(metav1.DeletePropagationForeground))).Should(BeNil())
|
||||
})
|
||||
|
||||
When("AppConfig has no scopes", func() {
|
||||
It("should not register finalizer", func() {
|
||||
appConfig.Spec.Components = []v1alpha2.ApplicationConfigurationComponent{
|
||||
{
|
||||
ComponentName: componentName,
|
||||
},
|
||||
}
|
||||
By("Check component should already existed")
|
||||
Eventually(func() error {
|
||||
return k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: componentName}, &v1alpha2.Component{})
|
||||
}, time.Second*30, time.Microsecond*500).Should(BeNil())
|
||||
|
||||
By("Apply AppConfig")
|
||||
Expect(k8sClient.Create(ctx, &appConfig)).Should(Succeed())
|
||||
|
||||
By("Check appConfig reconciliation finished")
|
||||
Eventually(func() bool {
|
||||
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appConfigName}, &appConfig)
|
||||
return appConfig.ObjectMeta.Generation >= 1
|
||||
}, time.Second*30, time.Microsecond*500).Should(BeTrue())
|
||||
|
||||
By("Check no finalizer registered")
|
||||
Eventually(func() []string {
|
||||
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appConfigName}, &appConfig)
|
||||
return appConfig.ObjectMeta.Finalizers
|
||||
}, time.Second*30, time.Microsecond*500).ShouldNot(ContainElement(workloadScopeFinalizer))
|
||||
|
||||
})
|
||||
})
|
||||
|
||||
When("AppConfig has scopes", func() {
|
||||
It("should handle finalizer before being deleted", func() {
|
||||
// create health scope definition
|
||||
sd := v1alpha2.ScopeDefinition{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "healthscopes.core.oam.dev",
|
||||
Namespace: "vela-system",
|
||||
},
|
||||
Spec: v1alpha2.ScopeDefinitionSpec{
|
||||
AllowComponentOverlap: true,
|
||||
WorkloadRefsPath: "spec.workloadRefs",
|
||||
Reference: common.DefinitionReference{
|
||||
Name: "healthscope.core.oam.dev",
|
||||
},
|
||||
},
|
||||
}
|
||||
By("Creat health scope definition")
|
||||
Expect(k8sClient.Create(ctx, &sd)).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
|
||||
|
||||
// create health scope.
|
||||
hs := v1alpha2.HealthScope{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: healthScopeName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1alpha2.HealthScopeSpec{
|
||||
WorkloadReferences: []corev1.ObjectReference{},
|
||||
},
|
||||
}
|
||||
By("Creat health scope")
|
||||
Expect(k8sClient.Create(ctx, &hs)).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
|
||||
|
||||
appConfig.Spec.Components = []v1alpha2.ApplicationConfigurationComponent{
|
||||
{
|
||||
ComponentName: componentName,
|
||||
Scopes: []v1alpha2.ComponentScope{
|
||||
{
|
||||
ScopeReference: corev1.ObjectReference{
|
||||
APIVersion: "core.oam.dev/v1alpha2",
|
||||
Kind: "HealthScope",
|
||||
Name: healthScopeName,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
By("Apply AppConfig")
|
||||
Expect(k8sClient.Create(ctx, &appConfig)).Should(Succeed())
|
||||
|
||||
By("Check register finalizer")
|
||||
Eventually(func() []string {
|
||||
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appConfigName}, &appConfig)
|
||||
return appConfig.ObjectMeta.Finalizers
|
||||
}, time.Second*30, time.Microsecond*500).Should(ContainElement(workloadScopeFinalizer))
|
||||
|
||||
By("Check HealthScope WorkloadRefs")
|
||||
Eventually(func() int {
|
||||
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: healthScopeName}, &hs)
|
||||
return len(hs.Spec.WorkloadReferences)
|
||||
}, time.Second*30, time.Millisecond*500).Should(Equal(1))
|
||||
|
||||
By("Delete AppConfig")
|
||||
Expect(k8sClient.Delete(ctx, &appConfig)).Should(Succeed())
|
||||
|
||||
By("Check workload ref has been removed from HealthScope's WorkloadRefs")
|
||||
Eventually(func() int {
|
||||
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: healthScopeName}, &hs)
|
||||
return len(hs.Spec.WorkloadReferences)
|
||||
}, time.Second*30, time.Millisecond*500).Should(Equal(0))
|
||||
|
||||
By("Check AppConfig has been deleted successfully")
|
||||
deletedAppConfig := &v1alpha2.ApplicationConfiguration{}
|
||||
Eventually(
|
||||
func() error {
|
||||
return k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appConfigName}, deletedAppConfig)
|
||||
},
|
||||
time.Second*30, time.Microsecond*500).Should(&util.NotFoundMatcher{})
|
||||
})
|
||||
|
||||
})
|
||||
|
||||
})
|
||||
@@ -1,308 +0,0 @@
|
||||
/*
|
||||
Copyright 2021 The KubeVela Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package controllers_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
|
||||
"github.com/oam-dev/kubevela/pkg/oam/util"
|
||||
)
|
||||
|
||||
var _ = Describe("AppConfig renders workloads", func() {
|
||||
var (
|
||||
namespace = "appconfig-render-test"
|
||||
cwName = "test-cw"
|
||||
compName = "test-component"
|
||||
wdName = "deployments.apps"
|
||||
containerName = "test-container"
|
||||
containerImage = "notarealimage"
|
||||
acName = "test-ac"
|
||||
|
||||
envVars = []string{
|
||||
"VAR_ONE",
|
||||
"VAR_TWO",
|
||||
"VAR_THREE",
|
||||
}
|
||||
|
||||
paramVals = []string{
|
||||
"replace-one",
|
||||
"replace-two",
|
||||
"replace-three",
|
||||
}
|
||||
)
|
||||
ctx := context.TODO()
|
||||
ns := corev1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: namespace,
|
||||
},
|
||||
}
|
||||
|
||||
BeforeEach(func() {
|
||||
// delete the namespace with all its resources
|
||||
Expect(k8sClient.Delete(ctx, &ns, client.PropagationPolicy(metav1.DeletePropagationForeground))).
|
||||
Should(SatisfyAny(BeNil(), &util.NotFoundMatcher{}))
|
||||
Eventually(func() error {
|
||||
return k8sClient.Get(ctx, client.ObjectKey{Name: namespace}, &corev1.Namespace{})
|
||||
}, time.Second*120, time.Second*10).Should(&util.NotFoundMatcher{})
|
||||
Eventually(func() error {
|
||||
return k8sClient.Create(ctx, &ns)
|
||||
}, time.Second*3, time.Millisecond*300).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
// delete the namespace with all its resources
|
||||
Expect(k8sClient.Delete(ctx, &ns, client.PropagationPolicy(metav1.DeletePropagationForeground))).Should(BeNil())
|
||||
})
|
||||
|
||||
It("Test AppConfig controller renders workloads", func() {
|
||||
By("Create WorkloadDefinition")
|
||||
|
||||
label := map[string]string{"workload": "deployment-workload"}
|
||||
d := v1alpha2.WorkloadDefinition{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: wdName,
|
||||
Namespace: namespace,
|
||||
Labels: label,
|
||||
},
|
||||
Spec: v1alpha2.WorkloadDefinitionSpec{
|
||||
Reference: common.DefinitionReference{
|
||||
Name: "deployments.apps",
|
||||
},
|
||||
},
|
||||
}
|
||||
Expect(k8sClient.Create(ctx, &d)).Should(Succeed())
|
||||
|
||||
workload := appsv1.Deployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: namespace,
|
||||
Name: cwName,
|
||||
Labels: label,
|
||||
},
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Deployment",
|
||||
APIVersion: "apps/v1",
|
||||
},
|
||||
Spec: appsv1.DeploymentSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: label,
|
||||
},
|
||||
Template: corev1.PodTemplateSpec{
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Image: containerImage,
|
||||
Name: containerName,
|
||||
Env: []corev1.EnvVar{
|
||||
{
|
||||
Name: envVars[0],
|
||||
Value: paramVals[0],
|
||||
},
|
||||
{
|
||||
Name: envVars[1],
|
||||
Value: paramVals[1],
|
||||
},
|
||||
{
|
||||
Name: envVars[2],
|
||||
Value: paramVals[2],
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: namespace,
|
||||
Labels: label,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// reflect workload gvk from scheme
|
||||
gvks, _, _ := scheme.ObjectKinds(&workload)
|
||||
workload.APIVersion = gvks[0].GroupVersion().String()
|
||||
workload.Kind = gvks[0].Kind
|
||||
|
||||
rawWorkload := runtime.RawExtension{Object: &workload}
|
||||
|
||||
By("Create Component")
|
||||
co := comp(
|
||||
compWithName(compName),
|
||||
compWithNamespace(namespace),
|
||||
compWithLabels(label),
|
||||
compWithWorkload(rawWorkload),
|
||||
compWithParams([]v1alpha2.ComponentParameter{
|
||||
{
|
||||
Name: envVars[0],
|
||||
FieldPaths: []string{"spec.template.spec.containers[0].env[0].value"},
|
||||
},
|
||||
{
|
||||
Name: envVars[1],
|
||||
FieldPaths: []string{"spec.template.spec.containers[0].env[1].value"},
|
||||
},
|
||||
{
|
||||
Name: envVars[2],
|
||||
FieldPaths: []string{"spec.template.spec.containers[0].env[2].value"},
|
||||
},
|
||||
}))
|
||||
Expect(k8sClient.Create(ctx, co)).Should(Succeed())
|
||||
verifyComponentCreated("AC render 0", namespace, compName)
|
||||
|
||||
By("Create ApplicationConfiguration")
|
||||
ac := ac(
|
||||
acWithName(acName),
|
||||
acWithNamspace(namespace),
|
||||
acWithLabels(label),
|
||||
acWithComps([]v1alpha2.ApplicationConfigurationComponent{
|
||||
{
|
||||
ComponentName: compName,
|
||||
ParameterValues: []v1alpha2.ComponentParameterValue{
|
||||
{
|
||||
Name: envVars[0],
|
||||
Value: intstr.FromString(paramVals[0]),
|
||||
},
|
||||
{
|
||||
Name: envVars[1],
|
||||
Value: intstr.FromString(paramVals[1]),
|
||||
},
|
||||
{
|
||||
Name: envVars[2],
|
||||
Value: intstr.FromString(paramVals[2]),
|
||||
},
|
||||
},
|
||||
},
|
||||
}))
|
||||
Expect(k8sClient.Create(ctx, ac)).Should(Succeed())
|
||||
|
||||
By("Verify workloads are created")
|
||||
Eventually(func() bool {
|
||||
|
||||
RequestReconcileNow(ctx, ac)
|
||||
cw := &appsv1.Deployment{}
|
||||
if err := k8sClient.Get(ctx, client.ObjectKey{Name: cwName, Namespace: namespace}, cw); err != nil {
|
||||
return false
|
||||
}
|
||||
if len(cw.Spec.Template.Spec.Containers) != 1 {
|
||||
return false
|
||||
}
|
||||
for i, e := range cw.Spec.Template.Spec.Containers[0].Env {
|
||||
if e.Name != envVars[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}, time.Second*10, time.Second*2).Should(BeTrue())
|
||||
})
|
||||
})
|
||||
|
||||
type compModifier func(*v1alpha2.Component)
|
||||
|
||||
func compWithName(n string) compModifier {
|
||||
return func(c *v1alpha2.Component) {
|
||||
c.Name = n
|
||||
}
|
||||
}
|
||||
|
||||
func compWithNamespace(n string) compModifier {
|
||||
return func(c *v1alpha2.Component) {
|
||||
c.Namespace = n
|
||||
}
|
||||
}
|
||||
|
||||
func compWithLabels(labels map[string]string) compModifier {
|
||||
return func(c *v1alpha2.Component) {
|
||||
c.Labels = labels
|
||||
}
|
||||
}
|
||||
|
||||
func compWithWorkload(w runtime.RawExtension) compModifier {
|
||||
return func(c *v1alpha2.Component) {
|
||||
c.Spec.Workload = w
|
||||
}
|
||||
}
|
||||
|
||||
func compWithParams(p []v1alpha2.ComponentParameter) compModifier {
|
||||
return func(c *v1alpha2.Component) {
|
||||
c.Spec.Parameters = p
|
||||
}
|
||||
}
|
||||
|
||||
func comp(m ...compModifier) *v1alpha2.Component {
|
||||
c := &v1alpha2.Component{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: v1alpha2.ComponentKind,
|
||||
APIVersion: v1alpha2.SchemeGroupVersion.String(),
|
||||
},
|
||||
}
|
||||
|
||||
for _, fn := range m {
|
||||
fn(c)
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
type acModifier func(*v1alpha2.ApplicationConfiguration)
|
||||
|
||||
func acWithName(n string) acModifier {
|
||||
return func(a *v1alpha2.ApplicationConfiguration) {
|
||||
a.Name = n
|
||||
}
|
||||
}
|
||||
|
||||
func acWithNamspace(n string) acModifier {
|
||||
return func(a *v1alpha2.ApplicationConfiguration) {
|
||||
a.Namespace = n
|
||||
}
|
||||
}
|
||||
|
||||
func acWithLabels(labels map[string]string) acModifier {
|
||||
return func(a *v1alpha2.ApplicationConfiguration) {
|
||||
a.Labels = labels
|
||||
}
|
||||
}
|
||||
|
||||
func acWithComps(c []v1alpha2.ApplicationConfigurationComponent) acModifier {
|
||||
return func(a *v1alpha2.ApplicationConfiguration) {
|
||||
a.Spec.Components = c
|
||||
}
|
||||
}
|
||||
|
||||
func ac(m ...acModifier) *v1alpha2.ApplicationConfiguration {
|
||||
a := &v1alpha2.ApplicationConfiguration{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: v1alpha2.ApplicationConfigurationKind,
|
||||
APIVersion: v1alpha2.SchemeGroupVersion.String(),
|
||||
},
|
||||
}
|
||||
|
||||
for _, fn := range m {
|
||||
fn(a)
|
||||
}
|
||||
return a
|
||||
}
|
||||
@@ -1,38 +0,0 @@
|
||||
/*
|
||||
Copyright 2021 The KubeVela Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package controllers_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
|
||||
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
var verifyComponentCreated = func(testcase, namespace, compName string) {
|
||||
|
||||
Eventually(
|
||||
func() error {
|
||||
comp := v1alpha2.Component{}
|
||||
return k8sClient.Get(context.TODO(), client.ObjectKey{Namespace: namespace, Name: compName}, &comp)
|
||||
},
|
||||
time.Second*3, 30*time.Millisecond).Should(BeNil(), "check component created fail for test "+testcase)
|
||||
}
|
||||
@@ -1,641 +0,0 @@
|
||||
/*
|
||||
Copyright 2021 The KubeVela Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package controllers_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
||||
|
||||
commontypes "github.com/oam-dev/kubevela/apis/core.oam.dev/common"
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
|
||||
"github.com/oam-dev/kubevela/pkg/oam/util"
|
||||
"github.com/oam-dev/kubevela/pkg/utils/common"
|
||||
)
|
||||
|
||||
var _ = Describe("Versioning mechanism of components", func() {
|
||||
ctx := context.Background()
|
||||
namespace := "component-versioning-test"
|
||||
ns := corev1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: namespace,
|
||||
},
|
||||
}
|
||||
componentName := "example-component"
|
||||
|
||||
// to identify different revisions of components
|
||||
imageV1 := "wordpress:4.6.1-apache"
|
||||
imageV2 := "wordpress:4.6.2-apache"
|
||||
|
||||
var cwV1, cwV2 appsv1.Deployment
|
||||
var componentV1 v1alpha2.Component
|
||||
var appConfig v1alpha2.ApplicationConfiguration
|
||||
|
||||
BeforeEach(func() {
|
||||
cwV1 = appsv1.Deployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: appsv1.DeploymentSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"app": "wordpress",
|
||||
},
|
||||
},
|
||||
Template: corev1.PodTemplateSpec{
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Image: imageV1,
|
||||
},
|
||||
},
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "wordpress"}},
|
||||
},
|
||||
},
|
||||
}
|
||||
cwV2 = appsv1.Deployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: appsv1.DeploymentSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"app": "wordpress",
|
||||
},
|
||||
},
|
||||
Template: corev1.PodTemplateSpec{
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Image: imageV2,
|
||||
},
|
||||
},
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "wordpress"}},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
componentV1 = v1alpha2.Component{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: "core.oam.dev/v1alpha2",
|
||||
Kind: "Component",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: componentName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1alpha2.ComponentSpec{
|
||||
Workload: runtime.RawExtension{
|
||||
Object: &cwV1,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
appConfig = v1alpha2.ApplicationConfiguration{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "example-appconfig",
|
||||
Namespace: namespace,
|
||||
},
|
||||
}
|
||||
|
||||
logf.Log.Info("Start to run a test, clean up previous resources")
|
||||
ns = corev1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: namespace,
|
||||
},
|
||||
}
|
||||
// delete the namespace with all its resources
|
||||
Expect(k8sClient.Delete(ctx, &ns, client.PropagationPolicy(metav1.DeletePropagationForeground))).
|
||||
Should(SatisfyAny(BeNil(), &util.NotFoundMatcher{}))
|
||||
logf.Log.Info("make sure all the resources are removed")
|
||||
objectKey := client.ObjectKey{
|
||||
Name: namespace,
|
||||
}
|
||||
res := &corev1.Namespace{}
|
||||
Eventually(
|
||||
// gomega has a bug that can't take nil as the actual input, so has to make it a func
|
||||
func() error {
|
||||
return k8sClient.Get(ctx, objectKey, res)
|
||||
},
|
||||
time.Second*120, time.Millisecond*500).Should(&util.NotFoundMatcher{})
|
||||
Eventually(
|
||||
func() error {
|
||||
return k8sClient.Create(ctx, &ns)
|
||||
},
|
||||
time.Second*3, time.Millisecond*300).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
logf.Log.Info("Clean up resources")
|
||||
// delete the namespace with all its resources
|
||||
Expect(k8sClient.Delete(ctx, &ns, client.PropagationPolicy(metav1.DeletePropagationForeground))).Should(BeNil())
|
||||
})
|
||||
|
||||
When("create or update a component", func() {
|
||||
PIt("should create corresponding ControllerRevision", func() {
|
||||
By("Create Component v1")
|
||||
Expect(k8sClient.Create(ctx, &componentV1)).Should(Succeed())
|
||||
|
||||
cmpV1 := &v1alpha2.Component{}
|
||||
By("Get Component v1")
|
||||
Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: componentName}, cmpV1)).Should(Succeed())
|
||||
|
||||
By("Get Component latest status after ControllerRevision created")
|
||||
Eventually(
|
||||
func() *commontypes.Revision {
|
||||
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: componentName}, cmpV1)
|
||||
return cmpV1.Status.LatestRevision
|
||||
},
|
||||
time.Second*15, time.Millisecond*500).ShouldNot(BeNil())
|
||||
|
||||
revisionNameV1 := cmpV1.Status.LatestRevision.Name
|
||||
By("Get corresponding ControllerRevision of Component v1")
|
||||
cr := &appsv1.ControllerRevision{}
|
||||
Expect(k8sClient.Get(ctx,
|
||||
client.ObjectKey{Namespace: namespace, Name: revisionNameV1}, cr)).ShouldNot(HaveOccurred())
|
||||
By("Check revision seq number")
|
||||
Expect(cr.Revision).Should(Equal(int64(1)))
|
||||
|
||||
cwV2raw, _ := json.Marshal(cwV2)
|
||||
cmpV1.Spec.Workload.Raw = cwV2raw
|
||||
By("Update Component into revision v2")
|
||||
Expect(k8sClient.Update(ctx, cmpV1)).Should(Succeed())
|
||||
|
||||
cmpV2 := &v1alpha2.Component{}
|
||||
By("Get Component v2")
|
||||
Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: componentName}, cmpV2)).Should(Succeed())
|
||||
|
||||
By("Get Component latest status after ControllerRevision created")
|
||||
Eventually(
|
||||
func() string {
|
||||
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: componentName}, cmpV2)
|
||||
return cmpV2.Status.LatestRevision.Name
|
||||
},
|
||||
time.Second*15, time.Millisecond*500).ShouldNot(Equal(revisionNameV1))
|
||||
|
||||
revisionNameV2 := cmpV2.Status.LatestRevision.Name
|
||||
crV2 := &appsv1.ControllerRevision{}
|
||||
By("Get corresponding ControllerRevision of Component v2")
|
||||
Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: revisionNameV2}, crV2)).Should(Succeed())
|
||||
By("Check revision seq number")
|
||||
Expect(crV2.Revision).Should(Equal(int64(2)))
|
||||
|
||||
})
|
||||
})
|
||||
|
||||
When("Components have revisionName in AppConfig", func() {
|
||||
PIt("should NOT create NOR update workloads, when update components", func() {
|
||||
By("Create Component v1")
|
||||
Expect(k8sClient.Create(ctx, &componentV1)).Should(Succeed())
|
||||
|
||||
cmpV1 := &v1alpha2.Component{}
|
||||
By("Get Component v1")
|
||||
Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: componentName}, cmpV1)).Should(Succeed())
|
||||
|
||||
By("Get Component latest status after ControllerRevision created")
|
||||
Eventually(
|
||||
func() *commontypes.Revision {
|
||||
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: componentName}, cmpV1)
|
||||
return cmpV1.Status.LatestRevision
|
||||
},
|
||||
time.Second*15, time.Millisecond*500).ShouldNot(BeNil())
|
||||
|
||||
revisionNameV1 := cmpV1.Status.LatestRevision.Name
|
||||
|
||||
appConfigWithRevisionName := appConfig
|
||||
appConfigWithRevisionName.Spec.Components = append(appConfigWithRevisionName.Spec.Components,
|
||||
v1alpha2.ApplicationConfigurationComponent{
|
||||
RevisionName: revisionNameV1,
|
||||
})
|
||||
By("Apply appConfig")
|
||||
Expect(k8sClient.Create(ctx, &appConfigWithRevisionName)).Should(Succeed())
|
||||
|
||||
cwWlV1 := appsv1.Deployment{}
|
||||
By("Check Deployment workload's image field is v1")
|
||||
Eventually(
|
||||
func() error {
|
||||
return k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: componentName}, &cwWlV1)
|
||||
},
|
||||
time.Second*15, time.Millisecond*500).Should(BeNil())
|
||||
Expect(cwWlV1.Spec.Template.Spec.Containers[0].Image).Should(Equal(imageV1))
|
||||
|
||||
cwV2raw, _ := json.Marshal(cwV2)
|
||||
cmpV1.Spec.Workload.Raw = cwV2raw
|
||||
By("Update Component to revision v2")
|
||||
Expect(k8sClient.Update(ctx, cmpV1)).Should(Succeed())
|
||||
|
||||
By("Check Deployment workload's image field is still v1")
|
||||
Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: componentName}, &cwWlV1)).Should(Succeed())
|
||||
Expect(cwWlV1.Spec.Template.Spec.Containers[0].Image).Should(Equal(imageV1))
|
||||
})
|
||||
})
|
||||
|
||||
When("Components have componentName", func() {
|
||||
PIt("should update workloads with new revision of components, when update components", func() {
|
||||
By("Create Component v1")
|
||||
Expect(k8sClient.Create(ctx, &componentV1)).Should(Succeed())
|
||||
|
||||
cmpV1 := &v1alpha2.Component{}
|
||||
By("Get Component latest status after ControllerRevision created")
|
||||
Eventually(
|
||||
func() *commontypes.Revision {
|
||||
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: componentName}, cmpV1)
|
||||
return cmpV1.Status.LatestRevision
|
||||
},
|
||||
time.Second*30, time.Millisecond*500).ShouldNot(BeNil())
|
||||
|
||||
revisionNameV1 := cmpV1.Status.LatestRevision.Name
|
||||
|
||||
appConfigWithRevisionName := appConfig
|
||||
appConfigWithRevisionName.Spec.Components = append(appConfigWithRevisionName.Spec.Components,
|
||||
v1alpha2.ApplicationConfigurationComponent{
|
||||
ComponentName: componentName,
|
||||
})
|
||||
By("Apply appConfig")
|
||||
Expect(k8sClient.Create(ctx, &appConfigWithRevisionName)).Should(Succeed())
|
||||
|
||||
cwWlV1 := &appsv1.Deployment{}
|
||||
By("Check Deployment workload's image field is v1")
|
||||
Eventually(
|
||||
func() error {
|
||||
return k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: componentName}, cwWlV1)
|
||||
},
|
||||
time.Second*15, time.Millisecond*500).Should(BeNil())
|
||||
Expect(cwWlV1.Spec.Template.Spec.Containers[0].Image).Should(Equal(imageV1))
|
||||
|
||||
cwV2raw, _ := json.Marshal(cwV2)
|
||||
cmpV1.Spec.Workload.Raw = cwV2raw
|
||||
By("Update Component to revision v2")
|
||||
Expect(k8sClient.Update(ctx, cmpV1)).Should(Succeed())
|
||||
|
||||
By("Check Component has been changed to revision v2")
|
||||
By("Get latest Component revision: revision 2")
|
||||
cmpV2 := &v1alpha2.Component{}
|
||||
Eventually(
|
||||
func() string {
|
||||
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: componentName}, cmpV2)
|
||||
return cmpV2.Status.LatestRevision.Name
|
||||
},
|
||||
time.Second*30, time.Millisecond*500).ShouldNot(Equal(revisionNameV1))
|
||||
|
||||
By("Check Deployment workload's image field has been changed to v2")
|
||||
cwWlV2 := &appsv1.Deployment{}
|
||||
Eventually(func() string {
|
||||
RequestReconcileNow(ctx, &appConfigWithRevisionName)
|
||||
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: componentName}, cwWlV2)
|
||||
return cwWlV2.Spec.Template.Spec.Containers[0].Image
|
||||
}, time.Second*60, time.Microsecond*500).Should(Equal(imageV2))
|
||||
})
|
||||
})
|
||||
|
||||
When("Components have componentName and have revision-enabled trait", func() {
|
||||
PIt("should create workloads with name of revision and keep the old revision", func() {
|
||||
|
||||
By("Create trait definition")
|
||||
var td v1alpha2.TraitDefinition
|
||||
Expect(common.ReadYamlToObject("testdata/revision/trait-def.yaml", &td)).Should(BeNil())
|
||||
|
||||
var gtd v1alpha2.TraitDefinition
|
||||
if err := k8sClient.Get(ctx, client.ObjectKey{Name: td.Name, Namespace: td.Namespace}, >d); err != nil {
|
||||
Expect(k8sClient.Create(ctx, &td)).Should(Succeed())
|
||||
} else {
|
||||
td.ResourceVersion = gtd.ResourceVersion
|
||||
Expect(k8sClient.Update(ctx, &td)).Should(Succeed())
|
||||
}
|
||||
|
||||
By("Create Component v1")
|
||||
var comp1 v1alpha2.Component
|
||||
Expect(common.ReadYamlToObject("testdata/revision/comp-v1.yaml", &comp1)).Should(BeNil())
|
||||
Expect(k8sClient.Create(ctx, &comp1)).Should(Succeed())
|
||||
|
||||
By("Check component should already existed")
|
||||
Eventually(func() error {
|
||||
return k8sClient.Get(ctx, client.ObjectKey{Namespace: comp1.Namespace, Name: comp1.Name}, &v1alpha2.Component{})
|
||||
}, time.Second*10, time.Microsecond*500).Should(BeNil())
|
||||
|
||||
By("Create AppConfig with component")
|
||||
var appconfig v1alpha2.ApplicationConfiguration
|
||||
Expect(common.ReadYamlToObject("testdata/revision/app.yaml", &appconfig)).Should(BeNil())
|
||||
Expect(k8sClient.Create(ctx, &appconfig)).Should(Succeed())
|
||||
|
||||
By("Get Component latest status after ControllerRevision created")
|
||||
Eventually(
|
||||
func() *commontypes.Revision {
|
||||
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: componentName}, &comp1)
|
||||
return comp1.Status.LatestRevision
|
||||
},
|
||||
time.Second*300, time.Millisecond*500).ShouldNot(BeNil())
|
||||
|
||||
revisionNameV1 := comp1.Status.LatestRevision.Name
|
||||
|
||||
By("Workload created with revisionName v1")
|
||||
var w1 unstructured.Unstructured
|
||||
Eventually(
|
||||
func() error {
|
||||
RequestReconcileNow(ctx, &appconfig)
|
||||
w1.SetAPIVersion("example.com/v1")
|
||||
w1.SetKind("Bar")
|
||||
return k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: revisionNameV1}, &w1)
|
||||
},
|
||||
time.Second*60, time.Millisecond*500).Should(BeNil())
|
||||
k1, _, _ := unstructured.NestedString(w1.Object, "spec", "key")
|
||||
Expect(k1).Should(BeEquivalentTo("v1"), fmt.Sprintf("%v", w1.Object))
|
||||
|
||||
By("Create Component v2")
|
||||
var comp2 v1alpha2.Component
|
||||
Expect(common.ReadYamlToObject("testdata/revision/comp-v2.yaml", &comp2)).Should(BeNil())
|
||||
comp2.ResourceVersion = comp1.ResourceVersion
|
||||
Expect(k8sClient.Update(ctx, &comp2)).Should(Succeed())
|
||||
|
||||
By("Get Component latest status after ControllerRevision created")
|
||||
Eventually(
|
||||
func() *commontypes.Revision {
|
||||
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: componentName}, &comp2)
|
||||
if comp2.Status.LatestRevision != nil && comp2.Status.LatestRevision.Revision > 1 {
|
||||
return comp2.Status.LatestRevision
|
||||
}
|
||||
return nil
|
||||
},
|
||||
time.Second*120, time.Millisecond*500).ShouldNot(BeNil())
|
||||
|
||||
revisionNameV2 := comp2.Status.LatestRevision.Name
|
||||
|
||||
By("Workload exist with revisionName v2")
|
||||
var w2 unstructured.Unstructured
|
||||
Eventually(
|
||||
func() error {
|
||||
RequestReconcileNow(ctx, &appconfig)
|
||||
w2.SetAPIVersion("example.com/v1")
|
||||
w2.SetKind("Bar")
|
||||
return k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: revisionNameV2}, &w2)
|
||||
},
|
||||
time.Second*30, time.Millisecond*500).Should(BeNil())
|
||||
k2, _, _ := unstructured.NestedString(w2.Object, "spec", "key")
|
||||
Expect(k2).Should(BeEquivalentTo("v2"), fmt.Sprintf("%v", w2.Object))
|
||||
|
||||
By("Check AppConfig status")
|
||||
Eventually(
|
||||
func() string {
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appconfig.Name}, &appconfig)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
if len(appconfig.Status.Workloads) == 0 {
|
||||
return ""
|
||||
}
|
||||
return appconfig.Status.Workloads[0].ComponentRevisionName
|
||||
},
|
||||
time.Second*60, time.Millisecond*500).Should(BeEquivalentTo(revisionNameV2))
|
||||
|
||||
Expect(len(appconfig.Status.Workloads)).Should(BeEquivalentTo(1))
|
||||
|
||||
Expect(len(appconfig.Status.HistoryWorkloads)).Should(BeEquivalentTo(1))
|
||||
Expect(appconfig.Status.HistoryWorkloads[0].Revision).Should(BeEquivalentTo(revisionNameV1))
|
||||
|
||||
// Clean
|
||||
k8sClient.Delete(ctx, &appconfig)
|
||||
k8sClient.Delete(ctx, &comp1)
|
||||
k8sClient.Delete(ctx, &comp2)
|
||||
})
|
||||
})
|
||||
|
||||
When("Components have componentName and without revision-enabled trait", func() {
|
||||
PIt("should create workloads with name of component and replace the old revision", func() {
|
||||
|
||||
By("Create trait definition")
|
||||
var td v1alpha2.TraitDefinition
|
||||
Expect(common.ReadYamlToObject("testdata/revision/trait-def-no-revision.yaml", &td)).Should(BeNil())
|
||||
var gtd v1alpha2.TraitDefinition
|
||||
if err := k8sClient.Get(ctx, client.ObjectKey{Name: td.Name, Namespace: td.Namespace}, >d); err != nil {
|
||||
Expect(k8sClient.Create(ctx, &td)).Should(Succeed())
|
||||
} else {
|
||||
td.ResourceVersion = gtd.ResourceVersion
|
||||
Expect(k8sClient.Update(ctx, &td)).Should(Succeed())
|
||||
}
|
||||
|
||||
By("Create Component v1")
|
||||
var comp1 v1alpha2.Component
|
||||
Expect(common.ReadYamlToObject("testdata/revision/comp-v1.yaml", &comp1)).Should(BeNil())
|
||||
Expect(k8sClient.Create(ctx, &comp1)).Should(Succeed())
|
||||
|
||||
By("Create AppConfig with component")
|
||||
var appconfig v1alpha2.ApplicationConfiguration
|
||||
Expect(common.ReadYamlToObject("testdata/revision/app.yaml", &appconfig)).Should(BeNil())
|
||||
Expect(k8sClient.Create(ctx, &appconfig)).Should(Succeed())
|
||||
|
||||
By("Workload created with component name")
|
||||
var w1 unstructured.Unstructured
|
||||
Eventually(
|
||||
func() error {
|
||||
w1.SetAPIVersion("example.com/v1")
|
||||
w1.SetKind("Bar")
|
||||
return k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: componentName}, &w1)
|
||||
},
|
||||
time.Second*60, time.Millisecond*500).Should(BeNil())
|
||||
|
||||
k1, _, _ := unstructured.NestedString(w1.Object, "spec", "key")
|
||||
Expect(k1).Should(BeEquivalentTo("v1"), fmt.Sprintf("%v", w1.Object))
|
||||
|
||||
By("Create Component v2")
|
||||
var comp2 v1alpha2.Component
|
||||
Expect(common.ReadYamlToObject("testdata/revision/comp-v2.yaml", &comp2)).Should(BeNil())
|
||||
Eventually(func() error {
|
||||
tmp := &v1alpha2.Component{}
|
||||
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: componentName}, tmp)
|
||||
updatedComp := comp2.DeepCopy()
|
||||
updatedComp.ResourceVersion = tmp.ResourceVersion
|
||||
return k8sClient.Update(ctx, updatedComp)
|
||||
}, 5*time.Second, time.Second).Should(Succeed())
|
||||
|
||||
By("Workload exist with revisionName v2")
|
||||
var w2 unstructured.Unstructured
|
||||
Eventually(
|
||||
func() string {
|
||||
RequestReconcileNow(ctx, &appconfig)
|
||||
w2.SetAPIVersion("example.com/v1")
|
||||
w2.SetKind("Bar")
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: componentName}, &w2)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
k2, _, _ := unstructured.NestedString(w2.Object, "spec", "key")
|
||||
return k2
|
||||
},
|
||||
time.Second*30, time.Millisecond*500).Should(BeEquivalentTo("v2"))
|
||||
|
||||
By("Check AppConfig status")
|
||||
Eventually(
|
||||
func() error {
|
||||
return k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: appconfig.Name}, &appconfig)
|
||||
},
|
||||
time.Second*15, time.Millisecond*500).Should(BeNil())
|
||||
|
||||
Expect(len(appconfig.Status.Workloads)).Should(BeEquivalentTo(1))
|
||||
|
||||
// Clean
|
||||
k8sClient.Delete(ctx, &appconfig)
|
||||
k8sClient.Delete(ctx, &comp1)
|
||||
k8sClient.Delete(ctx, &comp2)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
var _ = Describe("Component revision", func() {
|
||||
ctx := context.Background()
|
||||
apiVersion := "core.oam.dev/v1alpha2"
|
||||
namespace := "default"
|
||||
componentName := "revision-component"
|
||||
appConfigName := "revision-app"
|
||||
workload := appsv1.Deployment{
|
||||
TypeMeta: metav1.TypeMeta{APIVersion: "apps/v1", Kind: "Deployment"},
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: namespace},
|
||||
Spec: appsv1.DeploymentSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"app": "nginx",
|
||||
},
|
||||
},
|
||||
Template: corev1.PodTemplateSpec{
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Name: "nginx",
|
||||
Image: "nginx:1.9.4",
|
||||
},
|
||||
},
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "nginx"}},
|
||||
},
|
||||
},
|
||||
}
|
||||
component := v1alpha2.Component{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: apiVersion,
|
||||
Kind: "Component",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: componentName, Namespace: namespace},
|
||||
Spec: v1alpha2.ComponentSpec{
|
||||
Workload: runtime.RawExtension{Object: workload.DeepCopyObject()},
|
||||
},
|
||||
}
|
||||
|
||||
TraitDefinition := v1alpha2.TraitDefinition{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: apiVersion,
|
||||
Kind: "TraitDefinition",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "manualscalertraits2.core.oam.dev",
|
||||
Namespace: "vela-system",
|
||||
},
|
||||
Spec: v1alpha2.TraitDefinitionSpec{
|
||||
RevisionEnabled: true,
|
||||
Reference: commontypes.DefinitionReference{
|
||||
Name: "manualscalertraits.core.oam.dev",
|
||||
},
|
||||
WorkloadRefPath: "spec.workloadRef",
|
||||
},
|
||||
}
|
||||
|
||||
appConfig := v1alpha2.ApplicationConfiguration{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: apiVersion,
|
||||
Kind: "ApplicationConfiguration",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: appConfigName, Namespace: namespace},
|
||||
Spec: v1alpha2.ApplicationConfigurationSpec{
|
||||
Components: []v1alpha2.ApplicationConfigurationComponent{{
|
||||
ComponentName: componentName},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
workloadObjKey := client.ObjectKey{Name: componentName, Namespace: namespace}
|
||||
appConfigObjKey := client.ObjectKey{Name: appConfigName, Namespace: namespace}
|
||||
|
||||
trait := v1alpha2.ManualScalerTrait{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: apiVersion,
|
||||
Kind: "ManualScalerTrait",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: appConfigName, Namespace: namespace},
|
||||
Spec: v1alpha2.ManualScalerTraitSpec{
|
||||
ReplicaCount: 2,
|
||||
},
|
||||
}
|
||||
|
||||
Context("Attach a revision-enable trait the first time, workload should not be recreated", func() {
|
||||
It("should create Component and ApplicationConfiguration", func() {
|
||||
By("submit Component")
|
||||
Expect(k8sClient.Create(ctx, &component)).Should(Succeed())
|
||||
By("check Component exist")
|
||||
Eventually(
|
||||
func() error {
|
||||
return k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: componentName}, &v1alpha2.Component{})
|
||||
},
|
||||
time.Second*3, time.Millisecond*500).Should(BeNil())
|
||||
By("submit ApplicationConfiguration")
|
||||
Expect(k8sClient.Create(ctx, &appConfig)).Should(Succeed())
|
||||
|
||||
By("check workload")
|
||||
var deploy appsv1.Deployment
|
||||
Eventually(
|
||||
func() error {
|
||||
return k8sClient.Get(ctx, workloadObjKey, &deploy)
|
||||
},
|
||||
time.Second*15, time.Millisecond*500).Should(BeNil())
|
||||
|
||||
By("apply new ApplicationConfiguration with a revision enabled trait")
|
||||
Expect(k8sClient.Create(ctx, &TraitDefinition)).Should(Succeed())
|
||||
Expect(k8sClient.Get(ctx, appConfigObjKey, &appConfig)).Should(Succeed())
|
||||
updatedAppConfig := appConfig.DeepCopy()
|
||||
updatedAppConfig.Spec.Components[0].Traits = []v1alpha2.ComponentTrait{{Trait: runtime.RawExtension{Object: trait.DeepCopyObject()}}}
|
||||
updatedAppConfig.SetResourceVersion("")
|
||||
Expect(k8sClient.Patch(ctx, updatedAppConfig, client.Merge)).Should(Succeed())
|
||||
|
||||
By("check current workload exists")
|
||||
time.Sleep(3 * time.Second)
|
||||
var currentDeploy appsv1.Deployment
|
||||
Expect(k8sClient.Get(ctx, workloadObjKey, ¤tDeploy)).Should(BeNil())
|
||||
|
||||
By("check version 1 workload doesn't exist")
|
||||
var v1Deploy appsv1.Deployment
|
||||
workloadObjKey := client.ObjectKey{Name: componentName + "-v1", Namespace: namespace}
|
||||
Expect(k8sClient.Get(ctx, workloadObjKey, &v1Deploy)).Should(SatisfyAny(&util.NotFoundMatcher{}))
|
||||
})
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
k8sClient.Delete(ctx, &appConfig)
|
||||
k8sClient.Delete(ctx, &component)
|
||||
k8sClient.Delete(ctx, &TraitDefinition)
|
||||
})
|
||||
})
|
||||
@@ -25,11 +25,8 @@ import (
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
||||
|
||||
@@ -40,15 +37,9 @@ import (
|
||||
utilcommon "github.com/oam-dev/kubevela/pkg/utils/common"
|
||||
)
|
||||
|
||||
var (
|
||||
varInt32_60 int32 = 60
|
||||
)
|
||||
|
||||
var _ = Describe("HealthScope", func() {
|
||||
ctx := context.Background()
|
||||
var namespace string
|
||||
trueVar := true
|
||||
falseVar := false
|
||||
var ns corev1.Namespace
|
||||
BeforeEach(func() {
|
||||
namespace = randomNamespaceName("health-scope-test")
|
||||
@@ -83,234 +74,6 @@ var _ = Describe("HealthScope", func() {
|
||||
Expect(k8sClient.Delete(ctx, &ns, client.PropagationPolicy(metav1.DeletePropagationForeground))).Should(BeNil())
|
||||
})
|
||||
|
||||
It("Test an application config with health scope", func() {
|
||||
healthScopeName := "example-health-scope"
|
||||
// create health scope.
|
||||
hs := v1alpha2.HealthScope{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: healthScopeName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1alpha2.HealthScopeSpec{
|
||||
ProbeTimeout: &varInt32_60,
|
||||
WorkloadReferences: []corev1.ObjectReference{},
|
||||
},
|
||||
}
|
||||
Expect(k8sClient.Create(ctx, &hs)).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
|
||||
|
||||
By("Check empty health scope is healthy")
|
||||
Eventually(func() v1alpha2.HealthStatus {
|
||||
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: healthScopeName}, &hs)
|
||||
return hs.Status.ScopeHealthCondition.HealthStatus
|
||||
}, time.Second*30, time.Millisecond*500).Should(Equal(v1alpha2.StatusHealthy))
|
||||
|
||||
label := map[string]string{"workload": "deployment-workload"}
|
||||
wd := v1alpha2.WorkloadDefinition{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "deployments.apps",
|
||||
Namespace: namespace,
|
||||
Labels: label,
|
||||
},
|
||||
Spec: v1alpha2.WorkloadDefinitionSpec{
|
||||
Reference: common.DefinitionReference{
|
||||
Name: "deployments.apps",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
logf.Log.Info("Creating workload definition")
|
||||
// For some reason, WorkloadDefinition is created as a Cluster scope object
|
||||
Expect(k8sClient.Create(ctx, &wd)).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
|
||||
|
||||
workloadName := "example-deployment-workload"
|
||||
wl := appsv1.Deployment{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: "apps/v1",
|
||||
Kind: "Deployment",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: namespace,
|
||||
Labels: label,
|
||||
Name: workloadName,
|
||||
},
|
||||
Spec: appsv1.DeploymentSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: label,
|
||||
},
|
||||
Template: corev1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: namespace,
|
||||
Labels: label,
|
||||
},
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Name: "wordpress",
|
||||
Image: "wordpress:php7.2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
// reflect workload gvk from scheme
|
||||
gvks, _, _ := scheme.ObjectKinds(&wl)
|
||||
wl.APIVersion = gvks[0].GroupVersion().String()
|
||||
wl.Kind = gvks[0].Kind
|
||||
|
||||
// Create a component definition
|
||||
componentName := "example-component"
|
||||
comp := v1alpha2.Component{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: componentName,
|
||||
Namespace: namespace,
|
||||
Labels: label,
|
||||
},
|
||||
Spec: v1alpha2.ComponentSpec{
|
||||
Workload: runtime.RawExtension{
|
||||
Object: &wl,
|
||||
},
|
||||
Parameters: []v1alpha2.ComponentParameter{
|
||||
{
|
||||
Name: "instance-name",
|
||||
Required: &trueVar,
|
||||
FieldPaths: []string{"metadata.name"},
|
||||
},
|
||||
{
|
||||
Name: "image",
|
||||
Required: &falseVar,
|
||||
FieldPaths: []string{"spec.containers[0].image"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
logf.Log.Info("Creating component", "Name", comp.Name, "Namespace", comp.Namespace)
|
||||
Expect(k8sClient.Create(ctx, &comp)).Should(BeNil())
|
||||
|
||||
By("check component successfully created")
|
||||
Eventually(
|
||||
func() error {
|
||||
return k8sClient.Get(ctx, client.ObjectKey{Name: componentName, Namespace: comp.Namespace}, &comp)
|
||||
},
|
||||
time.Second*5, time.Millisecond*100).Should(BeNil())
|
||||
|
||||
// Create application configuration
|
||||
workloadInstanceName1 := "example-appconfig-healthscope-a"
|
||||
workloadInstanceName2 := "example-appconfig-healthscope-b"
|
||||
imageName := "wordpress:php7.2"
|
||||
appConfig := v1alpha2.ApplicationConfiguration{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "example-appconfig",
|
||||
Namespace: namespace,
|
||||
Labels: label,
|
||||
},
|
||||
Spec: v1alpha2.ApplicationConfigurationSpec{
|
||||
Components: []v1alpha2.ApplicationConfigurationComponent{
|
||||
{
|
||||
ComponentName: componentName,
|
||||
ParameterValues: []v1alpha2.ComponentParameterValue{
|
||||
{
|
||||
Name: "instance-name",
|
||||
Value: intstr.IntOrString{StrVal: workloadInstanceName1, Type: intstr.String},
|
||||
},
|
||||
{
|
||||
Name: "image",
|
||||
Value: intstr.IntOrString{StrVal: imageName, Type: intstr.String},
|
||||
},
|
||||
},
|
||||
Scopes: []v1alpha2.ComponentScope{
|
||||
{
|
||||
ScopeReference: corev1.ObjectReference{
|
||||
APIVersion: "core.oam.dev/v1alpha2",
|
||||
Kind: v1alpha2.HealthScopeGroupVersionKind.Kind,
|
||||
Name: healthScopeName,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ComponentName: componentName,
|
||||
ParameterValues: []v1alpha2.ComponentParameterValue{
|
||||
{
|
||||
Name: "instance-name",
|
||||
Value: intstr.IntOrString{StrVal: workloadInstanceName2, Type: intstr.String},
|
||||
},
|
||||
{
|
||||
Name: "image",
|
||||
Value: intstr.IntOrString{StrVal: imageName, Type: intstr.String},
|
||||
},
|
||||
},
|
||||
Scopes: []v1alpha2.ComponentScope{
|
||||
{
|
||||
ScopeReference: corev1.ObjectReference{
|
||||
APIVersion: "core.oam.dev/v1alpha2",
|
||||
Kind: v1alpha2.HealthScopeGroupVersionKind.Kind,
|
||||
Name: healthScopeName,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
logf.Log.Info("Creating application config", "Name", appConfig.Name, "Namespace", appConfig.Namespace)
|
||||
Expect(k8sClient.Create(ctx, &appConfig)).Should(BeNil())
|
||||
// Verification
|
||||
By("Checking deployment-a is created")
|
||||
objectKey := client.ObjectKey{
|
||||
Name: workloadInstanceName1,
|
||||
Namespace: namespace,
|
||||
}
|
||||
deploy := &appsv1.Deployment{}
|
||||
logf.Log.Info("Checking on deployment", "Key", objectKey)
|
||||
Eventually(
|
||||
func() error {
|
||||
return k8sClient.Get(ctx, objectKey, deploy)
|
||||
},
|
||||
time.Second*15, time.Millisecond*500).Should(BeNil())
|
||||
|
||||
// Verify all components declared in AppConfig are created
|
||||
By("Checking deployment-b is created")
|
||||
objectKey2 := client.ObjectKey{
|
||||
Name: workloadInstanceName2,
|
||||
Namespace: namespace,
|
||||
}
|
||||
deploy2 := &appsv1.Deployment{}
|
||||
logf.Log.Info("Checking on deployment", "Key", objectKey2)
|
||||
Eventually(
|
||||
func() error {
|
||||
return k8sClient.Get(ctx, objectKey2, deploy2)
|
||||
},
|
||||
time.Second*15, time.Millisecond*500).Should(BeNil())
|
||||
|
||||
By("Verify that the parameter substitute works")
|
||||
Expect(deploy.Spec.Template.Spec.Containers[0].Image).Should(Equal(imageName))
|
||||
|
||||
healthScopeObject := client.ObjectKey{
|
||||
Name: healthScopeName,
|
||||
Namespace: namespace,
|
||||
}
|
||||
healthScope := &v1alpha2.HealthScope{}
|
||||
By("Verify health scope")
|
||||
Eventually(
|
||||
func() v1alpha2.ScopeHealthCondition {
|
||||
RequestReconcileNow(ctx, &appConfig)
|
||||
*healthScope = v1alpha2.HealthScope{}
|
||||
k8sClient.Get(ctx, healthScopeObject, healthScope)
|
||||
logf.Log.Info("Checking on health scope",
|
||||
"len(WorkloadReferences)",
|
||||
len(healthScope.Spec.WorkloadReferences),
|
||||
"health",
|
||||
healthScope.Status.ScopeHealthCondition)
|
||||
return healthScope.Status.ScopeHealthCondition
|
||||
},
|
||||
time.Second*150, time.Second*5).Should(Equal(v1alpha2.ScopeHealthCondition{
|
||||
HealthStatus: v1alpha2.StatusHealthy,
|
||||
Total: int64(2),
|
||||
HealthyWorkloads: int64(2),
|
||||
}))
|
||||
})
|
||||
|
||||
It("Test an application with health policy", func() {
|
||||
By("Apply a healthy application")
|
||||
var newApp v1beta1.Application
|
||||
|
||||
@@ -1,244 +0,0 @@
|
||||
/*
|
||||
Copyright 2021 The KubeVela Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package controllers_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
||||
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
|
||||
"github.com/oam-dev/kubevela/pkg/oam/util"
|
||||
)
|
||||
|
||||
var _ = Describe("Test kubernetes native workloads", func() {
|
||||
ctx := context.Background()
|
||||
namespace := "kubernetes-workload-test"
|
||||
falseVar := false
|
||||
ns := corev1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: namespace,
|
||||
},
|
||||
}
|
||||
BeforeEach(func() {
|
||||
logf.Log.Info("Start to run a test, clean up previous resources")
|
||||
// delete the namespace with all its resources
|
||||
Expect(k8sClient.Delete(ctx, &ns, client.PropagationPolicy(metav1.DeletePropagationForeground))).
|
||||
Should(SatisfyAny(BeNil(), &util.NotFoundMatcher{}))
|
||||
logf.Log.Info("make sure all the resources are removed")
|
||||
objectKey := client.ObjectKey{
|
||||
Name: namespace,
|
||||
}
|
||||
res := &corev1.Namespace{}
|
||||
Eventually(
|
||||
// gomega has a bug that can't take nil as the actual input, so has to make it a func
|
||||
func() error {
|
||||
return k8sClient.Get(ctx, objectKey, res)
|
||||
},
|
||||
time.Second*120, time.Millisecond*500).Should(&util.NotFoundMatcher{})
|
||||
// recreate it
|
||||
Eventually(
|
||||
func() error {
|
||||
return k8sClient.Create(ctx, &ns)
|
||||
},
|
||||
time.Second*3, time.Millisecond*300).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
|
||||
|
||||
})
|
||||
AfterEach(func() {
|
||||
logf.Log.Info("Clean up resources")
|
||||
// delete the namespace with all its resources
|
||||
Expect(k8sClient.Delete(ctx, &ns, client.PropagationPolicy(metav1.DeletePropagationForeground))).Should(BeNil())
|
||||
})
|
||||
|
||||
It("use deployment workload", func() {
|
||||
label := map[string]string{"workload": "deployment"}
|
||||
// create a workload definition for
|
||||
wd := v1alpha2.WorkloadDefinition{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "deployments.apps",
|
||||
Namespace: "oam-runtime-system",
|
||||
Labels: label,
|
||||
},
|
||||
Spec: v1alpha2.WorkloadDefinitionSpec{
|
||||
Reference: common.DefinitionReference{
|
||||
Name: "deployments.apps",
|
||||
},
|
||||
},
|
||||
}
|
||||
logf.Log.Info("Creating workload definition for deployment")
|
||||
// For some reason, WorkloadDefinition is created as a Cluster scope object
|
||||
Expect(k8sClient.Create(ctx, wd.DeepCopy())).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
|
||||
// create a workload CR
|
||||
workloadName := "example-deployment-workload"
|
||||
wl := appsv1.Deployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: namespace,
|
||||
Name: workloadName,
|
||||
},
|
||||
Spec: appsv1.DeploymentSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: label,
|
||||
},
|
||||
Template: corev1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: namespace,
|
||||
Labels: label,
|
||||
},
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Name: "wordpress",
|
||||
Image: "wordpress:4.6.1-apache",
|
||||
Ports: []corev1.ContainerPort{
|
||||
{
|
||||
Name: "wordpress",
|
||||
HostPort: 80,
|
||||
ContainerPort: 8080,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
// reflect workload gvk from scheme
|
||||
gvks, _, _ := scheme.ObjectKinds(&wl)
|
||||
wl.APIVersion = gvks[0].GroupVersion().String()
|
||||
wl.Kind = gvks[0].Kind
|
||||
// Create a component definition
|
||||
componentName := "example-deployment-workload"
|
||||
comp := v1alpha2.Component{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: componentName,
|
||||
Namespace: namespace,
|
||||
Labels: label,
|
||||
},
|
||||
Spec: v1alpha2.ComponentSpec{
|
||||
Workload: runtime.RawExtension{
|
||||
Object: &wl,
|
||||
},
|
||||
Parameters: []v1alpha2.ComponentParameter{
|
||||
{
|
||||
Name: "image",
|
||||
Required: &falseVar,
|
||||
FieldPaths: []string{"spec.template.spec.containers[0].image"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
logf.Log.Info("Creating component", "Name", comp.Name, "Namespace", comp.Namespace)
|
||||
|
||||
Eventually(func() error {
|
||||
return k8sClient.Create(ctx, &comp)
|
||||
}, 30*time.Second, 300*time.Microsecond).Should(SatisfyAny(BeNil(), util.AlreadyExistMatcher{}))
|
||||
|
||||
By("Check component created as expected")
|
||||
Eventually(
|
||||
func() error {
|
||||
return k8sClient.Get(ctx, client.ObjectKey{
|
||||
Namespace: namespace,
|
||||
Name: componentName,
|
||||
}, &v1alpha2.Component{})
|
||||
},
|
||||
time.Second*5, time.Millisecond*100).Should(BeNil())
|
||||
|
||||
// Create a manualscaler trait CR
|
||||
var replica int32 = 5
|
||||
mts := v1alpha2.ManualScalerTrait{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: namespace,
|
||||
Name: "sample-manualscaler-trait",
|
||||
Labels: label,
|
||||
},
|
||||
Spec: v1alpha2.ManualScalerTraitSpec{
|
||||
ReplicaCount: replica,
|
||||
},
|
||||
}
|
||||
// reflect trait gvk from scheme
|
||||
gvks, _, _ = scheme.ObjectKinds(&mts)
|
||||
mts.APIVersion = gvks[0].GroupVersion().String()
|
||||
mts.Kind = gvks[0].Kind
|
||||
// Create application configuration
|
||||
imageName := "wordpress:php7.2"
|
||||
appConfig := v1alpha2.ApplicationConfiguration{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "example-appconfig",
|
||||
Namespace: namespace,
|
||||
Labels: label,
|
||||
},
|
||||
Spec: v1alpha2.ApplicationConfigurationSpec{
|
||||
Components: []v1alpha2.ApplicationConfigurationComponent{
|
||||
{
|
||||
ComponentName: componentName,
|
||||
ParameterValues: []v1alpha2.ComponentParameterValue{
|
||||
{
|
||||
Name: "image",
|
||||
Value: intstr.IntOrString{StrVal: imageName, Type: intstr.String},
|
||||
},
|
||||
},
|
||||
Traits: []v1alpha2.ComponentTrait{
|
||||
{
|
||||
Trait: runtime.RawExtension{
|
||||
Object: &mts,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
logf.Log.Info("Creating application config", "Name", appConfig.Name, "Namespace", appConfig.Namespace)
|
||||
Expect(k8sClient.Create(ctx, &appConfig)).Should(BeNil())
|
||||
// Verification
|
||||
By("Checking deployment is created")
|
||||
objectKey := client.ObjectKey{
|
||||
Name: workloadName,
|
||||
Namespace: namespace,
|
||||
}
|
||||
deploy := &appsv1.Deployment{}
|
||||
logf.Log.Info("Checking on deployment", "Key", objectKey)
|
||||
Eventually(
|
||||
func() error {
|
||||
return k8sClient.Get(ctx, objectKey, deploy)
|
||||
},
|
||||
time.Second*15, time.Millisecond*500).Should(BeNil())
|
||||
|
||||
By("Verify that the parameter substitute works")
|
||||
Expect(deploy.Spec.Template.Spec.Containers[0].Image).Should(Equal(imageName))
|
||||
|
||||
By("Verify deployment scaled according to the manualScaler trait")
|
||||
Eventually(
|
||||
func() int32 {
|
||||
k8sClient.Get(ctx, objectKey, deploy)
|
||||
return deploy.Status.Replicas
|
||||
},
|
||||
time.Second*60, time.Second*5).Should(BeEquivalentTo(replica))
|
||||
Expect(*deploy.Spec.Replicas).Should(BeEquivalentTo(replica))
|
||||
})
|
||||
})
|
||||
@@ -1,754 +0,0 @@
|
||||
/*
|
||||
Copyright 2021 The KubeVela Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package controllers_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
corev1beta1 "k8s.io/api/networking/v1beta1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/utils/pointer"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
kruise "github.com/openkruise/kruise-api/apps/v1alpha1"
|
||||
|
||||
oamcomm "github.com/oam-dev/kubevela/apis/core.oam.dev/common"
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
|
||||
oamstd "github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
|
||||
"github.com/oam-dev/kubevela/pkg/controller/utils"
|
||||
"github.com/oam-dev/kubevela/pkg/oam"
|
||||
"github.com/oam-dev/kubevela/pkg/oam/util"
|
||||
"github.com/oam-dev/kubevela/pkg/utils/common"
|
||||
)
|
||||
|
||||
var _ = PDescribe("rollout related e2e-test,Cloneset based rollout tests", func() {
|
||||
ctx := context.Background()
|
||||
var namespaceName, appRolloutName string
|
||||
var ns corev1.Namespace
|
||||
var app v1beta1.Application
|
||||
var kc kruise.CloneSet
|
||||
var appRollout v1beta1.AppRollout
|
||||
|
||||
// createNamespace wipes out any leftover namespace carrying the current
// test name, waits until it is fully gone, then recreates it empty.
createNamespace := func() {
	ns = corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{Name: namespaceName},
	}
	// remove the namespace together with everything it still contains
	Eventually(func() error {
		return k8sClient.Delete(ctx, &ns, client.PropagationPolicy(metav1.DeletePropagationForeground))
	}, 120*time.Second, 500*time.Millisecond).Should(SatisfyAny(BeNil(), &util.NotFoundMatcher{}))

	By("make sure all the resources are removed")
	nsKey := client.ObjectKey{Name: namespaceName}
	leftover := &corev1.Namespace{}
	Eventually(func() error {
		return k8sClient.Get(ctx, nsKey, leftover)
	}, 120*time.Second, 500*time.Millisecond).Should(&util.NotFoundMatcher{})

	// recreate a clean namespace for this test run
	Eventually(func() error {
		return k8sClient.Create(ctx, &ns)
	}, 3*time.Second, 300*time.Millisecond).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
}
|
||||
|
||||
// CreateClonesetDef registers the CloneSet-based ComponentDefinition used
// by the specs below; a pre-existing definition is tolerated.
CreateClonesetDef := func() {
	By("Install CloneSet based componentDefinition")
	var componentDef v1beta1.ComponentDefinition
	Expect(common.ReadYamlToObject("testdata/rollout/cloneset/clonesetDefinition.yaml", &componentDef)).Should(BeNil())
	Eventually(func() error {
		return k8sClient.Create(ctx, &componentDef)
	}, 3*time.Second, 300*time.Millisecond).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
}
|
||||
|
||||
// CreateIngressDef registers the ingress TraitDefinition; a pre-existing
// definition is tolerated.
CreateIngressDef := func() {
	By("Install Ingress trait definition")
	var traitDef v1beta1.TraitDefinition
	Expect(common.ReadYamlToObject("testdata/rollout/cloneset/ingressDefinition.yaml", &traitDef)).Should(BeNil())
	Eventually(func() error {
		return k8sClient.Create(ctx, &traitDef)
	}, 3*time.Second, 300*time.Millisecond).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
}
|
||||
|
||||
// applySourceApp creates the application described by the given testdata
// file inside the test namespace and waits for its first revision to appear
// (the shared `app` variable is refreshed as a side effect).
applySourceApp := func(source string) {
	By("Apply an application")
	var newApp v1beta1.Application
	Expect(common.ReadYamlToObject("testdata/rollout/cloneset/"+source, &newApp)).Should(BeNil())
	newApp.Namespace = namespaceName
	Eventually(func() error {
		return k8sClient.Create(ctx, &newApp)
	}, 10*time.Second, 500*time.Millisecond).Should(Succeed())

	By("Get Application latest status")
	appKey := client.ObjectKey{Namespace: namespaceName, Name: newApp.Name}
	Eventually(func() *oamcomm.Revision {
		app = v1beta1.Application{}
		// Get errors are deliberately ignored: the poll retries until a
		// revision shows up or the timeout trips
		k8sClient.Get(ctx, appKey, &app)
		return app.Status.LatestRevision
	}, 30*time.Second, 500*time.Millisecond).ShouldNot(BeNil())
}
|
||||
|
||||
// updateApp patches the running application to the spec read from the given
// testdata file, then waits until more than one ApplicationRevision exists
// for it. Fixes: the two error messages were garbled ("still last generating
// apprev ", "acctually") — retry diagnostics now read correctly.
updateApp := func(target string) {
	By("Update the application to target spec")
	var targetApp v1beta1.Application
	Expect(common.ReadYamlToObject("testdata/rollout/cloneset/"+target, &targetApp)).Should(BeNil())

	Eventually(
		func() error {
			app = v1beta1.Application{}
			err := k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: targetApp.Name}, &app)
			if err != nil {
				return err
			}
			// only update once the previous generation has settled,
			// otherwise the new appRevision may never be generated
			if app.Status.Phase != oamcomm.ApplicationRunning {
				return fmt.Errorf("application is still generating the last appRevision")
			}
			app.Spec = targetApp.DeepCopy().Spec
			return k8sClient.Update(ctx, app.DeepCopy())
		}, time.Second*15, time.Millisecond*500).Should(Succeed())

	By("Get Application Revision created with more than one")
	Eventually(
		func() error {
			var appRevList = &v1beta1.ApplicationRevisionList{}
			err := k8sClient.List(ctx, appRevList, client.InNamespace(namespaceName),
				client.MatchingLabels(map[string]string{oam.LabelAppName: targetApp.Name}))
			if err != nil {
				return err
			}
			if len(appRevList.Items) < 2 {
				return fmt.Errorf("appRevision number mismatch, actually %d", len(appRevList.Items))
			}
			return nil
		},
		time.Second*30, time.Millisecond*300).Should(BeNil())
}
|
||||
|
||||
// createAppRolling submits the given AppRollout, retrying briefly on
// transient API errors.
createAppRolling := func(newAppRollout *v1beta1.AppRollout) {
	By(fmt.Sprintf("Apply an application rollout %s", newAppRollout.Name))
	Eventually(func() error {
		return k8sClient.Create(ctx, newAppRollout)
	}, 5*time.Second, 100*time.Millisecond).Should(Succeed())
}
|
||||
|
||||
// verifyRolloutOwnsCloneset asserts that, while a rollout is in flight, the
// AppRollout controller is the controller-owner of the target cloneset.
verifyRolloutOwnsCloneset := func() {
	By("Verify that rollout controller owns the cloneset")
	clonesetKey := client.ObjectKey{Namespace: namespaceName, Name: appRollout.Spec.ComponentList[0]}
	Eventually(func() string {
		k8sClient.Get(ctx, clonesetKey, &kc)
		owner := metav1.GetControllerOf(&kc)
		if owner == nil {
			return ""
		}
		return owner.Kind
	}, 10*time.Second, 100*time.Millisecond).Should(BeEquivalentTo(v1beta1.AppRolloutKind))
	// kc now holds the cloneset fetched above; double-check the owner group
	owner := metav1.GetControllerOf(&kc)
	Expect(owner.APIVersion).Should(BeEquivalentTo(v1beta1.SchemeGroupVersion.String()))
}
|
||||
|
||||
// verifyRolloutDeleted blocks until the AppRollout object is gone from the
// API server.
verifyRolloutDeleted := func() {
	By("Wait for the rollout delete")
	Eventually(func() bool {
		err := k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: appRolloutName}, &appRollout)
		return apierrors.IsNotFound(err)
	}, 60*time.Second, 500*time.Millisecond).Should(BeTrue())
}
|
||||
|
||||
// verifyRolloutSucceeded waits for the rollout to reach the succeeded state
// and then verifies the cloneset — and every one of its pods — is fully
// upgraded to the target revision's image. Fixes: the "updated replicas"
// error had its expect/got arguments swapped, and the pod-count error used
// %q on integers (which prints them as quoted runes) instead of %d.
verifyRolloutSucceeded := func(targetAppName string) {
	By(fmt.Sprintf("Wait for the rollout `%s` to succeed", targetAppName))
	Eventually(
		func() oamstd.RollingState {
			appRollout = v1beta1.AppRollout{}
			k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: appRolloutName}, &appRollout)
			return appRollout.Status.RollingState
		},
		time.Second*120, time.Second).Should(Equal(oamstd.RolloutSucceedState))
	Expect(appRollout.Status.UpgradedReadyReplicas).Should(BeEquivalentTo(appRollout.Status.RolloutTargetSize))
	Expect(appRollout.Status.UpgradedReplicas).Should(BeEquivalentTo(appRollout.Status.RolloutTargetSize))
	clonesetName := appRollout.Spec.ComponentList[0]

	By("Wait for resourceTracker to resume the control of cloneset")
	Eventually(
		func() error {
			kc = kruise.CloneSet{}
			err := k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: clonesetName}, &kc)
			if err != nil {
				return err
			}
			if kc.Status.UpdatedReplicas != *kc.Spec.Replicas {
				// expected value is spec.Replicas, observed is status.UpdatedReplicas
				return fmt.Errorf("expect cloneset updated replicas %d, but got %d",
					*kc.Spec.Replicas, kc.Status.UpdatedReplicas)
			}
			return nil
		},
		time.Second*60, time.Millisecond*500).Should(BeNil())

	// make sure all pods are upgraded and running on the target image
	image := kc.Spec.Template.Spec.Containers[0].Image
	podList := corev1.PodList{}
	Eventually(func() error {
		if err := k8sClient.List(ctx, &podList, client.MatchingLabels(kc.Spec.Template.Labels),
			client.InNamespace(namespaceName)); err != nil {
			return err
		}
		if len(podList.Items) != int(*kc.Spec.Replicas) {
			return fmt.Errorf("expect pod numbers %d, got %d", int(*kc.Spec.Replicas), len(podList.Items))
		}
		for _, pod := range podList.Items {
			gotImage := pod.Spec.Containers[0].Image
			if gotImage != image {
				return fmt.Errorf("expect pod container image %q, got %q", image, gotImage)
			}
			if pod.Status.Phase != corev1.PodRunning {
				return fmt.Errorf("expect pod phase %q, got %q", corev1.PodRunning, pod.Status.Phase)
			}
		}
		return nil
	}, 60*time.Second, 500*time.Millisecond).Should(Succeed())
}
|
||||
|
||||
// verifyIngress waits until the trait-managed ingress for the first
// component exists and its first rule serves the expected host.
// Fix: the poll interval was time.Microsecond*300 (0.3ms), an apparent
// typo for milliseconds that hammered the API server on every retry.
verifyIngress := func(domain string) {
	ingress := &corev1beta1.Ingress{}
	Eventually(func() error {
		var err error
		if err = k8sClient.Get(ctx, types.NamespacedName{Namespace: namespaceName, Name: appRollout.Spec.ComponentList[0]}, ingress); err != nil {
			return err
		}
		if ingress.Spec.Rules[0].Host != domain {
			return fmt.Errorf("domain mismatch wants %s actually %s", domain, ingress.Spec.Rules[0].Host)
		}
		return nil
	}, time.Second*30, time.Millisecond*300).Should(BeNil())
}
|
||||
|
||||
// applyTwoAppVersion installs the cloneset definition, deploys the source
// application, then updates it to the target spec so that two application
// revisions (v1 and v2) exist for the rollout specs to move between.
applyTwoAppVersion := func() {
	CreateClonesetDef()
	applySourceApp("app-source.yaml")
	updateApp("app-target.yaml")
}
|
||||
|
||||
// initialScale deploys revision v1 by running a pure scale rollout (empty
// source revision) up to five replicas, and records the rollout name.
initialScale := func() {
	By("Apply the application scale to deploy the source")
	var scaleRollout v1beta1.AppRollout
	Expect(common.ReadYamlToObject("testdata/rollout/cloneset/appRollout.yaml", &scaleRollout)).Should(BeNil())
	scaleRollout.Namespace = namespaceName
	scaleRollout.Spec.SourceAppRevisionName = ""
	scaleRollout.Spec.TargetAppRevisionName = utils.ConstructRevisionName(app.GetName(), 1)
	scaleRollout.Spec.RolloutPlan.TargetSize = pointer.Int32Ptr(5)
	createAppRolling(&scaleRollout)
	appRolloutName = scaleRollout.Name
	verifyRolloutSucceeded(scaleRollout.Spec.TargetAppRevisionName)
}
|
||||
|
||||
// rollForwardToSource reverts the application spec back to the source and
// then rolls "forward" from revision 2 to revision 1, waiting for the
// rollout to enter the in-batches phase and finally succeed.
rollForwardToSource := func() {
	By("Revert the application back to source")
	updateApp("app-source.yaml")

	By("Modify the application rollout with new target and source")
	rolloutKey := client.ObjectKey{Namespace: namespaceName, Name: appRollout.Name}
	Eventually(func() error {
		k8sClient.Get(ctx, rolloutKey, &appRollout)
		appRollout.Spec.SourceAppRevisionName = utils.ConstructRevisionName(app.GetName(), 2)
		appRollout.Spec.TargetAppRevisionName = utils.ConstructRevisionName(app.GetName(), 1)
		appRollout.Spec.RolloutPlan.BatchPartition = nil
		return k8sClient.Update(ctx, &appRollout)
	}, 15*time.Second, 500*time.Millisecond).Should(Succeed())

	By("Wait for the rollout phase change to rolling in batches")
	Eventually(func() oamstd.RollingState {
		k8sClient.Get(ctx, rolloutKey, &appRollout)
		return appRollout.Status.RollingState
	}, 10*time.Second, 10*time.Millisecond).Should(BeEquivalentTo(oamstd.RollingInBatchesState))

	verifyRolloutSucceeded(appRollout.Spec.TargetAppRevisionName)
}
|
||||
|
||||
// Every spec runs in its own randomly named, freshly created namespace.
BeforeEach(func() {
	By("Start to run a test, clean up previous resources")
	namespaceName = randomNamespaceName("rolling-e2e-test")
	createNamespace()
})
|
||||
|
||||
// AfterEach tears down the application, the rollout, and finally the whole
// test namespace. Fix: both cleanup polls used 300*time.Microsecond (0.3ms)
// as the retry interval — an apparent typo for milliseconds that busy-looped
// against the API server.
AfterEach(func() {
	By("Clean up resources after a test")
	// best-effort delete; the object may already be gone
	Eventually(func() error {
		err := k8sClient.Delete(ctx, &app)
		if err == nil || apierrors.IsNotFound(err) {
			return nil
		}
		return err
	}, 15*time.Second, 300*time.Millisecond).Should(BeNil())
	Eventually(func() error {
		err := k8sClient.Delete(ctx, &appRollout)
		if err == nil || apierrors.IsNotFound(err) {
			return nil
		}
		return err
	}, 15*time.Second, 300*time.Millisecond).Should(BeNil())
	verifyRolloutDeleted()
	By(fmt.Sprintf("Delete the entire namespaceName %s", ns.Name))
	// delete the namespaceName with all its resources
	Expect(k8sClient.Delete(ctx, &ns, client.PropagationPolicy(metav1.DeletePropagationBackground))).Should(BeNil())
})
|
||||
|
||||
It("Test cloneset basic scale", func() {
	CreateClonesetDef()
	applySourceApp("app-no-replica.yaml")

	By("Apply the application rollout go directly to the target")
	Expect(common.ReadYamlToObject("testdata/rollout/cloneset/appRollout.yaml", &appRollout)).Should(BeNil())
	appRollout.Namespace = namespaceName
	appRollout.Spec.SourceAppRevisionName = ""
	appRollout.Spec.TargetAppRevisionName = utils.ConstructRevisionName(app.GetName(), 1)
	appRollout.Spec.RolloutPlan.TargetSize = pointer.Int32Ptr(7)
	appRollout.Spec.RolloutPlan.BatchPartition = nil
	createAppRolling(&appRollout)
	appRolloutName = appRollout.Name

	// a scale-only rollout (no source revision) should converge directly
	verifyRolloutSucceeded(appRollout.Spec.TargetAppRevisionName)
})
|
||||
|
||||
It("Test cloneset rollout with a manual check", func() {
	applyTwoAppVersion()
	// deploy v1 first via a pure scale rollout
	initialScale()

	By("Apply the application rollout that stops after the first batch")
	batchPartition := 0
	rolloutKey := client.ObjectKey{Namespace: namespaceName, Name: appRollout.Name}
	Eventually(func() error {
		k8sClient.Get(ctx, rolloutKey, &appRollout)
		appRollout.Spec.SourceAppRevisionName = utils.ConstructRevisionName(app.GetName(), 1)
		appRollout.Spec.TargetAppRevisionName = utils.ConstructRevisionName(app.GetName(), 2)
		appRollout.Spec.RolloutPlan.BatchPartition = pointer.Int32Ptr(int32(batchPartition))
		return k8sClient.Update(ctx, &appRollout)
	}, 15*time.Second, 500*time.Millisecond).Should(Succeed())

	By("Wait for the rollout phase change to rolling in batches")
	Eventually(func() oamstd.RollingState {
		k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: appRolloutName}, &appRollout)
		return appRollout.Status.RollingState
	}, 60*time.Second, 500*time.Millisecond).Should(BeEquivalentTo(oamstd.RollingInBatchesState))

	By("Wait for rollout to finish one batch")
	Eventually(func() int32 {
		k8sClient.Get(ctx, rolloutKey, &appRollout)
		return appRollout.Status.CurrentBatch
	}, 15*time.Second, 500*time.Millisecond).Should(BeEquivalentTo(batchPartition))

	By("Verify that the rollout stops at the first batch")
	// the partitioned batch must first report ready
	Eventually(func() oamstd.BatchRollingState {
		k8sClient.Get(ctx, rolloutKey, &appRollout)
		return appRollout.Status.BatchRollingState
	}, 30*time.Second, 500*time.Millisecond).Should(Equal(oamstd.BatchReadyState))

	// give the controller 15 extra seconds: it must not advance past the partition
	time.Sleep(15 * time.Second)
	k8sClient.Get(ctx, rolloutKey, &appRollout)
	Expect(appRollout.Status.RollingState).Should(BeEquivalentTo(oamstd.RollingInBatchesState))
	Expect(appRollout.Status.BatchRollingState).Should(BeEquivalentTo(oamstd.BatchReadyState))
	Expect(appRollout.Status.CurrentBatch).Should(BeEquivalentTo(batchPartition))

	verifyRolloutOwnsCloneset()

	By("Finish the application rollout")
	// lift the restriction by pointing the partition at the final batch
	appRollout.Spec.RolloutPlan.BatchPartition = pointer.Int32Ptr(int32(len(appRollout.Spec.RolloutPlan.RolloutBatches) - 1))
	Expect(k8sClient.Update(ctx, &appRollout)).Should(Succeed())
	verifyRolloutSucceeded(appRollout.Spec.TargetAppRevisionName)
})
|
||||
|
||||
// Fix: the transition-time assertion was corrupted in this copy —
// `Expect((<).Before(...)` is not valid Go; the `lt` variable (declared just
// above and reused below) was mangled by HTML-entity decoding of `&lt`. The
// assertion is restored as lt.Before(&beforeSleep), i.e. the BatchPaused
// condition transitioned before the 15s sleep began.
It("Test pause and modify rollout plan after rolling succeeded", func() {
	CreateClonesetDef()
	applySourceApp("app-no-replica.yaml")
	By("Apply the application rollout go directly to the target")
	var newAppRollout v1beta1.AppRollout
	Expect(common.ReadYamlToObject("testdata/rollout/cloneset/appRollout.yaml", &newAppRollout)).Should(BeNil())
	newAppRollout.Namespace = namespaceName
	newAppRollout.Spec.SourceAppRevisionName = ""
	newAppRollout.Spec.TargetAppRevisionName = utils.ConstructRevisionName(app.GetName(), 1)
	newAppRollout.Spec.RolloutPlan.TargetSize = pointer.Int32Ptr(10)
	newAppRollout.Spec.RolloutPlan.BatchPartition = nil
	createAppRolling(&newAppRollout)
	appRolloutName = newAppRollout.Name

	By("Wait for the rollout phase change to rollingInBatches")
	Eventually(
		func() oamstd.RollingState {
			appRollout = v1beta1.AppRollout{}
			k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: appRolloutName}, &appRollout)
			return appRollout.Status.RollingState
		},
		time.Second*10, time.Millisecond).Should(BeEquivalentTo(oamstd.RollingInBatchesState))

	By("Pause the rollout")
	Eventually(
		func() error {
			k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: appRollout.Name}, &appRollout)
			appRollout.Spec.RolloutPlan.Paused = true
			return k8sClient.Update(ctx, &appRollout)
		},
		time.Second*15, time.Millisecond*500).Should(Succeed())

	By("Verify that the rollout pauses")
	Eventually(
		func() corev1.ConditionStatus {
			k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: appRollout.Name}, &appRollout)
			return appRollout.Status.GetCondition(oamstd.BatchPaused).Status
		},
		time.Second*30, time.Millisecond*500).Should(Equal(corev1.ConditionTrue))

	preBatch := appRollout.Status.CurrentBatch
	// wait for 15 seconds; a paused rollout must not advance its batch
	time.Sleep(15 * time.Second)
	k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: appRollout.Name}, &appRollout)
	Expect(appRollout.Status.RollingState).Should(BeEquivalentTo(oamstd.RollingInBatchesState))
	Expect(appRollout.Status.CurrentBatch).Should(BeEquivalentTo(preBatch))
	k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: appRollout.Name}, &appRollout)
	lt := appRollout.Status.GetCondition(oamstd.BatchPaused).LastTransitionTime
	beforeSleep := metav1.Time{
		Time: time.Now().Add(-15 * time.Second),
	}
	// the pause condition must predate the sleep, proving it did not flap
	Expect(lt.Before(&beforeSleep)).Should(BeTrue())

	verifyRolloutOwnsCloneset()

	By("Finish the application rollout")
	// remove the pause restriction
	appRollout.Spec.RolloutPlan.Paused = false
	Expect(k8sClient.Update(ctx, &appRollout)).Should(Succeed())

	verifyRolloutSucceeded(appRollout.Spec.TargetAppRevisionName)
	// record the succeed transition time
	k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: appRollout.Name}, &appRollout)
	lt = appRollout.Status.GetCondition(oamstd.RolloutSucceed).LastTransitionTime

	// nothing should happen on a repeat verify; the transition time stays put
	verifyRolloutSucceeded(appRollout.Spec.TargetAppRevisionName)
	k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: appRollout.Name}, &appRollout)
	Expect(appRollout.Status.RollingState).Should(BeEquivalentTo(oamstd.RolloutSucceedState))
	Expect(appRollout.Status.GetCondition(oamstd.RolloutSucceed).LastTransitionTime).Should(BeEquivalentTo(lt))
})
|
||||
|
||||
It("Test rolling forward after a successful rollout", func() {
	applyTwoAppVersion()
	// deploy v1 first via a pure scale rollout
	initialScale()

	By("Finish the application rollout")
	rolloutKey := client.ObjectKey{Namespace: namespaceName, Name: appRollout.Name}
	Eventually(func() error {
		k8sClient.Get(ctx, rolloutKey, &appRollout)
		appRollout.Spec.SourceAppRevisionName = utils.ConstructRevisionName(app.GetName(), 1)
		appRollout.Spec.TargetAppRevisionName = utils.ConstructRevisionName(app.GetName(), 2)
		appRollout.Spec.RolloutPlan.BatchPartition = nil
		return k8sClient.Update(ctx, &appRollout)
	}, 5*time.Second, time.Millisecond).Should(Succeed())

	By("Wait for the rollout phase change to rolling in batches")
	Eventually(func() oamstd.RollingState {
		k8sClient.Get(ctx, rolloutKey, &appRollout)
		return appRollout.Status.RollingState
	}, 10*time.Second, 10*time.Millisecond).Should(BeEquivalentTo(oamstd.RollingInBatchesState))

	// let v1 -> v2 finish, then roll forward back to the source revision
	verifyRolloutSucceeded(appRollout.Spec.TargetAppRevisionName)
	rollForwardToSource()
})
|
||||
|
||||
It("Test rolling forward in the middle of rollout", func() {
	applyTwoAppVersion()
	// deploy v1 first via a pure scale rollout
	initialScale()

	By("Finish the application rollout")
	rolloutKey := client.ObjectKey{Namespace: namespaceName, Name: appRollout.Name}
	Eventually(func() error {
		k8sClient.Get(ctx, rolloutKey, &appRollout)
		appRollout.Spec.SourceAppRevisionName = utils.ConstructRevisionName(app.GetName(), 1)
		appRollout.Spec.TargetAppRevisionName = utils.ConstructRevisionName(app.GetName(), 2)
		appRollout.Spec.RolloutPlan.BatchPartition = nil
		return k8sClient.Update(ctx, &appRollout)
	}, 15*time.Second, 500*time.Millisecond).Should(Succeed())

	By("Wait for the rollout phase change to rolling in batches")
	Eventually(func() oamstd.RollingState {
		k8sClient.Get(ctx, rolloutKey, &appRollout)
		return appRollout.Status.RollingState
	}, 10*time.Second, 10*time.Millisecond).Should(BeEquivalentTo(oamstd.RollingInBatchesState))

	// revert to the source revision while v1 -> v2 is still in flight
	rollForwardToSource()
})
|
||||
|
||||
It("Test delete rollout plan should not remove workload", func() {
	CreateClonesetDef()
	applyTwoAppVersion()
	// deploy v1 first via a pure scale rollout
	initialScale()

	By("Finish the application rollout")
	rolloutKey := client.ObjectKey{Namespace: namespaceName, Name: appRollout.Name}
	Eventually(func() error {
		k8sClient.Get(ctx, rolloutKey, &appRollout)
		appRollout.Spec.SourceAppRevisionName = utils.ConstructRevisionName(app.GetName(), 1)
		appRollout.Spec.TargetAppRevisionName = utils.ConstructRevisionName(app.GetName(), 2)
		appRollout.Spec.RolloutPlan.BatchPartition = nil
		return k8sClient.Update(ctx, &appRollout)
	}, 10*time.Second, 500*time.Millisecond).Should(Succeed())

	By("Wait for the rollout phase change to rolling in batches")
	Eventually(func() oamstd.RollingState {
		k8sClient.Get(ctx, rolloutKey, &appRollout)
		return appRollout.Status.RollingState
	}, 10*time.Second, 500*time.Millisecond).Should(BeEquivalentTo(oamstd.RollingInBatchesState))

	verifyRolloutOwnsCloneset()

	By("Remove the application rollout")
	Expect(k8sClient.Delete(ctx, &appRollout)).Should(Succeed())
	verifyRolloutDeleted()

	By("Verify that application does not control the cloneset")
	// once the rollout is gone, the cloneset should lose its controller
	// owner yet stay paused — i.e. the workload itself is not removed
	clonesetName := appRollout.Spec.ComponentList[0]
	Eventually(func() bool {
		k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: clonesetName}, &kc)
		if metav1.GetControllerOf(&kc) != nil {
			return false
		}
		return kc.Spec.UpdateStrategy.Paused
	}, 30*time.Second, time.Second).Should(BeTrue())
})
|
||||
|
||||
It("Test revert the rollout plan in the middle of rollout", func() {
	CreateClonesetDef()
	applyTwoAppVersion()
	// deploy v1 first via a pure scale rollout
	initialScale()

	By("Finish the application rollout")
	rolloutKey := client.ObjectKey{Namespace: namespaceName, Name: appRollout.Name}
	Eventually(func() error {
		k8sClient.Get(ctx, rolloutKey, &appRollout)
		appRollout.Spec.SourceAppRevisionName = utils.ConstructRevisionName(app.GetName(), 1)
		appRollout.Spec.TargetAppRevisionName = utils.ConstructRevisionName(app.GetName(), 2)
		appRollout.Spec.RolloutPlan.BatchPartition = nil
		return k8sClient.Update(ctx, &appRollout)
	}, 15*time.Second, 500*time.Millisecond).Should(Succeed())

	By("Wait for the rollout phase change to rolling in batches")
	Eventually(func() oamstd.RollingState {
		k8sClient.Get(ctx, rolloutKey, &appRollout)
		return appRollout.Status.RollingState
	}, 10*time.Second, 500*time.Millisecond).Should(BeEquivalentTo(oamstd.RollingInBatchesState))

	verifyRolloutOwnsCloneset()

	By("Revert the application rollout")
	// swap source and target while the v1 -> v2 rollout is still in flight
	Eventually(func() error {
		k8sClient.Get(ctx, rolloutKey, &appRollout)
		appRollout.Spec.SourceAppRevisionName = utils.ConstructRevisionName(app.GetName(), 2)
		appRollout.Spec.TargetAppRevisionName = utils.ConstructRevisionName(app.GetName(), 1)
		appRollout.Spec.RolloutPlan.BatchPartition = nil
		return k8sClient.Update(ctx, &appRollout)
	}, 15*time.Second, 500*time.Millisecond).Should(Succeed())
	verifyRolloutSucceeded(appRollout.Spec.TargetAppRevisionName)
})
|
||||
|
||||
It("Test rollout will update same name trait", func() {
	CreateClonesetDef()
	CreateIngressDef()
	applySourceApp("app-with-ingress-source.yaml")

	By("Apply the application rollout go directly to the target")
	appRollout = v1beta1.AppRollout{}
	Expect(common.ReadYamlToObject("testdata/rollout/cloneset/appRollout.yaml", &appRollout)).Should(BeNil())
	appRollout.Namespace = namespaceName
	appRollout.Spec.SourceAppRevisionName = ""
	appRollout.Spec.TargetAppRevisionName = utils.ConstructRevisionName(app.GetName(), 1)
	appRollout.Spec.RolloutPlan.TargetSize = pointer.Int32Ptr(7)
	appRollout.Spec.RolloutPlan.BatchPartition = nil
	createAppRolling(&appRollout)
	appRolloutName = appRollout.Name
	verifyRolloutSucceeded(appRollout.Spec.TargetAppRevisionName)

	By("verify ingress status")
	verifyIngress("test.example.com")

	By("rollout to revision 2")
	updateApp("app-with-ingress-target.yaml")
	rolloutKey := client.ObjectKey{Namespace: namespaceName, Name: appRollout.Name}
	Eventually(func() error {
		k8sClient.Get(ctx, rolloutKey, &appRollout)
		appRollout.Spec.SourceAppRevisionName = utils.ConstructRevisionName(app.GetName(), 1)
		appRollout.Spec.TargetAppRevisionName = utils.ConstructRevisionName(app.GetName(), 2)
		appRollout.Spec.RolloutPlan.BatchPartition = nil
		return k8sClient.Update(ctx, &appRollout)
	}, 10*time.Second, 500*time.Millisecond).Should(Succeed())
	verifyRolloutSucceeded(appRollout.Spec.TargetAppRevisionName)

	By("verify after rollout ingress status")
	// the same-named ingress trait must have been updated in place
	verifyIngress("test-1.example.com")
})
|
||||
|
||||
// Fix: the final Eventually polled every 300*time.Microsecond (0.3ms) — an
// apparent typo for milliseconds that busy-looped against the API server.
It("Test rollout succeed will gc useless trait", func() {
	CreateClonesetDef()
	CreateIngressDef()
	applySourceApp("app-with-ingress-source.yaml")

	By("Apply the application rollout go directly to the target")
	appRollout = v1beta1.AppRollout{}
	Expect(common.ReadYamlToObject("testdata/rollout/cloneset/appRollout.yaml", &appRollout)).Should(BeNil())
	appRollout.Namespace = namespaceName
	appRollout.Spec.SourceAppRevisionName = ""
	appRollout.Spec.TargetAppRevisionName = utils.ConstructRevisionName(app.GetName(), 1)
	appRollout.Spec.RolloutPlan.TargetSize = pointer.Int32Ptr(7)
	appRollout.Spec.RolloutPlan.BatchPartition = nil
	createAppRolling(&appRollout)
	appRolloutName = appRollout.Name
	verifyRolloutSucceeded(appRollout.Spec.TargetAppRevisionName)

	By("verify ingress status")
	verifyIngress("test.example.com")

	By("rollout to revision 2 to disable ingress trait")
	updateApp("app-remove-ingress.yaml")
	Eventually(
		func() error {
			k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: appRollout.Name}, &appRollout)
			appRollout.Spec.SourceAppRevisionName = utils.ConstructRevisionName(app.GetName(), 1)
			appRollout.Spec.TargetAppRevisionName = utils.ConstructRevisionName(app.GetName(), 2)
			appRollout.Spec.RolloutPlan.BatchPartition = nil
			return k8sClient.Update(ctx, &appRollout)
		}, time.Second*10, time.Millisecond*500).Should(Succeed())
	verifyRolloutSucceeded(appRollout.Spec.TargetAppRevisionName)

	By("verify after rollout ingress have been removed")
	// revision 2 dropped the ingress trait, so gc must delete the object
	Eventually(func() error {
		ingress := &corev1beta1.Ingress{}
		return k8sClient.Get(ctx, types.NamespacedName{Namespace: namespaceName, Name: appRollout.Spec.ComponentList[0]}, ingress)
	}, time.Second*30, 300*time.Millisecond).Should(util.NotFoundMatcher{})
})
|
||||
|
||||
It("Test scale again by modify targetSize", func() {
|
||||
var err error
|
||||
CreateClonesetDef()
|
||||
applySourceApp("app-no-replica.yaml")
|
||||
By("Apply the application rollout go directly to the target")
|
||||
appRollout = v1beta1.AppRollout{}
|
||||
Expect(common.ReadYamlToObject("testdata/rollout/cloneset/appRolloutScale.yaml", &appRollout)).Should(BeNil())
|
||||
appRollout.Namespace = namespaceName
|
||||
appRollout.Spec.SourceAppRevisionName = ""
|
||||
appRollout.Spec.TargetAppRevisionName = utils.ConstructRevisionName(app.GetName(), 1)
|
||||
appRollout.Spec.RolloutPlan.TargetSize = pointer.Int32Ptr(2)
|
||||
appRollout.Spec.RolloutPlan.BatchPartition = nil
|
||||
By("create appRollout initial targetSize is 3")
|
||||
createAppRolling(&appRollout)
|
||||
appRolloutName = appRollout.Name
|
||||
verifyRolloutSucceeded(appRollout.Spec.TargetAppRevisionName)
|
||||
By("modify appRollout targetSize to 4")
|
||||
Eventually(func() error {
|
||||
if err = k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: appRolloutName}, &appRollout); err != nil {
|
||||
return err
|
||||
}
|
||||
appRollout.Spec.RolloutPlan.TargetSize = pointer.Int32Ptr(4)
|
||||
if err = k8sClient.Update(ctx, &appRollout); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}, 60*time.Second, 300*time.Microsecond).Should(BeNil())
|
||||
// before check status, we must guarantee the cloneset has been update
|
||||
Eventually(func() error {
|
||||
clonesetName := appRollout.Spec.ComponentList[0]
|
||||
kc = kruise.CloneSet{}
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: clonesetName}, &kc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if *kc.Spec.Replicas != 4 {
|
||||
return fmt.Errorf("pod replicas mismatch")
|
||||
}
|
||||
return nil
|
||||
}, 30*time.Second, 300*time.Microsecond).Should(BeNil())
|
||||
verifyRolloutSucceeded(appRollout.Spec.TargetAppRevisionName)
|
||||
By("modify appRollout targetSize to 6")
|
||||
Eventually(func() error {
|
||||
if err = k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: appRolloutName}, &appRollout); err != nil {
|
||||
return err
|
||||
}
|
||||
appRollout.Spec.RolloutPlan.TargetSize = pointer.Int32Ptr(6)
|
||||
if err = k8sClient.Update(ctx, &appRollout); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}, 60*time.Second, 300*time.Microsecond).Should(BeNil())
|
||||
// before check status, we must guarantee the cloneset has been update
|
||||
Eventually(func() error {
|
||||
kc = kruise.CloneSet{}
|
||||
clonesetName := appRollout.Spec.ComponentList[0]
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: clonesetName}, &kc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if *kc.Spec.Replicas != 6 {
|
||||
return fmt.Errorf("pod replicas mismatch")
|
||||
}
|
||||
return nil
|
||||
}, 30*time.Second, 300*time.Microsecond).Should(BeNil())
|
||||
verifyRolloutSucceeded(appRollout.Spec.TargetAppRevisionName)
|
||||
})
|
||||
|
||||
It("Test rolling by changing the definition", func() {
|
||||
// Placeholder spec: rollout triggered by a change to the workload
// definition is not implemented yet.
// TODO(@wonderflow): we should support rollout by changing definition
|
||||
})
|
||||
})
|
||||
@@ -49,17 +49,14 @@ import ( [diff hunk header: begins a change to the e2e suite setup file; the fragment below is incomplete context]
|
||||
commontypes "github.com/oam-dev/kubevela/apis/core.oam.dev/common"
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
|
||||
"github.com/oam-dev/kubevela/pkg/oam/util"
|
||||
"github.com/oam-dev/kubevela/pkg/utils/common"
|
||||
// +kubebuilder:scaffold:imports
|
||||
)
|
||||
|
||||
var k8sClient client.Client
|
||||
var scheme = runtime.NewScheme()
|
||||
var manualscalertrait v1alpha2.TraitDefinition
|
||||
var extendedmanualscalertrait v1alpha2.TraitDefinition
|
||||
var roleName = "oam-example-com"
|
||||
var roleBindingName = "oam-role-binding"
|
||||
var crd crdv1.CustomResourceDefinition
|
||||
|
||||
// A DefinitionExtension is an Object type for xxxDefinitin.spec.extension
|
||||
type DefinitionExtension struct {
|
||||
@@ -123,8 +120,6 @@ var _ = BeforeSuite(func(done Done) {
|
||||
}
|
||||
// For some reason, traitDefinition is created as a Cluster scope object
|
||||
Expect(k8sClient.Create(context.Background(), manualscalertrait.DeepCopy())).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
|
||||
// for oam spec v0.2 e2e-test
|
||||
manualscalertrait.Namespace = "oam-runtime-system"
|
||||
Expect(k8sClient.Create(context.Background(), &manualscalertrait)).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
|
||||
// Create manual scaler trait definition with spec.extension field
|
||||
definitionExtension := DefinitionExtension{
|
||||
@@ -133,24 +128,6 @@ var _ = BeforeSuite(func(done Done) {
|
||||
in := new(runtime.RawExtension)
|
||||
in.Raw, _ = json.Marshal(definitionExtension)
|
||||
|
||||
extendedmanualscalertrait = v1alpha2.TraitDefinition{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "manualscalertraits-extended.core.oam.dev",
|
||||
Namespace: "vela-system",
|
||||
Labels: map[string]string{"trait": "manualscalertrait"},
|
||||
},
|
||||
Spec: v1alpha2.TraitDefinitionSpec{
|
||||
WorkloadRefPath: "spec.workloadRef",
|
||||
Reference: commontypes.DefinitionReference{
|
||||
Name: "manualscalertraits-extended.core.oam.dev",
|
||||
},
|
||||
Extension: in,
|
||||
},
|
||||
}
|
||||
Expect(k8sClient.Create(context.Background(), extendedmanualscalertrait.DeepCopy())).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
|
||||
// for oam spec v0.2 e2e-test
|
||||
extendedmanualscalertrait.Namespace = "oam-runtime-system"
|
||||
Expect(k8sClient.Create(context.Background(), &extendedmanualscalertrait)).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
|
||||
By("Created extended manualscalertraits.core.oam.dev")
|
||||
|
||||
// create workload definition for 'deployments'
|
||||
@@ -205,62 +182,6 @@ var _ = BeforeSuite(func(done Done) {
|
||||
Expect(k8sClient.Create(context.Background(), &adminRoleBinding)).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
|
||||
By("Created cluster role binding for the test service account")
|
||||
|
||||
crd = crdv1.CustomResourceDefinition{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "bars.example.com",
|
||||
Labels: map[string]string{"crd": "revision-test"},
|
||||
},
|
||||
Spec: crdv1.CustomResourceDefinitionSpec{
|
||||
Group: "example.com",
|
||||
Names: crdv1.CustomResourceDefinitionNames{
|
||||
Kind: "Bar",
|
||||
ListKind: "BarList",
|
||||
Plural: "bars",
|
||||
Singular: "bar",
|
||||
},
|
||||
Versions: []crdv1.CustomResourceDefinitionVersion{
|
||||
{
|
||||
Name: "v1",
|
||||
Served: true,
|
||||
Storage: true,
|
||||
Schema: &crdv1.CustomResourceValidation{
|
||||
OpenAPIV3Schema: &crdv1.JSONSchemaProps{
|
||||
Type: "object",
|
||||
Properties: map[string]crdv1.JSONSchemaProps{
|
||||
"spec": {
|
||||
Type: "object",
|
||||
Properties: map[string]crdv1.JSONSchemaProps{
|
||||
"key": {Type: "string"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Scope: crdv1.NamespaceScoped,
|
||||
},
|
||||
}
|
||||
Expect(k8sClient.Create(context.Background(), crd.DeepCopy())).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
|
||||
// for oam spec v0.2 e2e-test
|
||||
crd.Namespace = "oam-runtime-system"
|
||||
Expect(k8sClient.Create(context.Background(), &crd)).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
|
||||
By("Created a crd for revision mechanism test")
|
||||
|
||||
By("Create workload definition for revision mechanism test")
|
||||
var nwd v1alpha2.WorkloadDefinition
|
||||
Expect(common.ReadYamlToObject("testdata/revision/workload-def.yaml", &nwd)).Should(BeNil())
|
||||
Eventually(
|
||||
func() error {
|
||||
return k8sClient.Create(context.Background(), nwd.DeepCopy())
|
||||
},
|
||||
time.Second*3, time.Millisecond*300).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
|
||||
nwd.Namespace = "oam-runtime-system"
|
||||
Eventually(
|
||||
func() error {
|
||||
return k8sClient.Create(context.Background(), &nwd)
|
||||
},
|
||||
time.Second*3, time.Millisecond*300).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
|
||||
close(done)
|
||||
}, 300)
|
||||
|
||||
@@ -274,14 +195,6 @@ var _ = AfterSuite(func() {
|
||||
}
|
||||
Expect(k8sClient.Delete(context.Background(), &adminRoleBinding)).Should(BeNil())
|
||||
By("Deleted the cluster role binding")
|
||||
|
||||
crd = crdv1.CustomResourceDefinition{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "bars.example.com",
|
||||
Labels: map[string]string{"crd": "revision-test"},
|
||||
},
|
||||
}
|
||||
Expect(k8sClient.Delete(context.Background(), &crd)).Should(BeNil())
|
||||
})
|
||||
|
||||
// RequestReconcileNow will trigger an immediate reconciliation on K8s object.
|
||||
|
||||
Reference in New Issue
Block a user