Compare commits

...

18 Commits

Author SHA1 Message Date
Somefive
18d93039c9 [Backport release-1.4] Fix: gc failure cause workflow restart not working properly (#5241) (#5243)
* Fix: gc failure cause workflow restart not working properly

Signed-off-by: Somefive <yd219913@alibaba-inc.com>

* Feat: switch ci machine

Signed-off-by: Somefive <yd219913@alibaba-inc.com>

* Fix: enhance test

Signed-off-by: Somefive <yd219913@alibaba-inc.com>

Signed-off-by: Somefive <yd219913@alibaba-inc.com>

Signed-off-by: Somefive <yd219913@alibaba-inc.com>
2023-01-03 20:01:29 +08:00
github-actions[bot]
8ffd80e4a7 Fix: Dry-run from revision application, problems caused by resource version lower than the current version (#5249)
Signed-off-by: old.prince <di7zhang@gmail.com>
(cherry picked from commit d5fcb04147)

Co-authored-by: oldprince <di7zhang@gmail.com>
2023-01-03 11:36:57 +08:00
github-actions[bot]
735075f5a6 [Backport release-1.5] Fix: forbid 302 request to avoid SSRF (#5003)
* fix helm chart list endpoint SSRF CVE

Signed-off-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
(cherry picked from commit 8883a6219d)

* revert error log

Signed-off-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
(cherry picked from commit e1e6972b17)

* change with const value

Signed-off-by: 楚岳 <wangyike.wyk@alibaba-inc.com>

fix ci

Signed-off-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
(cherry picked from commit fbeacb0a6b)

Co-authored-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
2022-11-04 20:16:53 +08:00
github-actions[bot]
52d1a4364b fix gitlab addon registry (#4938)
Signed-off-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
(cherry picked from commit f3ee964734)

Co-authored-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
2022-10-27 22:19:34 +08:00
github-actions[bot]
1b7f9aae65 [Backport release-1.5] Chore: wrong endpoint for LoadBalancer type service(revert #4729) (#4906)
* Chore: wrong endpoint for LoadBalancer type service(revert #4729)

Signed-off-by: barnettZQG <barnett.zqg@gmail.com>
(cherry picked from commit 92ed75c863)

* Fix: change the unit test

Signed-off-by: barnettZQG <barnett.zqg@gmail.com>
(cherry picked from commit 7985353e1d)

Co-authored-by: barnettZQG <barnett.zqg@gmail.com>
2022-10-22 13:27:07 +08:00
github-actions[bot]
e94519b788 Chore: change the package name of the readme-generator-for-helm (#4896)
Signed-off-by: barnettZQG <barnett.zqg@gmail.com>
(cherry picked from commit 8b46e6076a)

Co-authored-by: barnettZQG <barnett.zqg@gmail.com>
2022-10-20 16:37:25 +08:00
Somefive
18d755ed72 Fix: #4865 (#4867)
Signed-off-by: Somefive <yd219913@alibaba-inc.com>

Signed-off-by: Somefive <yd219913@alibaba-inc.com>
2022-10-15 13:18:43 +08:00
Somefive
c15d0e4e0f Fix: prevent rerun application while upgrading due to old apprev lack app workflow (#4865)
Signed-off-by: Somefive <yd219913@alibaba-inc.com>

Signed-off-by: Somefive <yd219913@alibaba-inc.com>
2022-10-14 22:16:51 +08:00
github-actions[bot]
82ce9daf38 Fix: support default value of ui schema (#4859)
Signed-off-by: Jianbo Sun <jianbo.sjb@alibaba-inc.com>
(cherry picked from commit e584a35c83)

Co-authored-by: Jianbo Sun <jianbo.sjb@alibaba-inc.com>
2022-10-13 14:45:47 +08:00
github-actions[bot]
6eef47c5e0 [Backport release-1.5] Fix: change LabelResourceRuleFormat from Annotations to Labels (#4855)
* fix: change Annotations to Labels

Signed-off-by: chengleqi <chengleqi5g@hotmail.com>
(cherry picked from commit 73696d8c04)

* fix: add FormatYAML label for test case

Signed-off-by: chengleqi <chengleqi5g@hotmail.com>
(cherry picked from commit 32b6ba2c41)

* add json format rules test case

Signed-off-by: chengleqi <chengleqi5g@hotmail.com>
(cherry picked from commit cf207da2b9)

Co-authored-by: chengleqi <chengleqi5g@hotmail.com>
2022-10-12 15:28:34 +08:00
Jianbo Sun
7728de933b Fix: align sha256sums file with binary name (#4835)
Signed-off-by: Jianbo Sun <jianbo.sjb@alibaba-inc.com>

Signed-off-by: Jianbo Sun <jianbo.sjb@alibaba-inc.com>
2022-10-09 10:41:42 +08:00
github-actions[bot]
5ceb193ebe Chore: add IP for gateway trait (#4834)
Signed-off-by: Jianbo Sun <jianbo.sjb@alibaba-inc.com>
(cherry picked from commit 93b85480a6)

Co-authored-by: Jianbo Sun <jianbo.sjb@alibaba-inc.com>
2022-10-09 10:04:57 +08:00
github-actions[bot]
186ab37547 fix bugs and add tests (#4832)
Signed-off-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
(cherry picked from commit e6c9e3887f)

Co-authored-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
2022-10-08 17:28:47 +08:00
Somefive
5a98541e8e Feat: workflow will rerun when application workflow changes (#4813)
Signed-off-by: Somefive <yd219913@alibaba-inc.com>

Signed-off-by: Somefive <yd219913@alibaba-inc.com>
2022-09-30 21:22:59 +08:00
github-actions[bot]
d599e63ec9 Feat: add image-pull-policy to init-container (#4812)
Signed-off-by: Somefive <yd219913@alibaba-inc.com>
(cherry picked from commit a9ba8bb502)

Co-authored-by: Somefive <yd219913@alibaba-inc.com>
2022-09-29 22:43:00 +08:00
Somefive
055a50fad8 Feat: add port name to CollectServiceEndpoints (#4805)
Signed-off-by: Somefive <yd219913@alibaba-inc.com>

Signed-off-by: Somefive <yd219913@alibaba-inc.com>
2022-09-29 13:58:35 +08:00
github-actions[bot]
7921475af2 Fix: add path clean for request (#4800)
Signed-off-by: Somefive <yd219913@alibaba-inc.com>
(cherry picked from commit 9d4630ea32)

Co-authored-by: Somefive <yd219913@alibaba-inc.com>
2022-09-28 14:23:44 +08:00
github-actions[bot]
caf09b81b0 Fix: 'vela env init' does not check for an existing env with the same name (#4798)
Signed-off-by: wuzhongjian <wuzhongjian_yewu@cmss.chinamobile.com>
(cherry picked from commit a38e6d401b)

Co-authored-by: wuzhongjian <wuzhongjian_yewu@cmss.chinamobile.com>
2022-09-28 10:59:26 +08:00
40 changed files with 536 additions and 95 deletions

View File

@@ -104,7 +104,7 @@ jobs:
name: codecov-umbrella
apiserver-e2e-tests:
runs-on: aliyun
runs-on: aliyun-legacy
needs: [ detect-noop,set-k8s-matrix ]
if: needs.detect-noop.outputs.noop != 'true'
strategy:

View File

@@ -52,7 +52,7 @@ jobs:
e2e-multi-cluster-tests:
runs-on: aliyun
runs-on: aliyun-legacy
needs: [ detect-noop,set-k8s-matrix ]
if: needs.detect-noop.outputs.noop != 'true'
strategy:

View File

@@ -51,7 +51,7 @@ jobs:
fi
e2e-rollout-tests:
runs-on: aliyun
runs-on: aliyun-legacy
needs: [ detect-noop,set-k8s-matrix ]
if: needs.detect-noop.outputs.noop != 'true'
strategy:

View File

@@ -51,7 +51,7 @@ jobs:
fi
e2e-tests:
runs-on: aliyun
runs-on: aliyun-legacy
needs: [ detect-noop,set-k8s-matrix ]
if: needs.detect-noop.outputs.noop != 'true'
strategy:

View File

@@ -97,7 +97,7 @@ jobs:
version: ${{ env.GOLANGCI_VERSION }}
check-diff:
runs-on: aliyun
runs-on: aliyun-legacy
needs: detect-noop
if: needs.detect-noop.outputs.noop != 'true'

View File

@@ -150,11 +150,15 @@ jobs:
- name: Display structure of downloaded files
run: ls -R
working-directory: cli-artifacts
- name: Get version
run: echo "VELA_VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
- shell: bash
working-directory: cli-artifacts
run: |
for file in *
do
sed -i "s/\/vela/-${{ env.VELA_VERSION }}/g" ${file}
sed -i "s/\/kubectl-vela/-${{ env.VELA_VERSION }}/g" ${file}
cat ${file} >> sha256sums.txt
done
- name: Upload Checksums

View File

@@ -4,7 +4,7 @@ on:
- cron: '* * * * *'
jobs:
clean-image:
runs-on: aliyun
runs-on: aliyun-legacy
steps:
- name: Cleanup image
run: docker image prune -f

View File

@@ -104,7 +104,7 @@ spec:
message: "Visiting URL: " + context.outputs.ingress.spec.rules[0].host + ", IP: " + igs[0].ip
}
if igs[0].host == _|_ {
message: "Host not specified, visit the cluster or load balancer in front of the cluster"
message: "Host not specified, visit the cluster or load balancer in front of the cluster with IP: " + igs[0].ip
}
}
if igs[0].ip == _|_ {

View File

@@ -28,8 +28,9 @@ spec:
}]
}]
initContainers: [{
name: parameter.name
image: parameter.image
name: parameter.name
image: parameter.image
imagePullPolicy: parameter.imagePullPolicy
if parameter.cmd != _|_ {
command: parameter.cmd
}
@@ -59,6 +60,9 @@ spec:
// +usage=Specify the image of init container
image: string
// +usage=Specify image pull policy for your service
imagePullPolicy: *"IfNotPresent" | "Always" | "Never"
// +usage=Specify the commands run in the init container
cmd?: [...string]

View File

@@ -104,7 +104,7 @@ spec:
message: "Visiting URL: " + context.outputs.ingress.spec.rules[0].host + ", IP: " + igs[0].ip
}
if igs[0].host == _|_ {
message: "Host not specified, visit the cluster or load balancer in front of the cluster"
message: "Host not specified, visit the cluster or load balancer in front of the cluster with IP: " + igs[0].ip
}
}
if igs[0].ip == _|_ {

View File

@@ -28,8 +28,9 @@ spec:
}]
}]
initContainers: [{
name: parameter.name
image: parameter.image
name: parameter.name
image: parameter.image
imagePullPolicy: parameter.imagePullPolicy
if parameter.cmd != _|_ {
command: parameter.cmd
}
@@ -59,6 +60,9 @@ spec:
// +usage=Specify the image of init container
image: string
// +usage=Specify image pull policy for your service
imagePullPolicy: *"IfNotPresent" | "Always" | "Never"
// +usage=Specify the commands run in the init container
cmd?: [...string]

View File

@@ -80,7 +80,7 @@ ifeq (, $(shell which readme-generator))
@{ \
set -e ;\
echo 'installing readme-generator-for-helm' ;\
npm install -g readme-generator-for-helm ;\
npm install -g @bitnami/readme-generator-for-helm ;\
}
else
@$(OK) readme-generator-for-helm is already installed

View File

@@ -86,7 +86,6 @@ var _ = Describe("test FindWholeAddonPackagesFromRegistry", func() {
Expect(res).To(HaveLen(1))
Expect(res[0].Name).To(Equal("velaux"))
Expect(res[0].InstallPackage).ToNot(BeNil())
Expect(res[0].APISchema).ToNot(BeNil())
})
It("should return one valid result, matching one registry", func() {
res, err := FindWholeAddonPackagesFromRegistry(context.Background(), k8sClient, []string{"velaux"}, []string{"KubeVela"})
@@ -94,7 +93,6 @@ var _ = Describe("test FindWholeAddonPackagesFromRegistry", func() {
Expect(res).To(HaveLen(1))
Expect(res[0].Name).To(Equal("velaux"))
Expect(res[0].InstallPackage).ToNot(BeNil())
Expect(res[0].APISchema).ToNot(BeNil())
})
})
@@ -113,10 +111,8 @@ var _ = Describe("test FindWholeAddonPackagesFromRegistry", func() {
Expect(res).To(HaveLen(2))
Expect(res[0].Name).To(Equal("velaux"))
Expect(res[0].InstallPackage).ToNot(BeNil())
Expect(res[0].APISchema).ToNot(BeNil())
Expect(res[1].Name).To(Equal("traefik"))
Expect(res[1].InstallPackage).ToNot(BeNil())
Expect(res[1].APISchema).ToNot(BeNil())
})
})
@@ -127,7 +123,6 @@ var _ = Describe("test FindWholeAddonPackagesFromRegistry", func() {
Expect(res).To(HaveLen(1))
Expect(res[0].Name).To(Equal("velaux"))
Expect(res[0].InstallPackage).ToNot(BeNil())
Expect(res[0].APISchema).ToNot(BeNil())
})
})
})

View File

@@ -48,6 +48,7 @@ const (
addonAllClusterPolicy = "deploy-addon-to-all-clusters"
renderOutputCuePath = "output"
renderAuxiliaryOutputsPath = "outputs"
defaultCuePackageHeader = "main"
)
type addonCueTemplateRender struct {
@@ -334,6 +335,13 @@ func renderResources(addon *InstallPackage, args map[string]interface{}) ([]comm
}
for _, tmpl := range addon.CUETemplates {
isMainCueTemplate, err := checkCueFileHasPackageHeader(tmpl)
if err != nil {
return nil, err
}
if isMainCueTemplate {
continue
}
comp, err := renderCompAccordingCUETemplate(tmpl, addon, args)
if err != nil && strings.Contains(err.Error(), "var(path=output) not exist") {
continue
@@ -367,3 +375,14 @@ func isDeployToRuntime(addon *InstallPackage) bool {
}
return addon.DeployTo.RuntimeCluster || addon.DeployTo.LegacyRuntimeCluster
}
func checkCueFileHasPackageHeader(cueTemplate ElementFile) (bool, error) {
cueFile, err := parser.ParseFile(cueTemplate.Name, cueTemplate.Data, parser.ParseComments)
if err != nil {
return false, err
}
if cueFile.PackageName() == defaultCuePackageHeader {
return true, nil
}
return false, nil
}

View File

@@ -451,3 +451,99 @@ func TestRenderCueResourceError(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, len(comp), 2)
}
func TestCheckCueFileHasPackageHeader(t *testing.T) {
testCueTemplateWithPkg := `
package main
kustomizeController: {
// About this name, refer to #429 for details.
name: "fluxcd-kustomize-controller"
type: "webservice"
dependsOn: ["fluxcd-ns"]
properties: {
imagePullPolicy: "IfNotPresent"
image: _base + "fluxcd/kustomize-controller:v0.26.0"
env: [
{
name: "RUNTIME_NAMESPACE"
value: _targetNamespace
},
]
livenessProbe: {
httpGet: {
path: "/healthz"
port: 9440
}
timeoutSeconds: 5
}
readinessProbe: {
httpGet: {
path: "/readyz"
port: 9440
}
timeoutSeconds: 5
}
volumeMounts: {
emptyDir: [
{
name: "temp"
mountPath: "/tmp"
},
]
}
}
traits: [
{
type: "service-account"
properties: {
name: "sa-kustomize-controller"
create: true
privileges: _rules
}
},
{
type: "labels"
properties: {
"control-plane": "controller"
// This label is kept to avoid breaking existing
// KubeVela e2e tests (makefile e2e-setup).
"app": "kustomize-controller"
}
},
{
type: "command"
properties: {
args: controllerArgs
}
},
]
}
`
testCueTemplateWithoutPkg := `
output: {
type: "helm"
name: "nginx-ingress"
properties: {
repoType: "helm"
url: "https://kubernetes.github.io/ingress-nginx"
chart: "ingress-nginx"
version: "4.2.0"
values: {
controller: service: type: parameter["serviceType"]
}
}
}
`
cueTemplate := ElementFile{Name: "test-file.cue", Data: testCueTemplateWithPkg}
ok, err := checkCueFileHasPackageHeader(cueTemplate)
assert.NoError(t, err)
assert.Equal(t, true, ok)
cueTemplate = ElementFile{Name: "test-file-without-pkg.cue", Data: testCueTemplateWithoutPkg}
ok, err = checkCueFileHasPackageHeader(cueTemplate)
assert.NoError(t, err)
assert.Equal(t, false, ok)
}

View File

@@ -1550,6 +1550,13 @@ func (c *applicationServiceImpl) DryRunAppOrRevision(ctx context.Context, appMod
}
case "REVISION":
app, _, err = c.getAppModelFromRevision(ctx, appModel.Name, dryRunReq.Version)
originalApp := &v1beta1.Application{}
if err := c.KubeClient.Get(ctx, types.NamespacedName{
Name: app.Name,
Namespace: app.Namespace,
}, originalApp); err == nil {
app.ResourceVersion = originalApp.ResourceVersion
}
if err != nil {
return nil, err
}

View File

@@ -82,8 +82,8 @@ func (c Condition) Validate() error {
if c.JSONKey == "" {
return fmt.Errorf("the json key of the condition can not be empty")
}
if c.Action != "enable" && c.Action != "disable" {
return fmt.Errorf("the action of the condition must be enable or disable")
if c.Action != "enable" && c.Action != "disable" && c.Action != "" {
return fmt.Errorf("the action of the condition only supports enable, disable or leave it empty")
}
if c.Op != "" && !StringsContain([]string{"==", "!=", "in"}, c.Op) {
return fmt.Errorf("the op of the condition must be `==` 、`!=` and `in`")

View File

@@ -299,6 +299,11 @@ func (r *Reconciler) gcResourceTrackers(logCtx monitorContext.Context, handler *
}))
defer subCtx.Commit("finish gc resourceTrackers")
statusUpdater := r.updateStatus
if isPatch {
statusUpdater = r.patchStatus
}
var options []resourcekeeper.GCOption
if !gcOutdated {
options = append(options, resourcekeeper.DisableMarkStageGCOption{}, resourcekeeper.DisableGCComponentRevisionOption{}, resourcekeeper.DisableLegacyGCOption{})
@@ -306,8 +311,10 @@ func (r *Reconciler) gcResourceTrackers(logCtx monitorContext.Context, handler *
finished, waiting, err := handler.resourceKeeper.GarbageCollect(logCtx, options...)
if err != nil {
logCtx.Error(err, "Failed to gc resourcetrackers")
r.Recorder.Event(handler.app, event.Warning(velatypes.ReasonFailedGC, err))
return r.endWithNegativeCondition(logCtx, handler.app, condition.ReconcileError(err), phase)
cond := condition.Deleting()
cond.Message = fmt.Sprintf("error encountered during garbage collection: %s", err.Error())
handler.app.Status.SetConditions(cond)
return r.result(statusUpdater(logCtx, handler.app, phase)).ret()
}
if !finished {
logCtx.Info("GarbageCollecting resourcetrackers unfinished")
@@ -316,13 +323,13 @@ func (r *Reconciler) gcResourceTrackers(logCtx monitorContext.Context, handler *
cond.Message = fmt.Sprintf("Waiting for %s to delete. (At least %d resources are deleting.)", waiting[0].DisplayName(), len(waiting))
}
handler.app.Status.SetConditions(cond)
return r.result(r.patchStatus(logCtx, handler.app, phase)).requeue(baseGCBackoffWaitTime).ret()
return r.result(statusUpdater(logCtx, handler.app, phase)).requeue(baseGCBackoffWaitTime).ret()
}
logCtx.Info("GarbageCollected resourcetrackers")
if !isPatch {
return r.result(r.updateStatus(logCtx, handler.app, common.ApplicationRunningWorkflow)).ret()
phase = common.ApplicationRunningWorkflow
}
return r.result(r.patchStatus(logCtx, handler.app, phase)).ret()
return r.result(statusUpdater(logCtx, handler.app, phase)).ret()
}
type reconcileResult struct {

View File

@@ -23,6 +23,7 @@ import (
"sort"
"strings"
"github.com/hashicorp/go-version"
"github.com/pkg/errors"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
@@ -233,9 +234,6 @@ func (h *AppHandler) gatherRevisionSpec(af *appfile.Appfile) (*v1beta1.Applicati
copiedApp := h.app.DeepCopy()
// We better to remove all object status in the appRevision
copiedApp.Status = common.AppStatus{}
if !metav1.HasAnnotation(h.app.ObjectMeta, oam.AnnotationPublishVersion) {
copiedApp.Spec.Workflow = nil
}
appRev := &v1beta1.ApplicationRevision{
Spec: v1beta1.ApplicationRevisionSpec{
Application: *copiedApp,
@@ -525,6 +523,28 @@ func deepEqualWorkflow(old, new v1alpha1.Workflow) bool {
return apiequality.Semantic.DeepEqual(old.Steps, new.Steps)
}
const velaVersionNumberToCompareWorkflow = "v1.5.7"
func deepEqualAppSpec(old, new *v1beta1.Application) bool {
oldSpec, newSpec := old.Spec.DeepCopy(), new.Spec.DeepCopy()
// legacy code: KubeVela version before v1.5.7 & v1.6.0-alpha.4 does not
// record workflow in application spec in application revision. The comparison
// need to bypass the equality check of workflow to prevent unintended rerun
var curVerNum, publishVersion string
if annotations := old.GetAnnotations(); annotations != nil {
curVerNum = annotations[oam.AnnotationKubeVelaVersion]
publishVersion = annotations[oam.AnnotationPublishVersion]
}
if publishVersion == "" && curVerNum != "" {
cmpVer, _ := version.NewVersion(velaVersionNumberToCompareWorkflow)
if curVer, err := version.NewVersion(curVerNum); err == nil && curVer.LessThan(cmpVer) {
oldSpec.Workflow = nil
newSpec.Workflow = nil
}
}
return apiequality.Semantic.DeepEqual(oldSpec, newSpec)
}
func deepEqualAppInRevision(old, new *v1beta1.ApplicationRevision) bool {
if len(old.Spec.Policies) != len(new.Spec.Policies) {
return false
@@ -542,8 +562,10 @@ func deepEqualAppInRevision(old, new *v1beta1.ApplicationRevision) bool {
return false
}
}
return apiequality.Semantic.DeepEqual(filterSkipAffectAppRevTrait(old.Spec.Application.Spec, old.Spec.TraitDefinitions),
filterSkipAffectAppRevTrait(new.Spec.Application.Spec, new.Spec.TraitDefinitions))
oldApp, newApp := old.Spec.Application.DeepCopy(), new.Spec.Application.DeepCopy()
oldApp.Spec = filterSkipAffectAppRevTrait(oldApp.Spec, old.Spec.TraitDefinitions)
newApp.Spec = filterSkipAffectAppRevTrait(newApp.Spec, new.Spec.TraitDefinitions)
return deepEqualAppSpec(oldApp, newApp)
}
// HandleComponentsRevision manages Component revisions

View File

@@ -22,10 +22,12 @@ import (
"fmt"
"reflect"
"strconv"
"testing"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/stretchr/testify/require"
"github.com/google/go-cmp/cmp"
appsv1 "k8s.io/api/apps/v1"
@@ -1171,3 +1173,19 @@ status: {}
}()).Should(BeTrue())
})
})
func TestDeepEqualAppInRevision(t *testing.T) {
oldRev := &v1beta1.ApplicationRevision{}
newRev := &v1beta1.ApplicationRevision{}
newRev.Spec.Application.Spec.Workflow = &v1beta1.Workflow{
Steps: []v1beta1.WorkflowStep{{
Type: "deploy",
Name: "deploy",
}},
}
require.False(t, deepEqualAppInRevision(oldRev, newRev))
metav1.SetMetaDataAnnotation(&oldRev.Spec.Application.ObjectMeta, oam.AnnotationKubeVelaVersion, "v1.6.0-alpha.5")
require.False(t, deepEqualAppInRevision(oldRev, newRev))
metav1.SetMetaDataAnnotation(&oldRev.Spec.Application.ObjectMeta, oam.AnnotationKubeVelaVersion, "v1.5.0")
require.True(t, deepEqualAppInRevision(oldRev, newRev))
}

View File

@@ -149,6 +149,7 @@
appProtocol?: string
host?: string
port: int
portName?: string
path?: string
inner?: bool
}

View File

@@ -26,6 +26,7 @@ import (
"fmt"
"io"
"net/http"
neturl "net/url"
"os"
"os/exec"
"path/filepath"
@@ -78,9 +79,15 @@ import (
var (
// Scheme defines the default KubeVela schema
Scheme = k8sruntime.NewScheme()
// forbidRedirectFunc general check func for http redirect response
forbidRedirectFunc = func(req *http.Request, via []*http.Request) error {
return errors.New("got a redirect response which is forbidden")
}
//nolint:gosec
// insecureHTTPClient insecure http client
insecureHTTPClient = &http.Client{Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}}
insecureHTTPClient = &http.Client{Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}, CheckRedirect: forbidRedirectFunc}
// forbidRedirectClient is a http client forbid redirect http request
forbidRedirectClient = &http.Client{CheckRedirect: forbidRedirectFunc}
)
const (
@@ -161,11 +168,14 @@ func GetClient() (client.Client, error) {
// HTTPGetResponse use HTTP option and default client to send request and get raw response
func HTTPGetResponse(ctx context.Context, url string, opts *HTTPOption) (*http.Response, error) {
// Change NewRequest to NewRequestWithContext and pass context it
if _, err := neturl.ParseRequestURI(url); err != nil {
return nil, err
}
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
if err != nil {
return nil, err
}
httpClient := http.DefaultClient
httpClient := forbidRedirectClient
if opts != nil && len(opts.Username) != 0 && len(opts.Password) != 0 {
req.SetBasicAuth(opts.Username, opts.Password)
}
@@ -193,7 +203,7 @@ func HTTPGetResponse(ctx context.Context, url string, opts *HTTPOption) (*http.R
}
tr.TLSClientConfig = tlsConfig
defer tr.CloseIdleConnections()
httpClient = &http.Client{Transport: &tr}
httpClient = &http.Client{Transport: &tr, CheckRedirect: forbidRedirectFunc}
}
return httpClient.Do(req)
}

View File

@@ -26,6 +26,7 @@ import (
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
"time"
@@ -223,6 +224,25 @@ func TestHttpGetCaFile(t *testing.T) {
}
}
func TestHttpGetForbidRedirect(t *testing.T) {
var ctx = context.Background()
testServer := &http.Server{Addr: ":19090"}
http.HandleFunc("/redirect", func(writer http.ResponseWriter, request *http.Request) {
http.Redirect(writer, request, "http://192.168.1.1", http.StatusFound)
})
go func() {
err := testServer.ListenAndServe()
assert.NoError(t, err)
}()
time.Sleep(time.Millisecond)
_, err := HTTPGetWithOption(ctx, "http://127.0.0.1:19090/redirect", nil)
assert.Error(t, err)
assert.True(t, strings.Contains(err.Error(), "got a redirect response which is forbidden"))
}
func TestGetCUEParameterValue(t *testing.T) {
type want struct {
err error

11
pkg/utils/env/env.go vendored
View File

@@ -73,6 +73,17 @@ func CreateEnv(envArgs *types.EnvMeta) error {
}
}
ctx := context.TODO()
var nsList v1.NamespaceList
err = c.List(ctx, &nsList, client.MatchingLabels{oam.LabelControlPlaneNamespaceUsage: oam.VelaNamespaceUsageEnv,
oam.LabelNamespaceOfEnvName: envArgs.Name})
if err != nil {
return err
}
if len(nsList.Items) > 0 {
return fmt.Errorf("env name %s already exists", envArgs.Name)
}
namespace, err := utils.GetNamespace(ctx, c, envArgs.Namespace)
if err != nil && !apierrors.IsNotFound(err) {
return err

View File

@@ -225,7 +225,7 @@ func (h *Helper) GetIndexInfo(repoURL string, skipCache bool, opts *common.HTTPO
}
i := &repo.IndexFile{}
if err := yaml.UnmarshalStrict(body, i); err != nil {
return nil, fmt.Errorf("parse index file from %s failure %w", repoURL, err)
return nil, fmt.Errorf("parse index file from %s failure", repoURL)
}
if h.cache != nil {

View File

@@ -43,10 +43,10 @@ import (
"github.com/oam-dev/kubevela/pkg/workflow/types"
)
// GeneratorServiceEndpoints generator service endpoints is available for common component type,
// CollectServiceEndpoints generator service endpoints is available for common component type,
// such as webservice or helm
// it can not support the cloud service component currently
func (h *provider) GeneratorServiceEndpoints(wfctx wfContext.Context, v *value.Value, act types.Action) error {
func (h *provider) CollectServiceEndpoints(wfctx wfContext.Context, v *value.Value, act types.Action) error {
ctx := context.Background()
val, err := v.LookupValue("app")
@@ -189,13 +189,14 @@ func generatorFromService(service corev1.Service, selectorNodeIP func() string,
ResourceVersion: service.ResourceVersion,
}
formatEndpoint := func(host, appProtocol string, portProtocol corev1.Protocol, portNum int32, inner bool) querytypes.ServiceEndpoint {
formatEndpoint := func(host, appProtocol string, portName string, portProtocol corev1.Protocol, portNum int32, inner bool) querytypes.ServiceEndpoint {
return querytypes.ServiceEndpoint{
Endpoint: querytypes.Endpoint{
Protocol: portProtocol,
AppProtocol: &appProtocol,
Host: host,
Port: int(portNum),
PortName: portName,
Path: path,
Inner: inner,
},
@@ -210,22 +211,22 @@ func generatorFromService(service corev1.Service, selectorNodeIP func() string,
appp := judgeAppProtocol(port.Port)
for _, ingress := range service.Status.LoadBalancer.Ingress {
if ingress.Hostname != "" {
serviceEndpoints = append(serviceEndpoints, formatEndpoint(ingress.Hostname, appp, port.Protocol, port.NodePort, false))
serviceEndpoints = append(serviceEndpoints, formatEndpoint(ingress.Hostname, appp, port.Name, port.Protocol, port.Port, false))
}
if ingress.IP != "" {
serviceEndpoints = append(serviceEndpoints, formatEndpoint(ingress.IP, appp, port.Protocol, port.NodePort, false))
serviceEndpoints = append(serviceEndpoints, formatEndpoint(ingress.IP, appp, port.Name, port.Protocol, port.Port, false))
}
}
}
case corev1.ServiceTypeNodePort:
for _, port := range service.Spec.Ports {
appp := judgeAppProtocol(port.Port)
serviceEndpoints = append(serviceEndpoints, formatEndpoint(selectorNodeIP(), appp, port.Protocol, port.NodePort, false))
serviceEndpoints = append(serviceEndpoints, formatEndpoint(selectorNodeIP(), appp, port.Name, port.Protocol, port.NodePort, false))
}
case corev1.ServiceTypeClusterIP, corev1.ServiceTypeExternalName:
for _, port := range service.Spec.Ports {
appp := judgeAppProtocol(port.Port)
serviceEndpoints = append(serviceEndpoints, formatEndpoint(fmt.Sprintf("%s.%s", service.Name, service.Namespace), appp, port.Protocol, port.Port, true))
serviceEndpoints = append(serviceEndpoints, formatEndpoint(fmt.Sprintf("%s.%s", service.Name, service.Namespace), appp, port.Name, port.Protocol, port.Port, true))
}
}
return serviceEndpoints

View File

@@ -165,7 +165,7 @@ var _ = Describe("Test Query Provider", func() {
{
"name": "seldon-ambassador-2",
"ports": []corev1.ServicePort{
{Port: 80, TargetPort: intstr.FromInt(80), Name: "80port", NodePort: 30010},
{Port: 80, TargetPort: intstr.FromInt(80), Name: "80port"},
},
"type": corev1.ServiceTypeLoadBalancer,
"status": corev1.ServiceStatus{
@@ -244,15 +244,15 @@ var _ = Describe("Test Query Provider", func() {
pr := &provider{
cli: k8sClient,
}
err = pr.GeneratorServiceEndpoints(nil, v, nil)
err = pr.CollectServiceEndpoints(nil, v, nil)
Expect(err).Should(BeNil())
urls := []string{
"http://1.1.1.1:30010/seldon/default/sdep2",
"http://1.1.1.1/seldon/default/sdep2",
"http://clusterip-2.default",
"clusterip-2.default:81",
"http://2.2.2.2:30020",
"http://1.1.1.1:30010",
"http://2.2.2.2:8080",
"http://1.1.1.1",
}
endValue, err := v.Field("list")
Expect(err).Should(BeNil())

View File

@@ -305,6 +305,6 @@ func Install(p providers.Providers, cli client.Client, cfg *rest.Config) {
"collectResources": prd.CollectResources,
"searchEvents": prd.SearchEvents,
"collectLogsInPod": prd.CollectLogsInPod,
"collectServiceEndpoints": prd.GeneratorServiceEndpoints,
"collectServiceEndpoints": prd.CollectServiceEndpoints,
})
}

View File

@@ -928,7 +928,7 @@ options: {
pr := &provider{
cli: k8sClient,
}
err = pr.GeneratorServiceEndpoints(nil, v, nil)
err = pr.CollectServiceEndpoints(nil, v, nil)
Expect(err).Should(BeNil())
gatewayIP := selectorNodeIP(ctx, "", k8sClient)
urls := []string{
@@ -937,13 +937,13 @@ options: {
"https://ingress.domain.path/test",
"https://ingress.domain.path/test2",
fmt.Sprintf("http://%s:30229", gatewayIP),
"http://10.10.10.10:30080",
"http://text.example.com:30080",
"10.10.10.10:30081",
"text.example.com:30081",
"http://10.10.10.10",
"http://text.example.com",
"10.10.10.10:81",
"text.example.com:81",
fmt.Sprintf("http://%s:30002", gatewayIP),
"http://ingress.domain.helm",
"http://1.1.1.1:30011/seldon/default/sdep",
"http://1.1.1.1/seldon/default/sdep",
"http://gateway.domain",
"http://gateway.domain/api",
"https://demo.kubevela.net",

View File

@@ -879,8 +879,8 @@ func mergeCustomRules(ctx context.Context, k8sClient client.Client) error {
format string
err error
)
if item.Annotations != nil {
format = item.Annotations[oam.LabelResourceRuleFormat]
if item.Labels != nil {
format = item.Labels[oam.LabelResourceRuleFormat]
}
switch format {
case oam.ResourceTopologyFormatJSON:

View File

@@ -1448,6 +1448,26 @@ var _ = Describe("test merge globalRules", func() {
defaultLabelSelector: true
- apiVersion: apps/v1
kind: ControllerRevision
`
clickhouseJsonStr := `
[
{
"parentResourceType": {
"group": "clickhouse.altinity.com",
"kind": "ClickHouseInstallation"
},
"childrenResourceType": [
{
"apiVersion": "apps/v1",
"kind": "StatefulSet"
},
{
"apiVersion": "v1",
"kind": "Service"
}
]
}
]
`
daemonSetStr := `
- parentResourceType:
@@ -1481,20 +1501,29 @@ childrenResourceType:
It("test merge rules", func() {
Expect(k8sClient.Create(ctx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "vela-system"}})).Should(SatisfyAny(BeNil(), util.AlreadyExistMatcher{}))
cloneSetConfigMap := v1.ConfigMap{TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "ConfigMap"},
ObjectMeta: metav1.ObjectMeta{Namespace: types3.DefaultKubeVelaNS, Name: "cloneset", Labels: map[string]string{oam.LabelResourceRules: "true"}},
Data: map[string]string{relationshipKey: cloneSetStr},
ObjectMeta: metav1.ObjectMeta{Namespace: types3.DefaultKubeVelaNS, Name: "cloneset", Labels: map[string]string{
oam.LabelResourceRules: "true",
oam.LabelResourceRuleFormat: oam.ResourceTopologyFormatYAML,
}},
Data: map[string]string{relationshipKey: cloneSetStr},
}
Expect(k8sClient.Create(ctx, &cloneSetConfigMap)).Should(BeNil())
daemonSetConfigMap := v1.ConfigMap{TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "ConfigMap"},
ObjectMeta: metav1.ObjectMeta{Namespace: types3.DefaultKubeVelaNS, Name: "daemonset", Labels: map[string]string{oam.LabelResourceRules: "true"}},
Data: map[string]string{relationshipKey: daemonSetStr},
ObjectMeta: metav1.ObjectMeta{Namespace: types3.DefaultKubeVelaNS, Name: "daemonset", Labels: map[string]string{
oam.LabelResourceRules: "true",
oam.LabelResourceRuleFormat: oam.ResourceTopologyFormatYAML,
}},
Data: map[string]string{relationshipKey: daemonSetStr},
}
Expect(k8sClient.Create(ctx, &daemonSetConfigMap)).Should(BeNil())
stsConfigMap := v1.ConfigMap{TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "ConfigMap"},
ObjectMeta: metav1.ObjectMeta{Namespace: types3.DefaultKubeVelaNS, Name: "sts", Labels: map[string]string{oam.LabelResourceRules: "true"}},
Data: map[string]string{relationshipKey: stsStr},
ObjectMeta: metav1.ObjectMeta{Namespace: types3.DefaultKubeVelaNS, Name: "sts", Labels: map[string]string{
oam.LabelResourceRules: "true",
oam.LabelResourceRuleFormat: oam.ResourceTopologyFormatYAML,
}},
Data: map[string]string{relationshipKey: stsStr},
}
Expect(k8sClient.Create(ctx, &stsConfigMap)).Should(BeNil())
@@ -1504,6 +1533,15 @@ childrenResourceType:
}
Expect(k8sClient.Create(ctx, &missConfigedCm)).Should(BeNil())
clickhouseJsonCm := v1.ConfigMap{TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "ConfigMap"},
ObjectMeta: metav1.ObjectMeta{Namespace: types3.DefaultKubeVelaNS, Name: "clickhouse", Labels: map[string]string{
oam.LabelResourceRules: "true",
oam.LabelResourceRuleFormat: oam.ResourceTopologyFormatJSON,
}},
Data: map[string]string{relationshipKey: clickhouseJsonStr},
}
Expect(k8sClient.Create(ctx, &clickhouseJsonCm)).Should(BeNil())
Expect(mergeCustomRules(ctx, k8sClient)).Should(BeNil())
childrenResources, ok := globalRule.GetRule(GroupResourceType{Group: "apps.kruise.io", Kind: "CloneSet"})
Expect(ok).Should(BeTrue())
@@ -1539,6 +1577,19 @@ childrenResourceType:
Expect(revisionCR).ShouldNot(BeNil())
Expect(revisionCR.listOptions).Should(BeNil())
chChildrenResources, ok := globalRule.GetRule(GroupResourceType{Group: "clickhouse.altinity.com", Kind: "ClickHouseInstallation"})
Expect(ok).Should(BeTrue())
Expect(chChildrenResources.DefaultGenListOptionFunc).Should(BeNil())
Expect(len(*chChildrenResources.SubResources)).Should(BeEquivalentTo(2))
chSts := chChildrenResources.SubResources.Get(ResourceType{APIVersion: "apps/v1", Kind: "StatefulSet"})
Expect(chSts).ShouldNot(BeNil())
Expect(chSts.listOptions).Should(BeNil())
chSvc := chChildrenResources.SubResources.Get(ResourceType{APIVersion: "v1", Kind: "Service"})
Expect(chSvc).ShouldNot(BeNil())
Expect(chSvc.listOptions).Should(BeNil())
// clear data
Expect(k8sClient.Delete(context.TODO(), &missConfigedCm)).Should(BeNil())
Expect(k8sClient.Delete(context.TODO(), &stsConfigMap)).Should(BeNil())

View File

@@ -92,6 +92,10 @@ type Endpoint struct {
// Default is 80.
Port int `json:"port"`
// +optional
// the name of the port
PortName string `json:"portName,omitempty"`
// the path for the endpoint
Path string `json:"path,omitempty"`

View File

@@ -31,19 +31,20 @@ import (
)
const (
addonRegistryType = "type"
addonEndpoint = "endpoint"
addonOssBucket = "bucket"
addonPath = "path"
addonGitToken = "gitToken"
addonOssType = "OSS"
addonGitType = "git"
addonGiteeType = "gitee"
addonGitlabType = "gitlab"
addonHelmType = "helm"
addonUsername = "username"
addonPassword = "password"
addonRepoName = "repoName"
addonRegistryType = "type"
addonEndpoint = "endpoint"
addonOssBucket = "bucket"
addonPath = "path"
addonGitToken = "gitToken"
addonOssType = "OSS"
addonGitType = "git"
addonGiteeType = "gitee"
addonGitlabType = "gitlab"
addonHelmType = "helm"
addonUsername = "username"
addonPassword = "password"
// only gitlab registry need set this flag
addonRepoName = "gitlabRepoName"
addonHelmInsecureSkipTLS = "insecureSkipTLS"
)
@@ -67,10 +68,12 @@ func NewAddonRegistryCommand(c common.Args, ioStreams cmdutil.IOStreams) *cobra.
// NewAddAddonRegistryCommand return an addon registry create command
func NewAddAddonRegistryCommand(c common.Args, ioStreams cmdutil.IOStreams) *cobra.Command {
cmd := &cobra.Command{
Use: "add",
Short: "Add an addon registry.",
Long: "Add an addon registry.",
Example: `"vela addon registry add <my-registry-name> --type OSS --endpoint=<URL> --bucket=<bukect-name> or vela addon registry add my-repo --type git --endpoint=<URL> --path=<OSS-ptah> --gitToken=<git token>"`,
Use: "add",
Short: "Add an addon registry.",
Long: "Add an addon registry.",
Example: `add a helm repo registry: vela addon registry add --type=helm my-repo --endpoint=<URL>
add a github registry: vela addon registry add my-repo --type git --endpoint=<URL> --path=<ptah> --token=<git token>"
add a gitlab registry: vela addon registry add my-repo --type gitlab --endpoint=<URL> --gitlabRepoName=<repoName> --path=<path> --token=<git token>`,
RunE: func(cmd *cobra.Command, args []string) error {
registry, err := getRegistryFromArgs(cmd, args)
if err != nil {
@@ -298,6 +301,7 @@ func parseArgsFromFlag(cmd *cobra.Command) {
cmd.Flags().StringP(addonGitToken, "", "", "specify the github repo token")
cmd.Flags().StringP(addonUsername, "", "", "specify the Helm addon registry username")
cmd.Flags().StringP(addonPassword, "", "", "specify the Helm addon registry password")
cmd.Flags().StringP(addonRepoName, "", "", "specify the gitlab addon registry repoName")
cmd.Flags().BoolP(addonHelmInsecureSkipTLS, "", false,
"specify the Helm addon registry skip tls verify")
}

View File

@@ -169,7 +169,7 @@ var _ = Describe("Addon status or info", func() {
Expect(ds.DeleteRegistry(context.Background(), "KubeVela")).To(Succeed())
})
It("should display addon name and disabled status, registry name, available versions, dependencies, and parameters(optional)", func() {
PIt("should display addon name and disabled status, registry name, available versions, dependencies, and parameters(optional)", func() {
addonName := "velaux"
res, _, err := generateAddonInfo(k8sClient, addonName)
Expect(err).Should(BeNil())

View File

@@ -436,10 +436,10 @@ var _ = Describe("Test velaQL", func() {
"https://ingress.domain.path/test",
"https://ingress.domain.path/test2",
fmt.Sprintf("http://%s:30229", gatewayIP),
"http://10.10.10.10:30180",
"http://text.example.com:30180",
"10.10.10.10:30181",
"text.example.com:30181",
"http://10.10.10.10",
"http://text.example.com",
"10.10.10.10:81",
"text.example.com:81",
// helmRelease
fmt.Sprintf("http://%s:30002", gatewayIP),
"http://ingress.domain.helm",

View File

@@ -20,7 +20,6 @@ import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"strings"
"time"
@@ -42,6 +41,7 @@ import (
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
kubevelatypes "github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/multicluster"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/utils"
@@ -124,7 +124,7 @@ var _ = Describe("Test multicluster scenario", func() {
_, err := execCommand("cluster", "join", "/tmp/worker.kubeconfig", "--name", testClusterName)
Expect(err).Should(Succeed())
app := &v1beta1.Application{}
bs, err := ioutil.ReadFile("./testdata/app/example-lite-envbinding-app.yaml")
bs, err := os.ReadFile("./testdata/app/example-lite-envbinding-app.yaml")
Expect(err).Should(Succeed())
appYaml := strings.ReplaceAll(string(bs), "TEST_CLUSTER_NAME", testClusterName)
Expect(yaml.Unmarshal([]byte(appYaml), app)).Should(Succeed())
@@ -230,7 +230,7 @@ var _ = Describe("Test multicluster scenario", func() {
// 4. Component selector.
By("apply application")
app := &v1beta1.Application{}
bs, err := ioutil.ReadFile("./testdata/app/example-envbinding-app.yaml")
bs, err := os.ReadFile("./testdata/app/example-envbinding-app.yaml")
Expect(err).Should(Succeed())
appYaml := strings.ReplaceAll(strings.ReplaceAll(string(bs), "TEST_NAMESPACE", testNamespace), "PROD_NAMESPACE", prodNamespace)
Expect(yaml.Unmarshal([]byte(appYaml), app)).Should(Succeed())
@@ -277,7 +277,7 @@ var _ = Describe("Test multicluster scenario", func() {
// 5. add env
By("apply application")
app := &v1beta1.Application{}
bs, err := ioutil.ReadFile("./testdata/app/example-envbinding-app-wo-workflow.yaml")
bs, err := os.ReadFile("./testdata/app/example-envbinding-app-wo-workflow.yaml")
Expect(err).Should(Succeed())
appYaml := strings.ReplaceAll(string(bs), "TEST_NAMESPACE", testNamespace)
Expect(yaml.Unmarshal([]byte(appYaml), app)).Should(Succeed())
@@ -364,7 +364,7 @@ var _ = Describe("Test multicluster scenario", func() {
It("Test helm addon relied feature", func() {
By("apply application")
app := &v1beta1.Application{}
bs, err := ioutil.ReadFile("./testdata/app/app-apply-in-order.yaml")
bs, err := os.ReadFile("./testdata/app/app-apply-in-order.yaml")
Expect(err).Should(Succeed())
Expect(yaml.Unmarshal([]byte(bs), app)).Should(Succeed())
app.SetNamespace(testNamespace)
@@ -568,7 +568,7 @@ var _ = Describe("Test multicluster scenario", func() {
})
It("Test applications with bad resource", func() {
bs, err := ioutil.ReadFile("./testdata/app/app-bad-resource.yaml")
bs, err := os.ReadFile("./testdata/app/app-bad-resource.yaml")
Expect(err).Should(Succeed())
appYaml := strings.ReplaceAll(string(bs), "TEST_NAMESPACE", testNamespace)
app := &v1beta1.Application{}
@@ -588,7 +588,7 @@ var _ = Describe("Test multicluster scenario", func() {
})
It("Test applications with env and storage trait", func() {
bs, err := ioutil.ReadFile("./testdata/app/app-with-env-and-storage.yaml")
bs, err := os.ReadFile("./testdata/app/app-with-env-and-storage.yaml")
Expect(err).Should(Succeed())
appYaml := strings.ReplaceAll(string(bs), "TEST_NAMESPACE", testNamespace)
app := &v1beta1.Application{}
@@ -601,7 +601,7 @@ var _ = Describe("Test multicluster scenario", func() {
})
It("Test applications with gc policy change (onAppUpdate -> never)", func() {
bs, err := ioutil.ReadFile("./testdata/app/app-gc-policy-change.yaml")
bs, err := os.ReadFile("./testdata/app/app-gc-policy-change.yaml")
Expect(err).Should(Succeed())
appYaml := strings.ReplaceAll(string(bs), "TEST_NAMESPACE", testNamespace)
app := &v1beta1.Application{}
@@ -657,7 +657,7 @@ var _ = Describe("Test multicluster scenario", func() {
})
It("Test Application with env in webservice and labels & storage trait", func() {
bs, err := ioutil.ReadFile("./testdata/app/app-with-env-labels-storage.yaml")
bs, err := os.ReadFile("./testdata/app/app-with-env-labels-storage.yaml")
Expect(err).Should(Succeed())
app := &v1beta1.Application{}
Expect(yaml.Unmarshal(bs, app)).Should(Succeed())
@@ -673,5 +673,119 @@ var _ = Describe("Test multicluster scenario", func() {
Expect(deploy.Spec.Template.Spec.Containers[0].Env[0].Value).Should(Equal("testValue"))
Expect(len(deploy.Spec.Template.Spec.Volumes)).Should(Equal(1))
})
It("Test application with workflow change will rerun", func() {
By("create application")
bs, err := os.ReadFile("./testdata/app/app-lite-with-workflow.yaml")
Expect(err).Should(Succeed())
app := &v1beta1.Application{}
Expect(yaml.Unmarshal(bs, app)).Should(Succeed())
app.SetNamespace(testNamespace)
Expect(k8sClient.Create(hubCtx, app)).Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, client.ObjectKeyFromObject(app), app)).Should(Succeed())
g.Expect(app.Status.Phase).Should(Equal(common.ApplicationRunning))
}, 20*time.Second).Should(Succeed())
Expect(k8sClient.Get(hubCtx, client.ObjectKey{Namespace: testNamespace, Name: "data-worker"}, &appsv1.Deployment{})).Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, client.ObjectKeyFromObject(app), app)).Should(Succeed())
app.Spec.Workflow.Steps[0].Properties = &runtime.RawExtension{Raw: []byte(`{"policies":["worker"]}`)}
g.Expect(k8sClient.Update(hubCtx, app)).Should(Succeed())
}, 10*time.Second).Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, client.ObjectKey{Namespace: testNamespace, Name: "data-worker"}, &appsv1.Deployment{})).Should(Satisfy(kerrors.IsNotFound))
g.Expect(k8sClient.Get(workerCtx, client.ObjectKey{Namespace: testNamespace, Name: "data-worker"}, &appsv1.Deployment{})).Should(Succeed())
}, 20*time.Second).Should(Succeed())
})
It("Test application with failed gc and restart workflow", func() {
By("duplicate cluster")
secret := &corev1.Secret{}
const secretName = "disconnection-test"
Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: kubevelatypes.DefaultKubeVelaNS, Name: WorkerClusterName}, secret)).Should(Succeed())
secret.SetName(secretName)
secret.SetResourceVersion("")
Expect(k8sClient.Create(hubCtx, secret)).Should(Succeed())
defer func() {
_ = k8sClient.Delete(hubCtx, secret)
}()
By("create cluster normally")
bs, err := os.ReadFile("./testdata/app/app-disconnection-test.yaml")
Expect(err).Should(Succeed())
app := &v1beta1.Application{}
Expect(yaml.Unmarshal(bs, app)).Should(Succeed())
app.SetNamespace(namespace)
Expect(k8sClient.Create(hubCtx, app)).Should(Succeed())
key := client.ObjectKeyFromObject(app)
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, key, app)).Should(Succeed())
g.Expect(app.Status.Phase).Should(Equal(common.ApplicationRunning))
}).WithTimeout(30 * time.Second).WithPolling(2 * time.Second).Should(Succeed())
By("disconnect cluster")
Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: kubevelatypes.DefaultKubeVelaNS, Name: secretName}, secret)).Should(Succeed())
secret.Data["tls.crt"] = []byte("-")
Expect(k8sClient.Update(hubCtx, secret)).Should(Succeed())
By("update application")
Expect(k8sClient.Get(hubCtx, key, app)).Should(Succeed())
app.Spec.Policies = nil
Expect(k8sClient.Update(hubCtx, app)).Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, key, app)).Should(Succeed())
g.Expect(app.Status.ObservedGeneration).Should(Equal(app.Generation))
g.Expect(app.Status.Phase).Should(Equal(common.ApplicationRunning))
rts := &v1beta1.ResourceTrackerList{}
g.Expect(k8sClient.List(hubCtx, rts, client.MatchingLabels{oam.LabelAppName: key.Name, oam.LabelAppNamespace: key.Namespace})).Should(Succeed())
cnt := 0
for _, item := range rts.Items {
if item.Spec.Type == v1beta1.ResourceTrackerTypeVersioned {
cnt++
}
}
g.Expect(cnt).Should(Equal(2))
}).WithTimeout(30 * time.Second).WithPolling(2 * time.Second).Should(Succeed())
By("try update application again")
Expect(k8sClient.Get(hubCtx, key, app)).Should(Succeed())
if app.Annotations == nil {
app.Annotations = map[string]string{}
}
app.Annotations[oam.AnnotationPublishVersion] = "test"
Expect(k8sClient.Update(hubCtx, app)).Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, key, app)).Should(Succeed())
g.Expect(app.Status.LatestRevision).ShouldNot(BeNil())
g.Expect(app.Status.LatestRevision.Revision).Should(Equal(int64(3)))
g.Expect(app.Status.ObservedGeneration).Should(Equal(app.Generation))
g.Expect(app.Status.Phase).Should(Equal(common.ApplicationRunning))
}).WithTimeout(1 * time.Minute).WithPolling(2 * time.Second).Should(Succeed())
By("clear disconnection cluster secret")
Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: kubevelatypes.DefaultKubeVelaNS, Name: secretName}, secret)).Should(Succeed())
Expect(k8sClient.Delete(hubCtx, secret)).Should(Succeed())
By("update application again")
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, key, app)).Should(Succeed())
app.Annotations[oam.AnnotationPublishVersion] = "test2"
g.Expect(k8sClient.Update(hubCtx, app)).Should(Succeed())
}).WithTimeout(10 * time.Second).WithPolling(2 * time.Second).Should(Succeed())
By("wait gc application completed")
Eventually(func(g Gomega) {
rts := &v1beta1.ResourceTrackerList{}
g.Expect(k8sClient.List(hubCtx, rts, client.MatchingLabels{oam.LabelAppName: key.Name, oam.LabelAppNamespace: key.Namespace})).Should(Succeed())
cnt := 0
for _, item := range rts.Items {
if item.Spec.Type == v1beta1.ResourceTrackerTypeVersioned {
cnt++
}
}
g.Expect(cnt).Should(Equal(1))
}).WithTimeout(3 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())
})
})
})

View File

@@ -0,0 +1,17 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: app-disconnection-test
spec:
components:
- type: k8s-objects
name: app-dis-cm
properties:
objects:
- apiVersion: v1
kind: ConfigMap
policies:
- type: topology
name: disconnection-test
properties:
clusters: ["disconnection-test"]

View File

@@ -0,0 +1,28 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: example-lite-app
spec:
components:
- name: data-worker
type: worker
properties:
image: busybox
cmd:
- sleep
- '1000000'
policies:
- name: local
type: topology
properties:
clusters: ["local"]
- name: worker
type: topology
properties:
clusters: ["cluster-worker"]
workflow:
steps:
- name: deploy
type: deploy
properties:
policies: ["local"]

View File

@@ -19,7 +19,7 @@ gateway: {
message: "Visiting URL: " + context.outputs.ingress.spec.rules[0].host + ", IP: " + igs[0].ip
}
if igs[0].host == _|_ {
message: "Host not specified, visit the cluster or load balancer in front of the cluster"
message: "Host not specified, visit the cluster or load balancer in front of the cluster with IP: " + igs[0].ip
}
}
if igs[0].ip == _|_ {

View File

@@ -19,8 +19,9 @@ template: {
}]
}]
initContainers: [{
name: parameter.name
image: parameter.image
name: parameter.name
image: parameter.image
imagePullPolicy: parameter.imagePullPolicy
if parameter.cmd != _|_ {
command: parameter.cmd
}
@@ -50,6 +51,9 @@ template: {
// +usage=Specify the image of init container
image: string
// +usage=Specify image pull policy for your service
imagePullPolicy: *"IfNotPresent" | "Always" | "Never"
// +usage=Specify the commands run in the init container
cmd?: [...string]