Compare commits


25 Commits

Author SHA1 Message Date
Tianxin Dong
8f35596872 Fix: panic if a trait tries to patch an invalid workload like terraform (#5329)
Signed-off-by: FogDong <dongtianxin.tx@alibaba-inc.com>
2023-01-19 14:40:49 +08:00
github-actions[bot]
18c2fa15a2 fix: --cluster flag when enabling an addon (#5339)
Signed-off-by: zhaowei.wang <zhaowei.wang@metabit-trading.com>
(cherry picked from commit 021ca69cfd)

Co-authored-by: zhaowei.wang <zhaowei.wang@metabit-trading.com>
2023-01-13 17:06:50 +08:00
Somefive
18d93039c9 [Backport release-1.4] Fix: gc failure causes workflow restart to not work properly (#5241) (#5243)
* Fix: gc failure causes workflow restart to not work properly

Signed-off-by: Somefive <yd219913@alibaba-inc.com>

* Feat: switch ci machine

Signed-off-by: Somefive <yd219913@alibaba-inc.com>

* Fix: enhance test

Signed-off-by: Somefive <yd219913@alibaba-inc.com>

2023-01-03 20:01:29 +08:00
github-actions[bot]
8ffd80e4a7 Fix: dry-run from revision application; problems caused by a resource version lower than the current version (#5249)
Signed-off-by: old.prince <di7zhang@gmail.com>
(cherry picked from commit d5fcb04147)

Co-authored-by: oldprince <di7zhang@gmail.com>
2023-01-03 11:36:57 +08:00
github-actions[bot]
735075f5a6 [Backport release-1.5] Fix: forbid 302 request to avoid SSRF (#5003)
* fix helm chart list endpoint SSRF CVE

Signed-off-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
(cherry picked from commit 8883a6219d)

* revert error log

Signed-off-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
(cherry picked from commit e1e6972b17)

* change with const value

Signed-off-by: 楚岳 <wangyike.wyk@alibaba-inc.com>

fix ci

Signed-off-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
(cherry picked from commit fbeacb0a6b)

Co-authored-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
2022-11-04 20:16:53 +08:00
github-actions[bot]
52d1a4364b fix gitlab addon registry (#4938)
Signed-off-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
(cherry picked from commit f3ee964734)

Co-authored-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
2022-10-27 22:19:34 +08:00
github-actions[bot]
1b7f9aae65 [Backport release-1.5] Chore: wrong endpoint for LoadBalancer type service (revert #4729) (#4906)
* Chore: wrong endpoint for LoadBalancer type service (revert #4729)

Signed-off-by: barnettZQG <barnett.zqg@gmail.com>
(cherry picked from commit 92ed75c863)

* Fix: change the unit test

Signed-off-by: barnettZQG <barnett.zqg@gmail.com>
(cherry picked from commit 7985353e1d)

Co-authored-by: barnettZQG <barnett.zqg@gmail.com>
2022-10-22 13:27:07 +08:00
github-actions[bot]
e94519b788 Chore: change the package name of the readme-generator-for-helm (#4896)
Signed-off-by: barnettZQG <barnett.zqg@gmail.com>
(cherry picked from commit 8b46e6076a)

Co-authored-by: barnettZQG <barnett.zqg@gmail.com>
2022-10-20 16:37:25 +08:00
Somefive
18d755ed72 Fix: #4865 (#4867)
Signed-off-by: Somefive <yd219913@alibaba-inc.com>
2022-10-15 13:18:43 +08:00
Somefive
c15d0e4e0f Fix: prevent rerunning the application while upgrading, due to the old apprev lacking the app workflow (#4865)
Signed-off-by: Somefive <yd219913@alibaba-inc.com>
2022-10-14 22:16:51 +08:00
github-actions[bot]
82ce9daf38 Fix: support default value of ui schema (#4859)
Signed-off-by: Jianbo Sun <jianbo.sjb@alibaba-inc.com>
(cherry picked from commit e584a35c83)

Co-authored-by: Jianbo Sun <jianbo.sjb@alibaba-inc.com>
2022-10-13 14:45:47 +08:00
github-actions[bot]
6eef47c5e0 [Backport release-1.5] Fix: change LabelResourceRuleFormat from Annotations to Labels (#4855)
* fix: change Annotations to Labels

Signed-off-by: chengleqi <chengleqi5g@hotmail.com>
(cherry picked from commit 73696d8c04)

* fix: add FormatYAML label for test case

Signed-off-by: chengleqi <chengleqi5g@hotmail.com>
(cherry picked from commit 32b6ba2c41)

* add json format rules test case

Signed-off-by: chengleqi <chengleqi5g@hotmail.com>
(cherry picked from commit cf207da2b9)

Co-authored-by: chengleqi <chengleqi5g@hotmail.com>
2022-10-12 15:28:34 +08:00
Jianbo Sun
7728de933b Fix: align sha256sums file with binary name (#4835)
Signed-off-by: Jianbo Sun <jianbo.sjb@alibaba-inc.com>
2022-10-09 10:41:42 +08:00
github-actions[bot]
5ceb193ebe Chore: add IP for gateway trait (#4834)
Signed-off-by: Jianbo Sun <jianbo.sjb@alibaba-inc.com>
(cherry picked from commit 93b85480a6)

Co-authored-by: Jianbo Sun <jianbo.sjb@alibaba-inc.com>
2022-10-09 10:04:57 +08:00
github-actions[bot]
186ab37547 fix bugs and add tests (#4832)
Signed-off-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
(cherry picked from commit e6c9e3887f)

Co-authored-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
2022-10-08 17:28:47 +08:00
Somefive
5a98541e8e Feat: workflow will rerun when application workflow changes (#4813)
Signed-off-by: Somefive <yd219913@alibaba-inc.com>
2022-09-30 21:22:59 +08:00
github-actions[bot]
d599e63ec9 Feat: add image-pull-policy to init-container (#4812)
Signed-off-by: Somefive <yd219913@alibaba-inc.com>
(cherry picked from commit a9ba8bb502)

Co-authored-by: Somefive <yd219913@alibaba-inc.com>
2022-09-29 22:43:00 +08:00
Somefive
055a50fad8 Feat: add port name to CollectServiceEndpoints (#4805)
Signed-off-by: Somefive <yd219913@alibaba-inc.com>
2022-09-29 13:58:35 +08:00
github-actions[bot]
7921475af2 Fix: add path clean for request (#4800)
Signed-off-by: Somefive <yd219913@alibaba-inc.com>
(cherry picked from commit 9d4630ea32)

Co-authored-by: Somefive <yd219913@alibaba-inc.com>
2022-09-28 14:23:44 +08:00
github-actions[bot]
caf09b81b0 Fix: 'vela env init' does not check for an existing env with the same name (#4798)
Signed-off-by: wuzhongjian <wuzhongjian_yewu@cmss.chinamobile.com>
(cherry picked from commit a38e6d401b)

Co-authored-by: wuzhongjian <wuzhongjian_yewu@cmss.chinamobile.com>
2022-09-28 10:59:26 +08:00
github-actions[bot]
4c525f8e5d [Backport release-1.5] Fix: allow reading the definition from the user's namespace when force deleting (#4789)
* Fix: allow reading the definition from the user's namespace when force deleting an app with a configuration

Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>
(cherry picked from commit 2f08c36132)

* Fix test

Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>
(cherry picked from commit 981950a14d)

* Fix wrong test

Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>
(cherry picked from commit 62863f1007)

Co-authored-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>
2022-09-27 11:58:44 +08:00
github-actions[bot]
bdf71bb290 [Backport release-1.5] Fix: memory leak of the apiserver (#4777)
* Fix: memory leak of the apiserver

Signed-off-by: barnettZQG <barnett.zqg@gmail.com>
(cherry picked from commit 0a8a70730f)

* Fix: listen to the context done event

Signed-off-by: barnettZQG <barnett.zqg@gmail.com>
(cherry picked from commit dfb81224cb)

* Fix: remove the shutdown code

Signed-off-by: barnettZQG <barnett.zqg@gmail.com>
(cherry picked from commit a331b2c54a)

Co-authored-by: barnettZQG <barnett.zqg@gmail.com>
2022-09-23 17:14:10 +08:00
Somefive
5873ba4c47 [Backport 1.5] Fix: gc legacy rt with regularization (#4768)
* Fix: gc legacy rt with regularization

Signed-off-by: Somefive <yd219913@alibaba-inc.com>

* Test: add test

Signed-off-by: Somefive <yd219913@alibaba-inc.com>

Signed-off-by: Somefive <yd219913@alibaba-inc.com>
2022-09-23 16:42:47 +08:00
github-actions[bot]
9ded3c9d3e Update definition.go (#4767)
fix bug: use labels to replace annotations

(cherry picked from commit 8f395d843c)

Co-authored-by: Hair1ossTeenager <45008570+Hair1ossTeenager@users.noreply.github.com>
2022-09-21 10:29:37 +08:00
github-actions[bot]
56c2827669 Fix: auth lacks permissions for rollout (#4764)
Signed-off-by: Somefive <yd219913@alibaba-inc.com>
(cherry picked from commit b538850eec)

Co-authored-by: Somefive <yd219913@alibaba-inc.com>
2022-09-20 20:35:20 +08:00
60 changed files with 778 additions and 166 deletions

View File

@@ -104,7 +104,7 @@ jobs:
name: codecov-umbrella
apiserver-e2e-tests:
- runs-on: aliyun
+ runs-on: aliyun-legacy
needs: [ detect-noop,set-k8s-matrix ]
if: needs.detect-noop.outputs.noop != 'true'
strategy:

View File

@@ -52,7 +52,7 @@ jobs:
e2e-multi-cluster-tests:
- runs-on: aliyun
+ runs-on: aliyun-legacy
needs: [ detect-noop,set-k8s-matrix ]
if: needs.detect-noop.outputs.noop != 'true'
strategy:

View File

@@ -51,7 +51,7 @@ jobs:
fi
e2e-rollout-tests:
- runs-on: aliyun
+ runs-on: aliyun-legacy
needs: [ detect-noop,set-k8s-matrix ]
if: needs.detect-noop.outputs.noop != 'true'
strategy:

View File

@@ -51,7 +51,7 @@ jobs:
fi
e2e-tests:
- runs-on: aliyun
+ runs-on: aliyun-legacy
needs: [ detect-noop,set-k8s-matrix ]
if: needs.detect-noop.outputs.noop != 'true'
strategy:

View File

@@ -97,7 +97,7 @@ jobs:
version: ${{ env.GOLANGCI_VERSION }}
check-diff:
- runs-on: aliyun
+ runs-on: aliyun-legacy
needs: detect-noop
if: needs.detect-noop.outputs.noop != 'true'

View File

@@ -150,11 +150,15 @@ jobs:
- name: Display structure of downloaded files
run: ls -R
working-directory: cli-artifacts
- name: Get version
run: echo "VELA_VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
- shell: bash
working-directory: cli-artifacts
run: |
for file in *
do
sed -i "s/\/vela/-${{ env.VELA_VERSION }}/g" ${file}
sed -i "s/\/kubectl-vela/-${{ env.VELA_VERSION }}/g" ${file}
cat ${file} >> sha256sums.txt
done
- name: Upload Checksums

View File

@@ -4,7 +4,7 @@ on:
- cron: '* * * * *'
jobs:
clean-image:
- runs-on: aliyun
+ runs-on: aliyun-legacy
steps:
- name: Cleanup image
run: docker image prune -f

View File

@@ -104,7 +104,7 @@ spec:
message: "Visiting URL: " + context.outputs.ingress.spec.rules[0].host + ", IP: " + igs[0].ip
}
if igs[0].host == _|_ {
message: "Host not specified, visit the cluster or load balancer in front of the cluster"
message: "Host not specified, visit the cluster or load balancer in front of the cluster with IP: " + igs[0].ip
}
}
if igs[0].ip == _|_ {

View File

@@ -28,8 +28,9 @@ spec:
}]
}]
initContainers: [{
- name:  parameter.name
- image: parameter.image
+ name:            parameter.name
+ image:           parameter.image
+ imagePullPolicy: parameter.imagePullPolicy
if parameter.cmd != _|_ {
command: parameter.cmd
}
@@ -59,6 +60,9 @@ spec:
// +usage=Specify the image of init container
image: string
// +usage=Specify image pull policy for your service
imagePullPolicy: *"IfNotPresent" | "Always" | "Never"
// +usage=Specify the commands run in the init container
cmd?: [...string]

View File

@@ -33,7 +33,7 @@ kind: ClusterRole
metadata:
name: {{ include "kubevela.fullname" . }}:manager
rules:
- apiGroups: ["core.oam.dev", "terraform.core.oam.dev", "prism.oam.dev"]
- apiGroups: ["core.oam.dev", "terraform.core.oam.dev", "prism.oam.dev", "standard.oam.dev"]
resources: ["*"]
verbs: ["*"]
- apiGroups: ["cluster.open-cluster-management.io"]

View File

@@ -104,7 +104,7 @@ spec:
message: "Visiting URL: " + context.outputs.ingress.spec.rules[0].host + ", IP: " + igs[0].ip
}
if igs[0].host == _|_ {
message: "Host not specified, visit the cluster or load balancer in front of the cluster"
message: "Host not specified, visit the cluster or load balancer in front of the cluster with IP: " + igs[0].ip
}
}
if igs[0].ip == _|_ {

View File

@@ -28,8 +28,9 @@ spec:
}]
}]
initContainers: [{
- name:  parameter.name
- image: parameter.image
+ name:            parameter.name
+ image:           parameter.image
+ imagePullPolicy: parameter.imagePullPolicy
if parameter.cmd != _|_ {
command: parameter.cmd
}
@@ -59,6 +60,9 @@ spec:
// +usage=Specify the image of init container
image: string
// +usage=Specify image pull policy for your service
imagePullPolicy: *"IfNotPresent" | "Always" | "Never"
// +usage=Specify the commands run in the init container
cmd?: [...string]

View File

@@ -36,7 +36,7 @@ kind: ClusterRole
metadata:
name: {{ include "kubevela.fullname" . }}:manager
rules:
- apiGroups: ["core.oam.dev", "terraform.core.oam.dev", "prism.oam.dev"]
- apiGroups: ["core.oam.dev", "terraform.core.oam.dev", "prism.oam.dev", "standard.oam.dev"]
resources: ["*"]
verbs: ["*"]
- apiGroups: ["cluster.open-cluster-management.io"]

View File

@@ -35,6 +35,7 @@ import (
"github.com/oam-dev/kubevela/pkg/apiserver/config"
"github.com/oam-dev/kubevela/pkg/apiserver/utils/log"
"github.com/oam-dev/kubevela/pkg/features"
"github.com/oam-dev/kubevela/pkg/utils"
"github.com/oam-dev/kubevela/version"
)
@@ -50,6 +51,7 @@ func main() {
flag.DurationVar(&s.serverConfig.LeaderConfig.Duration, "duration", time.Second*5, "the lease lock resource name")
flag.DurationVar(&s.serverConfig.AddonCacheTime, "addon-cache-duration", time.Minute*10, "how long between two addon cache operation")
flag.BoolVar(&s.serverConfig.DisableStatisticCronJob, "disable-statistic-cronJob", false, "close the system statistic info calculating cronJob")
flag.StringVar(&s.serverConfig.PprofAddr, "pprof-addr", "", "The address for pprof to use while exporting profiling results. The default value is empty which means do not expose it. Set it to address like :6666 to expose it.")
flag.Float64Var(&s.serverConfig.KubeQPS, "kube-api-qps", 100, "the qps for kube clients. Low qps may lead to low throughput. High qps may give stress to api-server.")
flag.IntVar(&s.serverConfig.KubeBurst, "kube-api-burst", 300, "the burst for kube clients. Recommend setting it qps*3.")
features.APIServerMutableFeatureGate.AddFlag(flag.CommandLine)
@@ -90,6 +92,11 @@ func main() {
errChan := make(chan error)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
if s.serverConfig.PprofAddr != "" {
go utils.EnablePprof(s.serverConfig.PprofAddr, errChan)
}
go func() {
if err := s.run(ctx, errChan); err != nil {
errChan <- fmt.Errorf("failed to run apiserver: %w", err)

View File

@@ -22,8 +22,6 @@ import (
goflag "flag"
"fmt"
"io"
"net/http"
"net/http/pprof"
"os"
"path/filepath"
"strconv"
@@ -53,6 +51,7 @@ import (
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/discoverymapper"
"github.com/oam-dev/kubevela/pkg/resourcekeeper"
pkgutils "github.com/oam-dev/kubevela/pkg/utils"
"github.com/oam-dev/kubevela/pkg/utils/common"
"github.com/oam-dev/kubevela/pkg/utils/system"
"github.com/oam-dev/kubevela/pkg/utils/util"
@@ -159,36 +158,7 @@ func main() {
if pprofAddr != "" {
- // Start pprof server if enabled
- mux := http.NewServeMux()
- mux.HandleFunc("/debug/pprof/", pprof.Index)
- mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
- mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
- mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
- mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
- pprofServer := http.Server{
- Addr: pprofAddr,
- Handler: mux,
- }
- klog.InfoS("Starting debug HTTP server", "addr", pprofServer.Addr)
- go func() {
- go func() {
- ctx := context.Background()
- <-ctx.Done()
- ctx, cancelFunc := context.WithTimeout(context.Background(), 60*time.Minute)
- defer cancelFunc()
- if err := pprofServer.Shutdown(ctx); err != nil {
- klog.Error(err, "Failed to shutdown debug HTTP server")
- }
- }()
- if err := pprofServer.ListenAndServe(); !errors.Is(http.ErrServerClosed, err) {
- klog.Error(err, "Failed to start debug HTTP server")
- panic(err)
- }
- }()
+ go pkgutils.EnablePprof(pprofAddr, nil)
}
if logFilePath != "" {

View File

@@ -80,7 +80,7 @@ ifeq (, $(shell which readme-generator))
@{ \
set -e ;\
echo 'installing readme-generator-for-helm' ;\
- npm install -g readme-generator-for-helm ;\
+ npm install -g @bitnami/readme-generator-for-helm ;\
}
else
@$(OK) readme-generator-for-helm is already installed

View File

@@ -86,7 +86,6 @@ var _ = Describe("test FindWholeAddonPackagesFromRegistry", func() {
Expect(res).To(HaveLen(1))
Expect(res[0].Name).To(Equal("velaux"))
Expect(res[0].InstallPackage).ToNot(BeNil())
- Expect(res[0].APISchema).ToNot(BeNil())
})
It("should return one valid result, matching one registry", func() {
res, err := FindWholeAddonPackagesFromRegistry(context.Background(), k8sClient, []string{"velaux"}, []string{"KubeVela"})
@@ -94,7 +93,6 @@ var _ = Describe("test FindWholeAddonPackagesFromRegistry", func() {
Expect(res).To(HaveLen(1))
Expect(res[0].Name).To(Equal("velaux"))
Expect(res[0].InstallPackage).ToNot(BeNil())
- Expect(res[0].APISchema).ToNot(BeNil())
})
})
@@ -113,10 +111,8 @@ var _ = Describe("test FindWholeAddonPackagesFromRegistry", func() {
Expect(res).To(HaveLen(2))
Expect(res[0].Name).To(Equal("velaux"))
Expect(res[0].InstallPackage).ToNot(BeNil())
- Expect(res[0].APISchema).ToNot(BeNil())
Expect(res[1].Name).To(Equal("traefik"))
Expect(res[1].InstallPackage).ToNot(BeNil())
- Expect(res[1].APISchema).ToNot(BeNil())
})
})
@@ -127,7 +123,6 @@ var _ = Describe("test FindWholeAddonPackagesFromRegistry", func() {
Expect(res).To(HaveLen(1))
Expect(res[0].Name).To(Equal("velaux"))
Expect(res[0].InstallPackage).ToNot(BeNil())
- Expect(res[0].APISchema).ToNot(BeNil())
})
})
})

View File

@@ -48,6 +48,7 @@ const (
addonAllClusterPolicy = "deploy-addon-to-all-clusters"
renderOutputCuePath = "output"
renderAuxiliaryOutputsPath = "outputs"
defaultCuePackageHeader = "main"
)
type addonCueTemplateRender struct {
@@ -334,6 +335,13 @@ func renderResources(addon *InstallPackage, args map[string]interface{}) ([]comm
}
for _, tmpl := range addon.CUETemplates {
isMainCueTemplate, err := checkCueFileHasPackageHeader(tmpl)
if err != nil {
return nil, err
}
if isMainCueTemplate {
continue
}
comp, err := renderCompAccordingCUETemplate(tmpl, addon, args)
if err != nil && strings.Contains(err.Error(), "var(path=output) not exist") {
continue
@@ -367,3 +375,14 @@ func isDeployToRuntime(addon *InstallPackage) bool {
}
return addon.DeployTo.RuntimeCluster || addon.DeployTo.LegacyRuntimeCluster
}
func checkCueFileHasPackageHeader(cueTemplate ElementFile) (bool, error) {
cueFile, err := parser.ParseFile(cueTemplate.Name, cueTemplate.Data, parser.ParseComments)
if err != nil {
return false, err
}
if cueFile.PackageName() == defaultCuePackageHeader {
return true, nil
}
return false, nil
}

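The renderer change above uses CUE's parser to detect templates that declare "package main" and skip them during per-file component rendering. A standalone sketch of the same check, assuming only cuelang.org/go/cue/parser:

package main

import (
	"fmt"

	"cuelang.org/go/cue/parser"
)

func main() {
	src := `package main

kustomizeController: name: "fluxcd-kustomize-controller"
`
	// parser.ParseFile returns an *ast.File; PackageName reports the
	// package clause, or "" when the file has none.
	f, err := parser.ParseFile("template.cue", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	fmt.Println(f.PackageName() == "main") // true: renderResources skips this file
}
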
View File

@@ -451,3 +451,99 @@ func TestRenderCueResourceError(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, len(comp), 2)
}
func TestCheckCueFileHasPackageHeader(t *testing.T) {
testCueTemplateWithPkg := `
package main
kustomizeController: {
// About this name, refer to #429 for details.
name: "fluxcd-kustomize-controller"
type: "webservice"
dependsOn: ["fluxcd-ns"]
properties: {
imagePullPolicy: "IfNotPresent"
image: _base + "fluxcd/kustomize-controller:v0.26.0"
env: [
{
name: "RUNTIME_NAMESPACE"
value: _targetNamespace
},
]
livenessProbe: {
httpGet: {
path: "/healthz"
port: 9440
}
timeoutSeconds: 5
}
readinessProbe: {
httpGet: {
path: "/readyz"
port: 9440
}
timeoutSeconds: 5
}
volumeMounts: {
emptyDir: [
{
name: "temp"
mountPath: "/tmp"
},
]
}
}
traits: [
{
type: "service-account"
properties: {
name: "sa-kustomize-controller"
create: true
privileges: _rules
}
},
{
type: "labels"
properties: {
"control-plane": "controller"
// This label is kept to avoid breaking existing
// KubeVela e2e tests (makefile e2e-setup).
"app": "kustomize-controller"
}
},
{
type: "command"
properties: {
args: controllerArgs
}
},
]
}
`
testCueTemplateWithoutPkg := `
output: {
type: "helm"
name: "nginx-ingress"
properties: {
repoType: "helm"
url: "https://kubernetes.github.io/ingress-nginx"
chart: "ingress-nginx"
version: "4.2.0"
values: {
controller: service: type: parameter["serviceType"]
}
}
}
`
cueTemplate := ElementFile{Name: "test-file.cue", Data: testCueTemplateWithPkg}
ok, err := checkCueFileHasPackageHeader(cueTemplate)
assert.NoError(t, err)
assert.Equal(t, true, ok)
cueTemplate = ElementFile{Name: "test-file-without-pkg.cue", Data: testCueTemplateWithoutPkg}
ok, err = checkCueFileHasPackageHeader(cueTemplate)
assert.NoError(t, err)
assert.Equal(t, false, ok)
}

View File

@@ -46,6 +46,9 @@ type Config struct {
// KubeQPS the QPS of kube client
KubeQPS float64
// PprofAddr the address for pprof to use while exporting profiling results.
PprofAddr string
}
type leaderConfig struct {

View File

@@ -1550,6 +1550,13 @@ func (c *applicationServiceImpl) DryRunAppOrRevision(ctx context.Context, appMod
}
case "REVISION":
app, _, err = c.getAppModelFromRevision(ctx, appModel.Name, dryRunReq.Version)
originalApp := &v1beta1.Application{}
if err := c.KubeClient.Get(ctx, types.NamespacedName{
Name: app.Name,
Namespace: app.Namespace,
}, originalApp); err == nil {
app.ResourceVersion = originalApp.ResourceVersion
}
if err != nil {
return nil, err
}

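The hunk above copies the live object's resourceVersion onto the application rebuilt from an old revision, so the dry-run is not rejected for carrying a version lower than the current one. Restated as a small helper (a sketch; syncResourceVersion is a hypothetical name, assuming a controller-runtime client):

package dryrun

import (
	"context"

	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
)

// syncResourceVersion stamps the live resourceVersion when the app still
// exists; the Get error is deliberately ignored so dry-running an app that
// was deleted keeps working.
func syncResourceVersion(ctx context.Context, c client.Client, app *v1beta1.Application) {
	live := &v1beta1.Application{}
	if err := c.Get(ctx, types.NamespacedName{Name: app.Name, Namespace: app.Namespace}, live); err == nil {
		app.ResourceVersion = live.ResourceVersion
	}
}
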
View File

@@ -18,11 +18,12 @@ package sync
import (
"context"
"encoding/json"
"sync"
"github.com/fatih/color"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/dynamic"
dynamicInformer "k8s.io/client-go/dynamic/dynamicinformer"
"k8s.io/client-go/rest"
@@ -58,14 +59,17 @@ func (a *ApplicationSync) Start(ctx context.Context, errorChan chan error) {
factory := dynamicInformer.NewFilteredDynamicSharedInformerFactory(dynamicClient, 0, v1.NamespaceAll, nil)
informer := factory.ForResource(v1beta1.SchemeGroupVersion.WithResource("applications")).Informer()
getApp := func(obj interface{}) *v1beta1.Application {
app := &v1beta1.Application{}
bs, err := json.Marshal(obj)
if err != nil {
log.Logger.Errorf("decode the application failure %s", err.Error())
if app, ok := obj.(*v1beta1.Application); ok {
return app
}
_ = json.Unmarshal(bs, app)
return app
var app v1beta1.Application
if object, ok := obj.(*unstructured.Unstructured); ok {
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(object.Object, &app); err != nil {
log.Logger.Errorf("decode the application failure %s", err.Error())
return &app
}
}
return &app
}
cu := &CR2UX{
ds: a.Store,
@@ -89,6 +93,7 @@ func (a *ApplicationSync) Start(ctx context.Context, errorChan chan error) {
if err := cu.AddOrUpdate(ctx, app.(*v1beta1.Application)); err != nil {
log.Logger.Errorf("fail to add or update application %s", err.Error())
}
a.Queue.Done(app)
}
}()

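The rewritten getApp above avoids the old json.Marshal/Unmarshal round trip: a dynamic informer hands back *unstructured.Unstructured, which converts to a typed object in one step. A minimal sketch of that conversion, using a ConfigMap for brevity:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	u := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "ConfigMap",
		"metadata":   map[string]interface{}{"name": "demo"},
	}}
	var cm corev1.ConfigMap
	// One structured conversion instead of marshaling to JSON and back.
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, &cm); err != nil {
		panic(err)
	}
	fmt.Println(cm.Name) // demo
}
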
View File

@@ -82,8 +82,8 @@ func (c Condition) Validate() error {
if c.JSONKey == "" {
return fmt.Errorf("the json key of the condition can not be empty")
}
if c.Action != "enable" && c.Action != "disable" {
return fmt.Errorf("the action of the condition must be enable or disable")
if c.Action != "enable" && c.Action != "disable" && c.Action != "" {
return fmt.Errorf("the action of the condition only supports enable, disable or leave it empty")
}
if c.Op != "" && !StringsContain([]string{"==", "!=", "in"}, c.Op) {
return fmt.Errorf("the op of the condition must be `==` 、`!=` and `in`")

View File

@@ -257,7 +257,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
}
case common.WorkflowStateSkipping:
logCtx.Info("Skip this reconcile")
- return ctrl.Result{}, nil
+ return r.result(nil).requeue(wf.GetBackoffWaitTime()).ret()
}
var phase = common.ApplicationRunning
@@ -299,6 +299,11 @@ func (r *Reconciler) gcResourceTrackers(logCtx monitorContext.Context, handler *
}))
defer subCtx.Commit("finish gc resourceTrackers")
statusUpdater := r.updateStatus
if isPatch {
statusUpdater = r.patchStatus
}
var options []resourcekeeper.GCOption
if !gcOutdated {
options = append(options, resourcekeeper.DisableMarkStageGCOption{}, resourcekeeper.DisableGCComponentRevisionOption{}, resourcekeeper.DisableLegacyGCOption{})
@@ -306,8 +311,10 @@ func (r *Reconciler) gcResourceTrackers(logCtx monitorContext.Context, handler *
finished, waiting, err := handler.resourceKeeper.GarbageCollect(logCtx, options...)
if err != nil {
logCtx.Error(err, "Failed to gc resourcetrackers")
- r.Recorder.Event(handler.app, event.Warning(velatypes.ReasonFailedGC, err))
- return r.endWithNegativeCondition(logCtx, handler.app, condition.ReconcileError(err), phase)
+ cond := condition.Deleting()
+ cond.Message = fmt.Sprintf("error encountered during garbage collection: %s", err.Error())
+ handler.app.Status.SetConditions(cond)
+ return r.result(statusUpdater(logCtx, handler.app, phase)).ret()
}
if !finished {
logCtx.Info("GarbageCollecting resourcetrackers unfinished")
@@ -316,13 +323,13 @@ func (r *Reconciler) gcResourceTrackers(logCtx monitorContext.Context, handler *
cond.Message = fmt.Sprintf("Waiting for %s to delete. (At least %d resources are deleting.)", waiting[0].DisplayName(), len(waiting))
}
handler.app.Status.SetConditions(cond)
- return r.result(r.patchStatus(logCtx, handler.app, phase)).requeue(baseGCBackoffWaitTime).ret()
+ return r.result(statusUpdater(logCtx, handler.app, phase)).requeue(baseGCBackoffWaitTime).ret()
}
logCtx.Info("GarbageCollected resourcetrackers")
if !isPatch {
- return r.result(r.updateStatus(logCtx, handler.app, common.ApplicationRunningWorkflow)).ret()
+ phase = common.ApplicationRunningWorkflow
}
- return r.result(r.patchStatus(logCtx, handler.app, phase)).ret()
+ return r.result(statusUpdater(logCtx, handler.app, phase)).ret()
}
type reconcileResult struct {

View File

@@ -23,6 +23,7 @@ import (
"sort"
"strings"
"github.com/hashicorp/go-version"
"github.com/pkg/errors"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
@@ -233,9 +234,6 @@ func (h *AppHandler) gatherRevisionSpec(af *appfile.Appfile) (*v1beta1.Applicati
copiedApp := h.app.DeepCopy()
// We better to remove all object status in the appRevision
copiedApp.Status = common.AppStatus{}
- if !metav1.HasAnnotation(h.app.ObjectMeta, oam.AnnotationPublishVersion) {
- copiedApp.Spec.Workflow = nil
- }
appRev := &v1beta1.ApplicationRevision{
Spec: v1beta1.ApplicationRevisionSpec{
Application: *copiedApp,
@@ -525,6 +523,28 @@ func deepEqualWorkflow(old, new v1alpha1.Workflow) bool {
return apiequality.Semantic.DeepEqual(old.Steps, new.Steps)
}
const velaVersionNumberToCompareWorkflow = "v1.5.7"
func deepEqualAppSpec(old, new *v1beta1.Application) bool {
oldSpec, newSpec := old.Spec.DeepCopy(), new.Spec.DeepCopy()
// legacy code: KubeVela version before v1.5.7 & v1.6.0-alpha.4 does not
// record workflow in application spec in application revision. The comparison
// need to bypass the equality check of workflow to prevent unintended rerun
var curVerNum, publishVersion string
if annotations := old.GetAnnotations(); annotations != nil {
curVerNum = annotations[oam.AnnotationKubeVelaVersion]
publishVersion = annotations[oam.AnnotationPublishVersion]
}
if publishVersion == "" && curVerNum != "" {
cmpVer, _ := version.NewVersion(velaVersionNumberToCompareWorkflow)
if curVer, err := version.NewVersion(curVerNum); err == nil && curVer.LessThan(cmpVer) {
oldSpec.Workflow = nil
newSpec.Workflow = nil
}
}
return apiequality.Semantic.DeepEqual(oldSpec, newSpec)
}
func deepEqualAppInRevision(old, new *v1beta1.ApplicationRevision) bool {
if len(old.Spec.Policies) != len(new.Spec.Policies) {
return false
@@ -542,8 +562,10 @@ func deepEqualAppInRevision(old, new *v1beta1.ApplicationRevision) bool {
return false
}
}
- return apiequality.Semantic.DeepEqual(filterSkipAffectAppRevTrait(old.Spec.Application.Spec, old.Spec.TraitDefinitions),
- filterSkipAffectAppRevTrait(new.Spec.Application.Spec, new.Spec.TraitDefinitions))
+ oldApp, newApp := old.Spec.Application.DeepCopy(), new.Spec.Application.DeepCopy()
+ oldApp.Spec = filterSkipAffectAppRevTrait(oldApp.Spec, old.Spec.TraitDefinitions)
+ newApp.Spec = filterSkipAffectAppRevTrait(newApp.Spec, new.Spec.TraitDefinitions)
+ return deepEqualAppSpec(oldApp, newApp)
}
// HandleComponentsRevision manages Component revisions

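deepEqualAppSpec above gates the workflow comparison on the KubeVela version that wrote the revision: releases before v1.5.7 never recorded spec.workflow in the application revision, so comparing that field would always report a difference and rerun the workflow. A tiny sketch of the version gate, assuming github.com/hashicorp/go-version:

package main

import (
	"fmt"

	version "github.com/hashicorp/go-version"
)

func main() {
	gate, _ := version.NewVersion("v1.5.7") // velaVersionNumberToCompareWorkflow
	cur, _ := version.NewVersion("v1.5.0")  // from the kubevela-version annotation
	// true: the revision predates workflow recording, so both sides set
	// spec.workflow to nil before the semantic DeepEqual.
	fmt.Println(cur.LessThan(gate))
}
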
View File

@@ -22,10 +22,12 @@ import (
"fmt"
"reflect"
"strconv"
"testing"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/stretchr/testify/require"
"github.com/google/go-cmp/cmp"
appsv1 "k8s.io/api/apps/v1"
@@ -1171,3 +1173,19 @@ status: {}
}()).Should(BeTrue())
})
})
func TestDeepEqualAppInRevision(t *testing.T) {
oldRev := &v1beta1.ApplicationRevision{}
newRev := &v1beta1.ApplicationRevision{}
newRev.Spec.Application.Spec.Workflow = &v1beta1.Workflow{
Steps: []v1beta1.WorkflowStep{{
Type: "deploy",
Name: "deploy",
}},
}
require.False(t, deepEqualAppInRevision(oldRev, newRev))
metav1.SetMetaDataAnnotation(&oldRev.Spec.Application.ObjectMeta, oam.AnnotationKubeVelaVersion, "v1.6.0-alpha.5")
require.False(t, deepEqualAppInRevision(oldRev, newRev))
metav1.SetMetaDataAnnotation(&oldRev.Spec.Application.ObjectMeta, oam.AnnotationKubeVelaVersion, "v1.5.0")
require.True(t, deepEqualAppInRevision(oldRev, newRev))
}

View File

@@ -292,6 +292,7 @@ func NewTraitAbstractEngine(name string, pd *packages.PackageDiscover) AbstractE
}
// Complete do trait definition's rendering
// nolint:gocyclo
func (td *traitDef) Complete(ctx process.Context, abstractTemplate string, params interface{}) error {
bi := build.NewContext().NewInstance("", nil)
if err := bi.AddFile("-", abstractTemplate); err != nil {
@@ -360,6 +361,9 @@ func (td *traitDef) Complete(ctx process.Context, abstractTemplate string, param
if err != nil {
return errors.WithMessagef(err, "invalid patch of trait %s", td.name)
}
if base == nil {
return fmt.Errorf("patch trait %s into an invalid workload", td.name)
}
if err := base.Unify(p, sets.CreateUnifyOptionsForPatcher(patcher)...); err != nil {
return errors.WithMessagef(err, "invalid patch trait %s into workload", td.name)
}

View File

@@ -1266,6 +1266,38 @@ outputs: abc :{
}
}
func TestTraitCompleteErrorCases(t *testing.T) {
cases := map[string]struct {
ctx process.Context
traitName string
template string
params map[string]interface{}
err string
}{
"patch trait": {
ctx: process.NewContext(process.ContextData{}),
template: `
patch: {
// +patchKey=name
spec: template: spec: containers: [parameter]
}
parameter: {
name: string
image: string
command?: [...string]
}`,
err: "patch trait patch trait into an invalid workload",
},
}
for k, v := range cases {
td := NewTraitAbstractEngine(k, &packages.PackageDiscover{})
err := td.Complete(v.ctx, v.template, v.params)
assert.Error(t, err)
assert.Contains(t, err.Error(), v.err)
}
}
func TestCheckHealth(t *testing.T) {
cases := map[string]struct {
tpContext map[string]interface{}

View File

@@ -235,7 +235,7 @@ func (def *Definition) FromCUE(val *cue.Value, templateString string) error {
labels := map[string]string{}
for k, v := range def.GetLabels() {
if !strings.HasPrefix(k, UserPrefix) {
- annotations[k] = v
+ labels[k] = v
}
}
spec, ok := def.Object["spec"].(map[string]interface{})

View File

@@ -165,10 +165,25 @@ func (h *gcHandler) monitor(stage string) func() {
}
}
func (h *gcHandler) regularizeResourceTracker(rts ...*v1beta1.ResourceTracker) {
for _, rt := range rts {
if rt == nil {
continue
}
for i, mr := range rt.Spec.ManagedResources {
if ok, err := utils.IsClusterScope(mr.GroupVersionKind(), h.Client.RESTMapper()); err == nil && ok {
rt.Spec.ManagedResources[i].Namespace = ""
}
}
}
}
func (h *gcHandler) Init() {
cb := h.monitor("init")
defer cb()
- h.cache.registerResourceTrackers(append(h._historyRTs, h._currentRT, h._rootRT)...)
+ rts := append(h._historyRTs, h._currentRT, h._rootRT)
+ h.regularizeResourceTracker(rts...)
+ h.cache.registerResourceTrackers(rts...)
}
func (h *gcHandler) scan(ctx context.Context) (inactiveRTs []*v1beta1.ResourceTracker) {

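regularizeResourceTracker above exists because legacy trackers recorded cluster-scoped resources with a namespace, while current ones do not; since GC matches managed resources by their full reference, the same ClusterRole could look like two different resources and get collected. A cut-down illustration of the mismatch (ref and key are stand-ins for the real tracker types):

package main

import "fmt"

// ref is a stand-in for the tracker's managed-resource reference.
type ref struct{ Kind, Namespace, Name string }

func key(r ref) string { return r.Kind + "/" + r.Namespace + "/" + r.Name }

func main() {
	legacy := ref{Kind: "ClusterRole", Namespace: "vela-app", Name: "cr"} // recorded by an old version
	current := ref{Kind: "ClusterRole", Name: "cr"}
	fmt.Println(key(legacy) == key(current)) // false: GC sees two resources
	legacy.Namespace = ""                    // regularization for cluster-scoped kinds
	fmt.Println(key(legacy) == key(current)) // true: the legacy record now matches
}
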
View File

@@ -27,6 +27,7 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -221,4 +222,46 @@ var _ = Describe("Test ResourceKeeper garbage collection", func() {
}, 5*time.Second).Should(Succeed())
})
It("Test gc same cluster-scoped resource but legacy resource recorded with namespace", func() {
ctx := context.Background()
cr := &unstructured.Unstructured{Object: map[string]interface{}{
"apiVersion": "rbac.authorization.k8s.io/v1",
"kind": "ClusterRole",
"metadata": map[string]interface{}{
"name": "test-cluster-scoped-resource",
"labels": map[string]interface{}{
oam.LabelAppName: "app",
oam.LabelAppNamespace: namespace,
},
},
}}
Expect(testClient.Create(ctx, cr)).Should(Succeed())
app := &v1beta1.Application{ObjectMeta: metav1.ObjectMeta{Name: "app", Namespace: namespace}}
keeper := &resourceKeeper{
Client: testClient,
app: app,
applicator: apply.NewAPIApplicator(testClient),
cache: newResourceCache(testClient, app),
}
h := gcHandler{resourceKeeper: keeper, cfg: newGCConfig()}
h._currentRT = &v1beta1.ResourceTracker{ObjectMeta: metav1.ObjectMeta{Name: "test-cluster-scoped-resource-v2"}}
Expect(testClient.Create(ctx, h._currentRT)).Should(Succeed())
h._historyRTs = []*v1beta1.ResourceTracker{{ObjectMeta: metav1.ObjectMeta{Name: "test-cluster-scoped-resource-v1"}}}
t := metav1.Now()
h._historyRTs[0].SetDeletionTimestamp(&t)
h._historyRTs[0].SetFinalizers([]string{resourcetracker.Finalizer})
h._currentRT.AddManagedResource(cr, true, false, "")
_cr := cr.DeepCopy()
_cr.SetNamespace(namespace)
h._historyRTs[0].AddManagedResource(_cr, true, false, "")
h.Init()
Expect(h.Finalize(ctx)).Should(Succeed())
Expect(testClient.Get(ctx, client.ObjectKeyFromObject(cr), &rbacv1.ClusterRole{})).Should(Succeed())
h._currentRT.Spec.ManagedResources[0].Name = "not-equal"
keeper.cache = newResourceCache(testClient, app)
h.Init()
Expect(h.Finalize(ctx)).Should(Succeed())
Expect(testClient.Get(ctx, client.ObjectKeyFromObject(cr), &rbacv1.ClusterRole{})).Should(Satisfy(errors.IsNotFound))
})
})

View File

@@ -17,18 +17,15 @@ limitations under the License.
package resourcekeeper
import (
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"github.com/oam-dev/kubevela/pkg/utils"
)
// ClearNamespaceForClusterScopedResources clear namespace for cluster scoped resources
func (h *resourceKeeper) ClearNamespaceForClusterScopedResources(manifests []*unstructured.Unstructured) {
for _, manifest := range manifests {
- mappings, err := h.Client.RESTMapper().RESTMappings(manifest.GroupVersionKind().GroupKind(), manifest.GroupVersionKind().Version)
- if err != nil {
- continue
- }
- if len(mappings) > 0 && mappings[0].Scope.Name() == meta.RESTScopeNameRoot {
+ if ok, err := utils.IsClusterScope(manifest.GroupVersionKind(), h.Client.RESTMapper()); err == nil && ok {
manifest.SetNamespace("")
}
}

View File

@@ -149,6 +149,7 @@
appProtocol?: string
host?: string
port: int
portName?: string
path?: string
inner?: bool
}

View File

@@ -26,6 +26,7 @@ import (
"fmt"
"io"
"net/http"
neturl "net/url"
"os"
"os/exec"
"path/filepath"
@@ -78,9 +79,15 @@ import (
var (
// Scheme defines the default KubeVela schema
Scheme = k8sruntime.NewScheme()
// forbidRedirectFunc general check func for http redirect response
forbidRedirectFunc = func(req *http.Request, via []*http.Request) error {
return errors.New("got a redirect response which is forbidden")
}
//nolint:gosec
// insecureHTTPClient insecure http client
insecureHTTPClient = &http.Client{Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}}
insecureHTTPClient = &http.Client{Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}, CheckRedirect: forbidRedirectFunc}
// forbidRedirectClient is a http client forbid redirect http request
forbidRedirectClient = &http.Client{CheckRedirect: forbidRedirectFunc}
)
const (
@@ -161,11 +168,14 @@ func GetClient() (client.Client, error) {
// HTTPGetResponse use HTTP option and default client to send request and get raw response
func HTTPGetResponse(ctx context.Context, url string, opts *HTTPOption) (*http.Response, error) {
// Change NewRequest to NewRequestWithContext and pass context it
if _, err := neturl.ParseRequestURI(url); err != nil {
return nil, err
}
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
if err != nil {
return nil, err
}
- httpClient := http.DefaultClient
+ httpClient := forbidRedirectClient
if opts != nil && len(opts.Username) != 0 && len(opts.Password) != 0 {
req.SetBasicAuth(opts.Username, opts.Password)
}
@@ -193,7 +203,7 @@ func HTTPGetResponse(ctx context.Context, url string, opts *HTTPOption) (*http.R
}
tr.TLSClientConfig = tlsConfig
defer tr.CloseIdleConnections()
httpClient = &http.Client{Transport: &tr}
httpClient = &http.Client{Transport: &tr, CheckRedirect: forbidRedirectFunc}
}
return httpClient.Do(req)
}

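All three clients above now share forbidRedirectFunc, so net/http aborts instead of transparently following a 3xx; this closes the SSRF window where a registry URL answers with a 302 to an internal address. A self-contained sketch of the pattern, using httptest in place of a real registry:

package main

import (
	"errors"
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	// A server that always redirects to a link-local (metadata) address.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		http.Redirect(w, r, "http://169.254.169.254/", http.StatusFound)
	}))
	defer srv.Close()

	client := &http.Client{CheckRedirect: func(req *http.Request, via []*http.Request) error {
		return errors.New("got a redirect response which is forbidden")
	}}
	_, err := client.Get(srv.URL)
	fmt.Println(err != nil) // true: the redirect is refused before being followed
}
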
View File

@@ -26,6 +26,7 @@ import (
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
"time"
@@ -223,6 +224,25 @@ func TestHttpGetCaFile(t *testing.T) {
}
}
func TestHttpGetForbidRedirect(t *testing.T) {
var ctx = context.Background()
testServer := &http.Server{Addr: ":19090"}
http.HandleFunc("/redirect", func(writer http.ResponseWriter, request *http.Request) {
http.Redirect(writer, request, "http://192.168.1.1", http.StatusFound)
})
go func() {
err := testServer.ListenAndServe()
assert.NoError(t, err)
}()
time.Sleep(time.Millisecond)
_, err := HTTPGetWithOption(ctx, "http://127.0.0.1:19090/redirect", nil)
assert.Error(t, err)
assert.True(t, strings.Contains(err.Error(), "got a redirect response which is forbidden"))
}
func TestGetCUEParameterValue(t *testing.T) {
type want struct {
err error

pkg/utils/env/env.go (vendored, 11 lines changed)
View File

@@ -73,6 +73,17 @@ func CreateEnv(envArgs *types.EnvMeta) error {
}
}
ctx := context.TODO()
var nsList v1.NamespaceList
err = c.List(ctx, &nsList, client.MatchingLabels{oam.LabelControlPlaneNamespaceUsage: oam.VelaNamespaceUsageEnv,
oam.LabelNamespaceOfEnvName: envArgs.Name})
if err != nil {
return err
}
if len(nsList.Items) > 0 {
return fmt.Errorf("env name %s already exists", envArgs.Name)
}
namespace, err := utils.GetNamespace(ctx, c, envArgs.Namespace)
if err != nil && !apierrors.IsNotFound(err) {
return err

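The new block in CreateEnv rejects a duplicate env name by listing namespaces that carry the env labels. A sketch of the guard, assuming a controller-runtime client (the literal label keys below stand in for the oam.Label* constants used above):

package env

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func checkEnvNameFree(ctx context.Context, c client.Client, name string) error {
	var nsList v1.NamespaceList
	// Env namespaces are labeled at creation, so a label-selector list
	// detects a collision without scanning every namespace.
	if err := c.List(ctx, &nsList, client.MatchingLabels{
		"usage.oam.dev":    "env", // stand-in for oam.LabelControlPlaneNamespaceUsage
		"env-name.oam.dev": name,  // stand-in for oam.LabelNamespaceOfEnvName
	}); err != nil {
		return err
	}
	if len(nsList.Items) > 0 {
		return fmt.Errorf("env name %s already exists", name)
	}
	return nil
}
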
View File

@@ -225,7 +225,7 @@ func (h *Helper) GetIndexInfo(repoURL string, skipCache bool, opts *common.HTTPO
}
i := &repo.IndexFile{}
if err := yaml.UnmarshalStrict(body, i); err != nil {
return nil, fmt.Errorf("parse index file from %s failure %w", repoURL, err)
return nil, fmt.Errorf("parse index file from %s failure", repoURL)
}
if h.cache != nil {

View File

@@ -26,7 +26,9 @@ import (
authv1 "k8s.io/api/authentication/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
k8stypes "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -189,3 +191,10 @@ func CreateOrUpdate(ctx context.Context, cli client.Client, obj client.Object) (
func EscapeResourceNameToLabelValue(resourceName string) string {
return strings.ReplaceAll(resourceName, ":", "_")
}
// IsClusterScope check if the gvk is cluster scoped
func IsClusterScope(gvk schema.GroupVersionKind, mapper meta.RESTMapper) (bool, error) {
mappings, err := mapper.RESTMappings(gvk.GroupKind(), gvk.Version)
isClusterScope := len(mappings) > 0 && mappings[0].Scope.Name() == meta.RESTScopeNameRoot
return isClusterScope, err
}

View File

@@ -22,6 +22,7 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierror "k8s.io/apimachinery/pkg/api/errors"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -162,4 +163,13 @@ var _ = Describe("Test Create Or Update Namespace functions", func() {
Expect(gotNS.Labels).Should(HaveKeyWithValue(k, v))
}
})
It("Test IsClusterScope", func() {
ok, err := IsClusterScope(v1.SchemeGroupVersion.WithKind("ConfigMap"), k8sClient.RESTMapper())
Expect(err).Should(Succeed())
Expect(ok).Should(BeFalse())
ok, err = IsClusterScope(rbacv1.SchemeGroupVersion.WithKind("ClusterRole"), k8sClient.RESTMapper())
Expect(err).Should(Succeed())
Expect(ok).Should(BeTrue())
})
})

pkg/utils/pprof.go (new file, 51 lines)
View File

@@ -0,0 +1,51 @@
/*
Copyright 2022 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"net/http"
"net/http/pprof"
"k8s.io/klog/v2"
)
// EnablePprof listen to the pprofAddr and export the profiling results
// If the errChan is nil, this function will panic when the listening error occurred.
func EnablePprof(pprofAddr string, errChan chan error) {
// Start pprof server if enabled
mux := http.NewServeMux()
mux.HandleFunc("/debug/pprof/", pprof.Index)
mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
pprofServer := http.Server{
Addr: pprofAddr,
Handler: mux,
}
klog.InfoS("Starting debug HTTP server", "addr", pprofServer.Addr)
if err := pprofServer.ListenAndServe(); err != nil {
klog.Error(err, "Failed to start debug HTTP server")
if errChan != nil {
errChan <- err
} else {
panic(err)
}
}
}

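This new helper consolidates the pprof wiring that the controller and the apiserver previously duplicated. A minimal sketch of how a caller wires it, mirroring the apiserver change above (the address is an example; see the -pprof-addr flag):

package main

import (
	"log"

	"github.com/oam-dev/kubevela/pkg/utils"
)

func main() {
	errChan := make(chan error)
	// A non-nil channel surfaces listener failures to the caller;
	// passing nil makes EnablePprof panic on error instead.
	go utils.EnablePprof(":6666", errChan)
	// Profiles are then served under /debug/pprof/, e.g.
	//   go tool pprof http://127.0.0.1:6666/debug/pprof/profile
	log.Fatal(<-errChan)
}
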
View File

@@ -43,10 +43,10 @@ import (
"github.com/oam-dev/kubevela/pkg/workflow/types"
)
- // GeneratorServiceEndpoints generator service endpoints is available for common component type,
+ // CollectServiceEndpoints generator service endpoints is available for common component type,
// such as webservice or helm
// it can not support the cloud service component currently
- func (h *provider) GeneratorServiceEndpoints(wfctx wfContext.Context, v *value.Value, act types.Action) error {
+ func (h *provider) CollectServiceEndpoints(wfctx wfContext.Context, v *value.Value, act types.Action) error {
ctx := context.Background()
val, err := v.LookupValue("app")
@@ -189,13 +189,14 @@ func generatorFromService(service corev1.Service, selectorNodeIP func() string,
ResourceVersion: service.ResourceVersion,
}
- formatEndpoint := func(host, appProtocol string, portProtocol corev1.Protocol, portNum int32, inner bool) querytypes.ServiceEndpoint {
+ formatEndpoint := func(host, appProtocol string, portName string, portProtocol corev1.Protocol, portNum int32, inner bool) querytypes.ServiceEndpoint {
return querytypes.ServiceEndpoint{
Endpoint: querytypes.Endpoint{
Protocol: portProtocol,
AppProtocol: &appProtocol,
Host: host,
Port: int(portNum),
PortName: portName,
Path: path,
Inner: inner,
},
@@ -210,22 +211,22 @@ func generatorFromService(service corev1.Service, selectorNodeIP func() string,
appp := judgeAppProtocol(port.Port)
for _, ingress := range service.Status.LoadBalancer.Ingress {
if ingress.Hostname != "" {
- serviceEndpoints = append(serviceEndpoints, formatEndpoint(ingress.Hostname, appp, port.Protocol, port.NodePort, false))
+ serviceEndpoints = append(serviceEndpoints, formatEndpoint(ingress.Hostname, appp, port.Name, port.Protocol, port.Port, false))
}
if ingress.IP != "" {
- serviceEndpoints = append(serviceEndpoints, formatEndpoint(ingress.IP, appp, port.Protocol, port.NodePort, false))
+ serviceEndpoints = append(serviceEndpoints, formatEndpoint(ingress.IP, appp, port.Name, port.Protocol, port.Port, false))
}
}
}
case corev1.ServiceTypeNodePort:
for _, port := range service.Spec.Ports {
appp := judgeAppProtocol(port.Port)
- serviceEndpoints = append(serviceEndpoints, formatEndpoint(selectorNodeIP(), appp, port.Protocol, port.NodePort, false))
+ serviceEndpoints = append(serviceEndpoints, formatEndpoint(selectorNodeIP(), appp, port.Name, port.Protocol, port.NodePort, false))
}
case corev1.ServiceTypeClusterIP, corev1.ServiceTypeExternalName:
for _, port := range service.Spec.Ports {
appp := judgeAppProtocol(port.Port)
- serviceEndpoints = append(serviceEndpoints, formatEndpoint(fmt.Sprintf("%s.%s", service.Name, service.Namespace), appp, port.Protocol, port.Port, true))
+ serviceEndpoints = append(serviceEndpoints, formatEndpoint(fmt.Sprintf("%s.%s", service.Name, service.Namespace), appp, port.Name, port.Protocol, port.Port, true))
}
}
return serviceEndpoints

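The behavioral change in the hunk above: LoadBalancer ingress endpoints now report the declared service port instead of the NodePort, and every endpoint carries the port name. The port-selection rule, condensed into a sketch:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// endpointPort mirrors the rule the hunk settles on: only NodePort
// services expose the node port; LoadBalancer ingress and ClusterIP
// endpoints use the declared service port.
func endpointPort(svcType corev1.ServiceType, port corev1.ServicePort) int32 {
	if svcType == corev1.ServiceTypeNodePort {
		return port.NodePort
	}
	return port.Port
}

func main() {
	p := corev1.ServicePort{Port: 80, NodePort: 30010}
	fmt.Println(endpointPort(corev1.ServiceTypeLoadBalancer, p)) // 80
	fmt.Println(endpointPort(corev1.ServiceTypeNodePort, p))     // 30010
}
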
View File

@@ -165,7 +165,7 @@ var _ = Describe("Test Query Provider", func() {
{
"name": "seldon-ambassador-2",
"ports": []corev1.ServicePort{
{Port: 80, TargetPort: intstr.FromInt(80), Name: "80port", NodePort: 30010},
{Port: 80, TargetPort: intstr.FromInt(80), Name: "80port"},
},
"type": corev1.ServiceTypeLoadBalancer,
"status": corev1.ServiceStatus{
@@ -244,15 +244,15 @@ var _ = Describe("Test Query Provider", func() {
pr := &provider{
cli: k8sClient,
}
- err = pr.GeneratorServiceEndpoints(nil, v, nil)
+ err = pr.CollectServiceEndpoints(nil, v, nil)
Expect(err).Should(BeNil())
urls := []string{
"http://1.1.1.1:30010/seldon/default/sdep2",
"http://1.1.1.1/seldon/default/sdep2",
"http://clusterip-2.default",
"clusterip-2.default:81",
"http://2.2.2.2:30020",
"http://1.1.1.1:30010",
"http://2.2.2.2:8080",
"http://1.1.1.1",
}
endValue, err := v.Field("list")
Expect(err).Should(BeNil())

View File

@@ -305,6 +305,6 @@ func Install(p providers.Providers, cli client.Client, cfg *rest.Config) {
"collectResources": prd.CollectResources,
"searchEvents": prd.SearchEvents,
"collectLogsInPod": prd.CollectLogsInPod,
"collectServiceEndpoints": prd.GeneratorServiceEndpoints,
"collectServiceEndpoints": prd.CollectServiceEndpoints,
})
}

View File

@@ -928,7 +928,7 @@ options: {
pr := &provider{
cli: k8sClient,
}
- err = pr.GeneratorServiceEndpoints(nil, v, nil)
+ err = pr.CollectServiceEndpoints(nil, v, nil)
Expect(err).Should(BeNil())
gatewayIP := selectorNodeIP(ctx, "", k8sClient)
urls := []string{
@@ -937,13 +937,13 @@ options: {
"https://ingress.domain.path/test",
"https://ingress.domain.path/test2",
fmt.Sprintf("http://%s:30229", gatewayIP),
"http://10.10.10.10:30080",
"http://text.example.com:30080",
"10.10.10.10:30081",
"text.example.com:30081",
"http://10.10.10.10",
"http://text.example.com",
"10.10.10.10:81",
"text.example.com:81",
fmt.Sprintf("http://%s:30002", gatewayIP),
"http://ingress.domain.helm",
"http://1.1.1.1:30011/seldon/default/sdep",
"http://1.1.1.1/seldon/default/sdep",
"http://gateway.domain",
"http://gateway.domain/api",
"https://demo.kubevela.net",

View File

@@ -879,8 +879,8 @@ func mergeCustomRules(ctx context.Context, k8sClient client.Client) error {
format string
err error
)
- if item.Annotations != nil {
- format = item.Annotations[oam.LabelResourceRuleFormat]
+ if item.Labels != nil {
+ format = item.Labels[oam.LabelResourceRuleFormat]
}
switch format {
case oam.ResourceTopologyFormatJSON:

View File

@@ -1448,6 +1448,26 @@ var _ = Describe("test merge globalRules", func() {
defaultLabelSelector: true
- apiVersion: apps/v1
kind: ControllerRevision
`
clickhouseJsonStr := `
[
{
"parentResourceType": {
"group": "clickhouse.altinity.com",
"kind": "ClickHouseInstallation"
},
"childrenResourceType": [
{
"apiVersion": "apps/v1",
"kind": "StatefulSet"
},
{
"apiVersion": "v1",
"kind": "Service"
}
]
}
]
`
daemonSetStr := `
- parentResourceType:
@@ -1481,20 +1501,29 @@ childrenResourceType:
It("test merge rules", func() {
Expect(k8sClient.Create(ctx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "vela-system"}})).Should(SatisfyAny(BeNil(), util.AlreadyExistMatcher{}))
cloneSetConfigMap := v1.ConfigMap{TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "ConfigMap"},
ObjectMeta: metav1.ObjectMeta{Namespace: types3.DefaultKubeVelaNS, Name: "cloneset", Labels: map[string]string{oam.LabelResourceRules: "true"}},
Data: map[string]string{relationshipKey: cloneSetStr},
ObjectMeta: metav1.ObjectMeta{Namespace: types3.DefaultKubeVelaNS, Name: "cloneset", Labels: map[string]string{
oam.LabelResourceRules: "true",
oam.LabelResourceRuleFormat: oam.ResourceTopologyFormatYAML,
}},
Data: map[string]string{relationshipKey: cloneSetStr},
}
Expect(k8sClient.Create(ctx, &cloneSetConfigMap)).Should(BeNil())
daemonSetConfigMap := v1.ConfigMap{TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "ConfigMap"},
ObjectMeta: metav1.ObjectMeta{Namespace: types3.DefaultKubeVelaNS, Name: "daemonset", Labels: map[string]string{oam.LabelResourceRules: "true"}},
Data: map[string]string{relationshipKey: daemonSetStr},
ObjectMeta: metav1.ObjectMeta{Namespace: types3.DefaultKubeVelaNS, Name: "daemonset", Labels: map[string]string{
oam.LabelResourceRules: "true",
oam.LabelResourceRuleFormat: oam.ResourceTopologyFormatYAML,
}},
Data: map[string]string{relationshipKey: daemonSetStr},
}
Expect(k8sClient.Create(ctx, &daemonSetConfigMap)).Should(BeNil())
stsConfigMap := v1.ConfigMap{TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "ConfigMap"},
ObjectMeta: metav1.ObjectMeta{Namespace: types3.DefaultKubeVelaNS, Name: "sts", Labels: map[string]string{oam.LabelResourceRules: "true"}},
Data: map[string]string{relationshipKey: stsStr},
ObjectMeta: metav1.ObjectMeta{Namespace: types3.DefaultKubeVelaNS, Name: "sts", Labels: map[string]string{
oam.LabelResourceRules: "true",
oam.LabelResourceRuleFormat: oam.ResourceTopologyFormatYAML,
}},
Data: map[string]string{relationshipKey: stsStr},
}
Expect(k8sClient.Create(ctx, &stsConfigMap)).Should(BeNil())
@@ -1504,6 +1533,15 @@ childrenResourceType:
}
Expect(k8sClient.Create(ctx, &missConfigedCm)).Should(BeNil())
clickhouseJsonCm := v1.ConfigMap{TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "ConfigMap"},
ObjectMeta: metav1.ObjectMeta{Namespace: types3.DefaultKubeVelaNS, Name: "clickhouse", Labels: map[string]string{
oam.LabelResourceRules: "true",
oam.LabelResourceRuleFormat: oam.ResourceTopologyFormatJSON,
}},
Data: map[string]string{relationshipKey: clickhouseJsonStr},
}
Expect(k8sClient.Create(ctx, &clickhouseJsonCm)).Should(BeNil())
Expect(mergeCustomRules(ctx, k8sClient)).Should(BeNil())
childrenResources, ok := globalRule.GetRule(GroupResourceType{Group: "apps.kruise.io", Kind: "CloneSet"})
Expect(ok).Should(BeTrue())
@@ -1539,6 +1577,19 @@ childrenResourceType:
Expect(revisionCR).ShouldNot(BeNil())
Expect(revisionCR.listOptions).Should(BeNil())
chChildrenResources, ok := globalRule.GetRule(GroupResourceType{Group: "clickhouse.altinity.com", Kind: "ClickHouseInstallation"})
Expect(ok).Should(BeTrue())
Expect(chChildrenResources.DefaultGenListOptionFunc).Should(BeNil())
Expect(len(*chChildrenResources.SubResources)).Should(BeEquivalentTo(2))
chSts := chChildrenResources.SubResources.Get(ResourceType{APIVersion: "apps/v1", Kind: "StatefulSet"})
Expect(chSts).ShouldNot(BeNil())
Expect(chSts.listOptions).Should(BeNil())
chSvc := chChildrenResources.SubResources.Get(ResourceType{APIVersion: "v1", Kind: "Service"})
Expect(chSvc).ShouldNot(BeNil())
Expect(chSvc.listOptions).Should(BeNil())
// clear data
Expect(k8sClient.Delete(context.TODO(), &missConfigedCm)).Should(BeNil())
Expect(k8sClient.Delete(context.TODO(), &stsConfigMap)).Should(BeNil())

View File

@@ -92,6 +92,10 @@ type Endpoint struct {
// Default is 80.
Port int `json:"port"`
// +optional
// the name of the port
PortName string `json:"portName,omitempty"`
// the path for the endpoint
Path string `json:"path,omitempty"`

View File

@@ -149,13 +149,6 @@ func (w *workflow) ExecuteSteps(ctx monitorContext.Context, appRev *oamcore.Appl
return common.WorkflowStateSucceeded, nil
}
- if cacheValue, ok := StepStatusCache.Load(cacheKey); ok {
- // handle cache resource
- if len(wfStatus.Steps) < cacheValue.(int) {
- return common.WorkflowStateSkipping, nil
- }
- }
wfCtx, err := w.makeContext(w.app.Name)
if err != nil {
ctx.Error(err, "make context")
@@ -164,6 +157,13 @@ func (w *workflow) ExecuteSteps(ctx monitorContext.Context, appRev *oamcore.Appl
}
w.wfCtx = wfCtx
+ if cacheValue, ok := StepStatusCache.Load(cacheKey); ok {
+ // handle cache resource
+ if len(wfStatus.Steps) < cacheValue.(int) {
+ return common.WorkflowStateSkipping, nil
+ }
+ }
e := newEngine(ctx, wfCtx, w, wfStatus)
err = e.Run(taskRunners, w.dagMode)

View File

@@ -31,19 +31,20 @@ import (
)
const (
addonRegistryType = "type"
addonEndpoint = "endpoint"
addonOssBucket = "bucket"
addonPath = "path"
addonGitToken = "gitToken"
addonOssType = "OSS"
addonGitType = "git"
addonGiteeType = "gitee"
addonGitlabType = "gitlab"
addonHelmType = "helm"
addonUsername = "username"
addonPassword = "password"
addonRepoName = "repoName"
addonRegistryType = "type"
addonEndpoint = "endpoint"
addonOssBucket = "bucket"
addonPath = "path"
addonGitToken = "gitToken"
addonOssType = "OSS"
addonGitType = "git"
addonGiteeType = "gitee"
addonGitlabType = "gitlab"
addonHelmType = "helm"
addonUsername = "username"
addonPassword = "password"
// only gitlab registry need set this flag
addonRepoName = "gitlabRepoName"
addonHelmInsecureSkipTLS = "insecureSkipTLS"
)
@@ -67,10 +68,12 @@ func NewAddonRegistryCommand(c common.Args, ioStreams cmdutil.IOStreams) *cobra.
// NewAddAddonRegistryCommand return an addon registry create command
func NewAddAddonRegistryCommand(c common.Args, ioStreams cmdutil.IOStreams) *cobra.Command {
cmd := &cobra.Command{
Use: "add",
Short: "Add an addon registry.",
Long: "Add an addon registry.",
Example: `"vela addon registry add <my-registry-name> --type OSS --endpoint=<URL> --bucket=<bukect-name> or vela addon registry add my-repo --type git --endpoint=<URL> --path=<OSS-ptah> --gitToken=<git token>"`,
Use: "add",
Short: "Add an addon registry.",
Long: "Add an addon registry.",
Example: `add a helm repo registry: vela addon registry add --type=helm my-repo --endpoint=<URL>
add a github registry: vela addon registry add my-repo --type git --endpoint=<URL> --path=<ptah> --token=<git token>"
add a gitlab registry: vela addon registry add my-repo --type gitlab --endpoint=<URL> --gitlabRepoName=<repoName> --path=<path> --token=<git token>`,
RunE: func(cmd *cobra.Command, args []string) error {
registry, err := getRegistryFromArgs(cmd, args)
if err != nil {
@@ -298,6 +301,7 @@ func parseArgsFromFlag(cmd *cobra.Command) {
cmd.Flags().StringP(addonGitToken, "", "", "specify the github repo token")
cmd.Flags().StringP(addonUsername, "", "", "specify the Helm addon registry username")
cmd.Flags().StringP(addonPassword, "", "", "specify the Helm addon registry password")
cmd.Flags().StringP(addonRepoName, "", "", "specify the gitlab addon registry repoName")
cmd.Flags().BoolP(addonHelmInsecureSkipTLS, "", false,
"specify the Helm addon registry skip tls verify")
}

View File

@@ -1071,12 +1071,12 @@ func hasAddon(addons []*pkgaddon.UIData, name string) bool {
return false
}
- func transClusters(cstr string) []string {
+ func transClusters(cstr string) []interface{} {
if len(cstr) == 0 {
return nil
}
cstr = strings.TrimPrefix(strings.TrimSuffix(cstr, "}"), "{")
- var clusterL []string
+ var clusterL []interface{}
clusterList := strings.Split(cstr, ",")
for _, v := range clusterList {
clusterL = append(clusterL, strings.TrimSpace(v))

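transClusters now returns []interface{} rather than []string. One plausible reading (an assumption, the commit message does not spell it out): the cluster list is merged into addon-enable arguments that round-trip through JSON, and JSON decoding always yields []interface{}, so a []string never compares equal to the decoded form:

package main

import (
	"encoding/json"
	"fmt"
	"reflect"
)

func main() {
	var decoded map[string]interface{}
	_ = json.Unmarshal([]byte(`{"clusters":["c1","c2"]}`), &decoded)
	fromFlag := []interface{}{"c1", "c2"} // what transClusters now returns
	// true with []interface{}; always false if fromFlag were a []string.
	fmt.Println(reflect.DeepEqual(decoded["clusters"], fromFlag))
}
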
View File

@@ -169,7 +169,7 @@ var _ = Describe("Addon status or info", func() {
Expect(ds.DeleteRegistry(context.Background(), "KubeVela")).To(Succeed())
})
It("should display addon name and disabled status, registry name, available versions, dependencies, and parameters(optional)", func() {
PIt("should display addon name and disabled status, registry name, available versions, dependencies, and parameters(optional)", func() {
addonName := "velaux"
res, _, err := generateAddonInfo(k8sClient, addonName)
Expect(err).Should(BeNil())

View File

@@ -192,19 +192,19 @@ func TestAddonUpgradeCmdWithErrLocalPath(t *testing.T) {
func TestTransCluster(t *testing.T) {
testcase := []struct {
str string
- res []string
+ res []interface{}
}{
{
str: "{cluster1, cluster2}",
res: []string{"cluster1", "cluster2"},
res: []interface{}{"cluster1", "cluster2"},
},
{
str: "{cluster1,cluster2}",
res: []string{"cluster1", "cluster2"},
res: []interface{}{"cluster1", "cluster2"},
},
{
str: "{cluster1, cluster2 }",
res: []string{"cluster1", "cluster2"},
res: []interface{}{"cluster1", "cluster2"},
},
}
for _, s := range testcase {

View File

@@ -436,10 +436,10 @@ var _ = Describe("Test velaQL", func() {
"https://ingress.domain.path/test",
"https://ingress.domain.path/test2",
fmt.Sprintf("http://%s:30229", gatewayIP),
"http://10.10.10.10:30180",
"http://text.example.com:30180",
"10.10.10.10:30181",
"text.example.com:30181",
"http://10.10.10.10",
"http://text.example.com",
"10.10.10.10:81",
"text.example.com:81",
// helmRelease
fmt.Sprintf("http://%s:30002", gatewayIP),
"http://ingress.domain.helm",

View File

@@ -230,14 +230,17 @@ func prepareToForceDeleteTerraformComponents(ctx context.Context, k8sClient clie
for _, c := range app.Spec.Components {
var def corev1beta1.ComponentDefinition
if err := k8sClient.Get(ctx, client.ObjectKey{Name: c.Type, Namespace: types.DefaultKubeVelaNS}, &def); err != nil {
return err
if !apierrors.IsNotFound(err) {
return err
}
if err := k8sClient.Get(ctx, client.ObjectKey{Name: c.Type, Namespace: namespace}, &def); err != nil {
return err
}
}
if def.Spec.Schematic != nil && def.Spec.Schematic.Terraform != nil {
var conf terraformapi.Configuration
if err := k8sClient.Get(ctx, client.ObjectKey{Name: c.Name, Namespace: namespace}, &conf); err != nil {
-if !apierrors.IsNotFound(err) {
-return err
-}
+return err
}
conf.Spec.ForceDelete = &forceDelete
if err := k8sClient.Update(ctx, &conf); err != nil {
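
To make the control flow of this hunk easier to follow, here is the definition lookup extracted into a standalone helper. The function name and packaging are illustrative; the types and the vela-system-first, application-namespace-fallback behavior come from the hunk itself:

```go
package forcedelete

import (
	"context"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"sigs.k8s.io/controller-runtime/pkg/client"

	corev1beta1 "github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
	"github.com/oam-dev/kubevela/apis/types"
)

// getComponentDefinition prefers the definition installed in vela-system and
// only falls back to the application's own namespace when it is not found
// there, so user-scoped definitions no longer break force deletion.
func getComponentDefinition(ctx context.Context, k8sClient client.Client, name, namespace string) (*corev1beta1.ComponentDefinition, error) {
	def := &corev1beta1.ComponentDefinition{}
	if err := k8sClient.Get(ctx, client.ObjectKey{Name: name, Namespace: types.DefaultKubeVelaNS}, def); err != nil {
		if !apierrors.IsNotFound(err) {
			return nil, err
		}
		if err := k8sClient.Get(ctx, client.ObjectKey{Name: name, Namespace: namespace}, def); err != nil {
			return nil, err
		}
	}
	return def, nil
}
```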


@@ -19,7 +19,7 @@ import (
"context"
"testing"
terraformapi "github.com/oam-dev/terraform-controller/api/v1beta1"
terraformapi "github.com/oam-dev/terraform-controller/api/v1beta2"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -55,7 +55,7 @@ func TestPrepareToForceDeleteTerraformComponents(t *testing.T) {
def1 := &v1beta1.ComponentDefinition{
TypeMeta: metav1.TypeMeta{
Kind: "ComponentDefinition",
APIVersion: "core.oam.dev/v1beta1",
APIVersion: "core.oam.dev/v1beta2",
},
ObjectMeta: metav1.ObjectMeta{
Name: "d1",
@@ -75,6 +75,16 @@ func TestPrepareToForceDeleteTerraformComponents(t *testing.T) {
Namespace: "default",
},
}
userNamespace := "another-namespace"
def2 := def1.DeepCopy()
def2.SetNamespace(userNamespace)
app2 := app1.DeepCopy()
app2.SetNamespace(userNamespace)
app2.SetName("app2")
conf2 := conf1.DeepCopy()
conf2.SetNamespace(userNamespace)
k8sClient1 := fake.NewClientBuilder().WithScheme(s).WithObjects(app1, def1, conf1).Build()
k8sClient2 := fake.NewClientBuilder().Build()
@@ -83,6 +93,7 @@ func TestPrepareToForceDeleteTerraformComponents(t *testing.T) {
k8sClient4 := fake.NewClientBuilder().WithScheme(s).WithObjects(app1, def1).Build()
k8sClient5 := fake.NewClientBuilder().WithScheme(s).WithObjects(app2, def2, conf2).Build()
type args struct {
k8sClient client.Client
namespace string
@@ -141,16 +152,27 @@ func TestPrepareToForceDeleteTerraformComponents(t *testing.T) {
"app1",
},
want: want{
errMsg: "no kind is registered for the type",
errMsg: "configurations.terraform.core.oam.dev \"c1\" not found",
},
},
"can read definition from application namespace": {
args: args{
k8sClient5,
userNamespace,
"app2",
},
want: want{},
},
}
for name, tc := range testcases {
t.Run(name, func(t *testing.T) {
err := prepareToForceDeleteTerraformComponents(ctx, tc.args.k8sClient, tc.args.namespace, tc.args.name)
-if err != nil || tc.want.errMsg != "" {
+if err != nil {
+assert.NotEmpty(t, tc.want.errMsg)
assert.Contains(t, err.Error(), tc.want.errMsg)
+} else {
+assert.Empty(t, tc.want.errMsg)
+}
})
}


@@ -20,7 +20,6 @@ import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"strings"
"time"
@@ -42,6 +41,7 @@ import (
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
kubevelatypes "github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/multicluster"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/utils"
@@ -124,7 +124,7 @@ var _ = Describe("Test multicluster scenario", func() {
_, err := execCommand("cluster", "join", "/tmp/worker.kubeconfig", "--name", testClusterName)
Expect(err).Should(Succeed())
app := &v1beta1.Application{}
bs, err := ioutil.ReadFile("./testdata/app/example-lite-envbinding-app.yaml")
bs, err := os.ReadFile("./testdata/app/example-lite-envbinding-app.yaml")
Expect(err).Should(Succeed())
appYaml := strings.ReplaceAll(string(bs), "TEST_CLUSTER_NAME", testClusterName)
Expect(yaml.Unmarshal([]byte(appYaml), app)).Should(Succeed())
@@ -230,7 +230,7 @@ var _ = Describe("Test multicluster scenario", func() {
// 4. Component selector.
By("apply application")
app := &v1beta1.Application{}
bs, err := ioutil.ReadFile("./testdata/app/example-envbinding-app.yaml")
bs, err := os.ReadFile("./testdata/app/example-envbinding-app.yaml")
Expect(err).Should(Succeed())
appYaml := strings.ReplaceAll(strings.ReplaceAll(string(bs), "TEST_NAMESPACE", testNamespace), "PROD_NAMESPACE", prodNamespace)
Expect(yaml.Unmarshal([]byte(appYaml), app)).Should(Succeed())
@@ -277,7 +277,7 @@ var _ = Describe("Test multicluster scenario", func() {
// 5. add env
By("apply application")
app := &v1beta1.Application{}
bs, err := ioutil.ReadFile("./testdata/app/example-envbinding-app-wo-workflow.yaml")
bs, err := os.ReadFile("./testdata/app/example-envbinding-app-wo-workflow.yaml")
Expect(err).Should(Succeed())
appYaml := strings.ReplaceAll(string(bs), "TEST_NAMESPACE", testNamespace)
Expect(yaml.Unmarshal([]byte(appYaml), app)).Should(Succeed())
@@ -364,7 +364,7 @@ var _ = Describe("Test multicluster scenario", func() {
It("Test helm addon relied feature", func() {
By("apply application")
app := &v1beta1.Application{}
bs, err := ioutil.ReadFile("./testdata/app/app-apply-in-order.yaml")
bs, err := os.ReadFile("./testdata/app/app-apply-in-order.yaml")
Expect(err).Should(Succeed())
Expect(yaml.Unmarshal([]byte(bs), app)).Should(Succeed())
app.SetNamespace(testNamespace)
@@ -568,7 +568,7 @@ var _ = Describe("Test multicluster scenario", func() {
})
It("Test applications with bad resource", func() {
bs, err := ioutil.ReadFile("./testdata/app/app-bad-resource.yaml")
bs, err := os.ReadFile("./testdata/app/app-bad-resource.yaml")
Expect(err).Should(Succeed())
appYaml := strings.ReplaceAll(string(bs), "TEST_NAMESPACE", testNamespace)
app := &v1beta1.Application{}
@@ -588,7 +588,7 @@ var _ = Describe("Test multicluster scenario", func() {
})
It("Test applications with env and storage trait", func() {
bs, err := ioutil.ReadFile("./testdata/app/app-with-env-and-storage.yaml")
bs, err := os.ReadFile("./testdata/app/app-with-env-and-storage.yaml")
Expect(err).Should(Succeed())
appYaml := strings.ReplaceAll(string(bs), "TEST_NAMESPACE", testNamespace)
app := &v1beta1.Application{}
@@ -601,7 +601,7 @@ var _ = Describe("Test multicluster scenario", func() {
})
It("Test applications with gc policy change (onAppUpdate -> never)", func() {
bs, err := ioutil.ReadFile("./testdata/app/app-gc-policy-change.yaml")
bs, err := os.ReadFile("./testdata/app/app-gc-policy-change.yaml")
Expect(err).Should(Succeed())
appYaml := strings.ReplaceAll(string(bs), "TEST_NAMESPACE", testNamespace)
app := &v1beta1.Application{}
@@ -657,7 +657,7 @@ var _ = Describe("Test multicluster scenario", func() {
})
It("Test Application with env in webservice and labels & storage trait", func() {
bs, err := ioutil.ReadFile("./testdata/app/app-with-env-labels-storage.yaml")
bs, err := os.ReadFile("./testdata/app/app-with-env-labels-storage.yaml")
Expect(err).Should(Succeed())
app := &v1beta1.Application{}
Expect(yaml.Unmarshal(bs, app)).Should(Succeed())
@@ -673,5 +673,119 @@ var _ = Describe("Test multicluster scenario", func() {
Expect(deploy.Spec.Template.Spec.Containers[0].Env[0].Value).Should(Equal("testValue"))
Expect(len(deploy.Spec.Template.Spec.Volumes)).Should(Equal(1))
})
It("Test application with workflow change will rerun", func() {
By("create application")
bs, err := os.ReadFile("./testdata/app/app-lite-with-workflow.yaml")
Expect(err).Should(Succeed())
app := &v1beta1.Application{}
Expect(yaml.Unmarshal(bs, app)).Should(Succeed())
app.SetNamespace(testNamespace)
Expect(k8sClient.Create(hubCtx, app)).Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, client.ObjectKeyFromObject(app), app)).Should(Succeed())
g.Expect(app.Status.Phase).Should(Equal(common.ApplicationRunning))
}, 20*time.Second).Should(Succeed())
Expect(k8sClient.Get(hubCtx, client.ObjectKey{Namespace: testNamespace, Name: "data-worker"}, &appsv1.Deployment{})).Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, client.ObjectKeyFromObject(app), app)).Should(Succeed())
app.Spec.Workflow.Steps[0].Properties = &runtime.RawExtension{Raw: []byte(`{"policies":["worker"]}`)}
g.Expect(k8sClient.Update(hubCtx, app)).Should(Succeed())
}, 10*time.Second).Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, client.ObjectKey{Namespace: testNamespace, Name: "data-worker"}, &appsv1.Deployment{})).Should(Satisfy(kerrors.IsNotFound))
g.Expect(k8sClient.Get(workerCtx, client.ObjectKey{Namespace: testNamespace, Name: "data-worker"}, &appsv1.Deployment{})).Should(Succeed())
}, 20*time.Second).Should(Succeed())
})
It("Test application with failed gc and restart workflow", func() {
By("duplicate cluster")
secret := &corev1.Secret{}
const secretName = "disconnection-test"
Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: kubevelatypes.DefaultKubeVelaNS, Name: WorkerClusterName}, secret)).Should(Succeed())
secret.SetName(secretName)
secret.SetResourceVersion("")
Expect(k8sClient.Create(hubCtx, secret)).Should(Succeed())
defer func() {
_ = k8sClient.Delete(hubCtx, secret)
}()
By("create cluster normally")
bs, err := os.ReadFile("./testdata/app/app-disconnection-test.yaml")
Expect(err).Should(Succeed())
app := &v1beta1.Application{}
Expect(yaml.Unmarshal(bs, app)).Should(Succeed())
app.SetNamespace(namespace)
Expect(k8sClient.Create(hubCtx, app)).Should(Succeed())
key := client.ObjectKeyFromObject(app)
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, key, app)).Should(Succeed())
g.Expect(app.Status.Phase).Should(Equal(common.ApplicationRunning))
}).WithTimeout(30 * time.Second).WithPolling(2 * time.Second).Should(Succeed())
By("disconnect cluster")
Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: kubevelatypes.DefaultKubeVelaNS, Name: secretName}, secret)).Should(Succeed())
secret.Data["tls.crt"] = []byte("-")
Expect(k8sClient.Update(hubCtx, secret)).Should(Succeed())
By("update application")
Expect(k8sClient.Get(hubCtx, key, app)).Should(Succeed())
app.Spec.Policies = nil
Expect(k8sClient.Update(hubCtx, app)).Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, key, app)).Should(Succeed())
g.Expect(app.Status.ObservedGeneration).Should(Equal(app.Generation))
g.Expect(app.Status.Phase).Should(Equal(common.ApplicationRunning))
rts := &v1beta1.ResourceTrackerList{}
g.Expect(k8sClient.List(hubCtx, rts, client.MatchingLabels{oam.LabelAppName: key.Name, oam.LabelAppNamespace: key.Namespace})).Should(Succeed())
cnt := 0
for _, item := range rts.Items {
if item.Spec.Type == v1beta1.ResourceTrackerTypeVersioned {
cnt++
}
}
g.Expect(cnt).Should(Equal(2))
}).WithTimeout(30 * time.Second).WithPolling(2 * time.Second).Should(Succeed())
By("try update application again")
Expect(k8sClient.Get(hubCtx, key, app)).Should(Succeed())
if app.Annotations == nil {
app.Annotations = map[string]string{}
}
app.Annotations[oam.AnnotationPublishVersion] = "test"
Expect(k8sClient.Update(hubCtx, app)).Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, key, app)).Should(Succeed())
g.Expect(app.Status.LatestRevision).ShouldNot(BeNil())
g.Expect(app.Status.LatestRevision.Revision).Should(Equal(int64(3)))
g.Expect(app.Status.ObservedGeneration).Should(Equal(app.Generation))
g.Expect(app.Status.Phase).Should(Equal(common.ApplicationRunning))
}).WithTimeout(1 * time.Minute).WithPolling(2 * time.Second).Should(Succeed())
By("clear disconnection cluster secret")
Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: kubevelatypes.DefaultKubeVelaNS, Name: secretName}, secret)).Should(Succeed())
Expect(k8sClient.Delete(hubCtx, secret)).Should(Succeed())
By("update application again")
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, key, app)).Should(Succeed())
app.Annotations[oam.AnnotationPublishVersion] = "test2"
g.Expect(k8sClient.Update(hubCtx, app)).Should(Succeed())
}).WithTimeout(10 * time.Second).WithPolling(2 * time.Second).Should(Succeed())
By("wait gc application completed")
Eventually(func(g Gomega) {
rts := &v1beta1.ResourceTrackerList{}
g.Expect(k8sClient.List(hubCtx, rts, client.MatchingLabels{oam.LabelAppName: key.Name, oam.LabelAppNamespace: key.Namespace})).Should(Succeed())
cnt := 0
for _, item := range rts.Items {
if item.Spec.Type == v1beta1.ResourceTrackerTypeVersioned {
cnt++
}
}
g.Expect(cnt).Should(Equal(1))
}).WithTimeout(3 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())
})
})
})
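
The two Eventually blocks above both count versioned ResourceTrackers. A small helper sketch (the function name is illustrative; the labels, types, and List call are exactly the ones used in the test) makes the invariant explicit: while gc is blocked by the unreachable cluster, two versioned trackers coexist, and once the broken cluster secret is deleted, gc completes and only the latest one survives:

```go
package multicluster_test

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
	"github.com/oam-dev/kubevela/pkg/oam"
)

// countVersionedRTs returns how many versioned ResourceTrackers an application
// still owns, i.e. how many application revisions gc has not yet collected.
func countVersionedRTs(ctx context.Context, c client.Client, appName, appNs string) (int, error) {
	rts := &v1beta1.ResourceTrackerList{}
	if err := c.List(ctx, rts, client.MatchingLabels{
		oam.LabelAppName:      appName,
		oam.LabelAppNamespace: appNs,
	}); err != nil {
		return 0, err
	}
	cnt := 0
	for _, item := range rts.Items {
		if item.Spec.Type == v1beta1.ResourceTrackerTypeVersioned {
			cnt++
		}
	}
	return cnt, nil
}
```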


@@ -0,0 +1,17 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
  name: app-disconnection-test
spec:
  components:
    - type: k8s-objects
      name: app-dis-cm
      properties:
        objects:
          - apiVersion: v1
            kind: ConfigMap
  policies:
    - type: topology
      name: disconnection-test
      properties:
        clusters: ["disconnection-test"]


@@ -0,0 +1,28 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
  name: example-lite-app
spec:
  components:
    - name: data-worker
      type: worker
      properties:
        image: busybox
        cmd:
          - sleep
          - '1000000'
  policies:
    - name: local
      type: topology
      properties:
        clusters: ["local"]
    - name: worker
      type: topology
      properties:
        clusters: ["cluster-worker"]
  workflow:
    steps:
      - name: deploy
        type: deploy
        properties:
          policies: ["local"]


@@ -19,7 +19,7 @@ gateway: {
message: "Visiting URL: " + context.outputs.ingress.spec.rules[0].host + ", IP: " + igs[0].ip
}
if igs[0].host == _|_ {
message: "Host not specified, visit the cluster or load balancer in front of the cluster"
message: "Host not specified, visit the cluster or load balancer in front of the cluster with IP: " + igs[0].ip
}
}
if igs[0].ip == _|_ {


@@ -19,8 +19,9 @@ template: {
}]
}]
initContainers: [{
-name:  parameter.name
-image: parameter.image
+name:            parameter.name
+image:           parameter.image
+imagePullPolicy: parameter.imagePullPolicy
if parameter.cmd != _|_ {
command: parameter.cmd
}
@@ -50,6 +51,9 @@ template: {
// +usage=Specify the image of init container
image: string
// +usage=Specify image pull policy for your service
imagePullPolicy: *"IfNotPresent" | "Always" | "Never"
// +usage=Specify the commands run in the init container
cmd?: [...string]
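
Outside CUE, the effect of the new parameter is simply to set the standard pull-policy field on the rendered init container. A hedged Go illustration (the function is hypothetical; the field mapping is the standard one from k8s.io/api/core/v1):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// renderInitContainer mirrors what the template above produces: name, image,
// the new imagePullPolicy (the schema defaults it to IfNotPresent), and an
// optional command.
func renderInitContainer(name, image string, pullPolicy corev1.PullPolicy, cmd []string) corev1.Container {
	c := corev1.Container{
		Name:            name,
		Image:           image,
		ImagePullPolicy: pullPolicy,
	}
	if len(cmd) > 0 {
		c.Command = cmd
	}
	return c
}

func main() {
	fmt.Printf("%+v\n", renderInitContainer("init", "busybox", corev1.PullIfNotPresent, []string{"sh", "-c", "echo ready"}))
}
```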