Compare commits

...

40 Commits

Author SHA1 Message Date
Tianxin Dong
7df2b34a0b Fix: fix signedKey using platform id (#4653)
Signed-off-by: FogDong <dongtianxin.tx@alibaba-inc.com>

Signed-off-by: FogDong <dongtianxin.tx@alibaba-inc.com>
2022-08-24 19:33:24 +08:00
github-actions[bot]
2a03e16098 fix: add supported but missing provider (#4650)
Signed-off-by: Yuedong Wu <57584831+lunarwhite@users.noreply.github.com>
(cherry picked from commit fa96c917a8)

Co-authored-by: Yuedong Wu <57584831+lunarwhite@users.noreply.github.com>
2022-08-24 09:28:35 +08:00
github-actions[bot]
110927ed97 Fix: fix writing logs to file (#4588)
Signed-off-by: FogDong <dongtianxin.tx@alibaba-inc.com>
(cherry picked from commit d4b3bbf049)

Co-authored-by: FogDong <dongtianxin.tx@alibaba-inc.com>
2022-08-10 10:00:39 +08:00
github-actions[bot]
90fbfa0f81 Feat: definition support controller requirement (#4578)
Signed-off-by: yangsoon <songyang.song@alibaba-inc.com>
(cherry picked from commit 714f218f90)

Co-authored-by: yangsoon <songyang.song@alibaba-inc.com>
2022-08-08 16:07:44 +08:00
github-actions[bot]
7fb045328d [Backport release-1.4] Fix: reject applications with empty policy properties (#4565)
* Fix: reject applications with empty policies

Signed-off-by: Charlie Chiang <charlie_c_0129@outlook.com>
(cherry picked from commit 337032511e)

* Style: change err msg

Signed-off-by: Charlie Chiang <charlie_c_0129@outlook.com>
(cherry picked from commit 2bb5c0245a)

* Fix: use 400 instead of 422 to show err msg

Signed-off-by: Charlie Chiang <charlie_c_0129@outlook.com>
(cherry picked from commit 553ac92c62)

* Test: fix tests

Signed-off-by: Charlie Chiang <charlie_c_0129@outlook.com>
(cherry picked from commit 0ce352d13b)

Co-authored-by: Charlie Chiang <charlie_c_0129@outlook.com>
2022-08-05 15:04:47 +08:00
Somefive
766c5852c6 Fix: address vela-core crash due to empty policy properties (#4473) (#4480)
* Fix: fix topology core crash

Signed-off-by: Charlie Chiang <charlie_c_0129@outlook.com>

* Test: add tests

Signed-off-by: Charlie Chiang <charlie_c_0129@outlook.com>

* Fix: same problem in other places

Signed-off-by: Charlie Chiang <charlie_c_0129@outlook.com>

* Style: remove empty line

Signed-off-by: Charlie Chiang <charlie_c_0129@outlook.com>

* Feat: raise error when empty topology is used

Signed-off-by: Charlie Chiang <charlie_c_0129@outlook.com>

* Feat: raise error when empty override policy is used

Signed-off-by: Charlie Chiang <charlie_c_0129@outlook.com>

Co-authored-by: Charlie Chiang <charlie_c_0129@outlook.com>
2022-07-27 13:48:19 +08:00
github-actions[bot]
4cb9a14b18 Fix: fix logs to record the right publish version (#4476)
Signed-off-by: yangsoon <songyang.song@alibaba-inc.com>
(cherry picked from commit 4846104c8f)

Co-authored-by: yangsoon <songyang.song@alibaba-inc.com>
2022-07-27 01:13:07 +08:00
github-actions[bot]
9a1e75cf48 Fix: The apply failure error is ignored when the workflow is executed (#4461)
Signed-off-by: yangsoon <songyang.song@alibaba-inc.com>
(cherry picked from commit b1d8e6c88b)

Co-authored-by: yangsoon <songyang.song@alibaba-inc.com>
2022-07-25 22:19:26 +08:00
Tianxin Dong
192dc8966d Fix: fix backoff time after default backoff times (#4414)
Signed-off-by: FogDong <dongtianxin.tx@alibaba-inc.com>
2022-07-20 17:48:28 +08:00
Jianbo Sun
b596b70ebe Fix: addon function converted (#4411)
Signed-off-by: Jianbo Sun <jianbo.sjb@alibaba-inc.com>
2022-07-19 19:54:44 +08:00
github-actions[bot]
0cd370e867 Fix: fix volumes duplicate in list (#4390)
Signed-off-by: FogDong <dongtianxin.tx@alibaba-inc.com>
(cherry picked from commit 08fb73aa95)

Co-authored-by: FogDong <dongtianxin.tx@alibaba-inc.com>
2022-07-15 20:11:49 +08:00
github-actions[bot]
d9adc73e5c Fix: add usage comment for ref-objects (#4386)
Signed-off-by: Somefive <yd219913@alibaba-inc.com>
(cherry picked from commit 0418c83117)

Co-authored-by: Somefive <yd219913@alibaba-inc.com>
2022-07-14 18:51:05 +08:00
github-actions[bot]
4a2d9807c8 [Backport release-1.4] Fix: fail directly when app terminated (#4385)
* fail directly when app terminated

Signed-off-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
(cherry picked from commit 842c211cf6)

* support suspend

Signed-off-by: 楚岳 <wangyike.wyk@alibaba-inc.com>

fix typo

Signed-off-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
(cherry picked from commit 307ef372f1)

Co-authored-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
2022-07-14 18:50:23 +08:00
github-actions[bot]
840cb8ce58 Fix: several minor bugs (#4381)
Signed-off-by: Somefive <yd219913@alibaba-inc.com>
(cherry picked from commit c5f10a5723)

Co-authored-by: Somefive <yd219913@alibaba-inc.com>
2022-07-14 11:22:41 +08:00
github-actions[bot]
5a64fec916 Fix: abuse timeout context in terraform provider (#4375)
Signed-off-by: Jianbo Sun <jianbo.sjb@alibaba-inc.com>
(cherry picked from commit ab56b3c274)

Co-authored-by: Jianbo Sun <jianbo.sjb@alibaba-inc.com>
2022-07-13 15:48:13 +08:00
Jianbo Sun
657a374ded Fix: add the job of independently publishing chart packages (#4360) (#4361)
* Fix: add the job of independently publishing chart packages

Signed-off-by: barnettZQG <barnett.zqg@gmail.com>

* Fix: add the job of independently publishing chart packages

Signed-off-by: barnettZQG <barnett.zqg@gmail.com>

Co-authored-by: barnettZQG <barnett.zqg@gmail.com>
2022-07-12 12:31:05 +08:00
Tianxin Dong
dfe12cd9ca [Backport-1.4]: optimize imports packages to reduce 75% cpu with better performance (#4355)
* Feat: optimize imports packages

Signed-off-by: FogDong <dongtianxin.tx@alibaba-inc.com>

* fix test

Signed-off-by: FogDong <dongtianxin.tx@alibaba-inc.com>
2022-07-11 18:33:53 +08:00
github-actions[bot]
cd42f67848 Fix: init container bug (#4354)
Signed-off-by: Somefive <yd219913@alibaba-inc.com>
(cherry picked from commit 3f116c7f10)

Co-authored-by: Somefive <yd219913@alibaba-inc.com>
2022-07-11 17:03:49 +08:00
github-actions[bot]
61d2c588e3 Fix: health check use original ns if no override and original exists (#4353)
Signed-off-by: Somefive <yd219913@alibaba-inc.com>
(cherry picked from commit f7923b5ac9)

Co-authored-by: Somefive <yd219913@alibaba-inc.com>
2022-07-11 16:49:24 +08:00
github-actions[bot]
b3dad698a5 Fix: enhance sidecar & init traits (#4343)
Signed-off-by: Somefive <yd219913@alibaba-inc.com>
(cherry picked from commit dc9b18d119)

Co-authored-by: Somefive <yd219913@alibaba-inc.com>
2022-07-08 19:09:55 +08:00
github-actions[bot]
ec5159c2ca Fix: disable apprev status update when apprev disabled (#4338)
Signed-off-by: Somefive <yd219913@alibaba-inc.com>
(cherry picked from commit b4c8e3265a)

Co-authored-by: Somefive <yd219913@alibaba-inc.com>
2022-07-07 15:51:35 +08:00
github-actions[bot]
a7b2b221e0 [Backport release-1.4] Fix: more cluster system info range. (#4333)
* more collect info

Signed-off-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
(cherry picked from commit b27764e072)

* fix comments

Signed-off-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
(cherry picked from commit 57805aa844)

Co-authored-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
2022-07-06 16:15:31 +08:00
github-actions[bot]
caa495a5d9 [Backport release-1.4] Fix: ref-objects parameter with invalid field definition (#4330)
* fix: ref-objects parameter with invalid field definition

which cause validating webhook failed when use ref-objects component

Signed-off-by: jiangshantao <jiangshantao-dbg@qq.com>
(cherry picked from commit 13f328f362)

* fix: run make reviewable

Signed-off-by: jiangshantao <jiangshantao-dbg@qq.com>
(cherry picked from commit e09410af90)

Co-authored-by: jst <jst@meitu.com>
2022-07-06 14:18:17 +08:00
github-actions[bot]
15bea4fb64 Fix: fail to query the application logs with the special characters (#4308)
Signed-off-by: barnettZQG <barnett.zqg@gmail.com>
(cherry picked from commit 0101396a42)

Co-authored-by: barnettZQG <barnett.zqg@gmail.com>
2022-07-01 20:15:09 +08:00
github-actions[bot]
1a094a4eea Fix: Jfrog Webhook Handler Cannot Get Right Image (#4306)
Merge branch 'release-1.4'

Apply suggestions from code review

Co-authored-by: lqs429521992 <lqs429521992@qq.com>

Update webhook.go

Fix: format

Signed-off-by: Jianbo Sun <jianbo.sjb@alibaba-inc.com>
(cherry picked from commit b0d0a81731)

Co-authored-by: qingsliu <lqs429521992@qq.com>
2022-07-01 20:08:40 +08:00
github-actions[bot]
65b6f47330 Fix: fix the goroutine leak in http request (#4304)
Signed-off-by: FogDong <dongtianxin.tx@alibaba-inc.com>
(cherry picked from commit 559ef83abd)

Co-authored-by: FogDong <dongtianxin.tx@alibaba-inc.com>
2022-07-01 17:57:34 +08:00
github-actions[bot]
3a4cd2dca6 Fix: kube apply ignore userinfo for rt (#4300)
Signed-off-by: Somefive <yd219913@alibaba-inc.com>
(cherry picked from commit 2a9e741d4c)

Co-authored-by: Somefive <yd219913@alibaba-inc.com>
2022-07-01 17:32:52 +08:00
github-actions[bot]
9fabd950e5 Chore: avoid update version file when publish smaller version (#4287)
Signed-off-by: qiaozp <chivalry.pp@gmail.com>
(cherry picked from commit 696234e1aa)

Co-authored-by: qiaozp <chivalry.pp@gmail.com>
2022-06-30 15:51:17 +08:00
github-actions[bot]
0a012b4d34 Feat: enhance ServiceAccount trait to support privileges (#4278)
Signed-off-by: Somefive <yd219913@alibaba-inc.com>
(cherry picked from commit f8d4aca499)

Co-authored-by: Somefive <yd219913@alibaba-inc.com>
2022-06-29 15:00:32 +08:00
github-actions[bot]
7c231e6c48 Fix: support stdin and url for vela ql (#4277)
Signed-off-by: Jianbo Sun <jianbo.sjb@alibaba-inc.com>
(cherry picked from commit 235a4471c0)

Co-authored-by: Jianbo Sun <jianbo.sjb@alibaba-inc.com>
2022-06-29 14:53:26 +08:00
github-actions[bot]
36b6c3e7b5 Fix: trim quot char for velaql output (#4269)
Signed-off-by: Jianbo Sun <jianbo.sjb@alibaba-inc.com>
(cherry picked from commit fdd0c93dba)

Co-authored-by: Jianbo Sun <jianbo.sjb@alibaba-inc.com>
2022-06-27 16:52:26 +08:00
github-actions[bot]
4cc019722c [Backport release-1.4] Feat: enhance vela ql and support cue file (#4266)
* Feat: enhance vela ql and support cue file

Signed-off-by: Jianbo Sun <jianbo.sjb@alibaba-inc.com>
(cherry picked from commit 70ed417326)

* add statement

Signed-off-by: Jianbo Sun <jianbo.sjb@alibaba-inc.com>
(cherry picked from commit 25260eb9ef)

Co-authored-by: Jianbo Sun <jianbo.sjb@alibaba-inc.com>
2022-06-27 16:17:21 +08:00
github-actions[bot]
b040ae65da [Backport release-1.4] Fix: provider can't be added since 1.4 as context abused && Feat: add cache for remote terraform module in vela show (#4263)
* Fix: provider can't be added since 1.4 as context abused

Signed-off-by: Jianbo Sun <jianbo.sjb@alibaba-inc.com>
(cherry picked from commit b05fb26418)

* Feat: add cache for remote terraform module in vela show

Signed-off-by: Jianbo Sun <jianbo.sjb@alibaba-inc.com>
(cherry picked from commit 4722028530)

* Fix: add message for terraform resource in error state

Signed-off-by: Jianbo Sun <jianbo.sjb@alibaba-inc.com>
(cherry picked from commit 438145b12e)

Co-authored-by: Jianbo Sun <jianbo.sjb@alibaba-inc.com>
2022-06-27 11:32:20 +08:00
github-actions[bot]
f0fb4ed099 Feat: omit service output if there's nothing (#4262)
Signed-off-by: Sumit Tembe <sumit.tembe@outlook.com>
(cherry picked from commit 8911143389)

Co-authored-by: Sumit Tembe <sumit.tembe@outlook.com>
2022-06-27 10:31:49 +08:00
github-actions[bot]
7f89d12059 environment from configmap or secret not mandatory in task and crontask componentdefinition (#4254)
Signed-off-by: Carmendelope <carmen@napptive.com>
(cherry picked from commit 35f130ea08)

Co-authored-by: Carmendelope <carmen@napptive.com>
2022-06-25 09:00:24 +08:00
github-actions[bot]
3c61bcb8f0 Fix: vela status print wrong STATUS (#4249)
Signed-off-by: StevenLeiZhang <zhangleiic@163.com>
(cherry picked from commit 5f4616a453)

Co-authored-by: StevenLeiZhang <zhangleiic@163.com>
2022-06-24 17:16:58 +08:00
github-actions[bot]
a14b536fd1 [Backport release-1.4] Feat: skip validating version check (#4247)
* skip validating version check

Signed-off-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
(cherry picked from commit 4b94a967ae)

* add comments

Signed-off-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
(cherry picked from commit bbdc19c472)

* fix comments

Signed-off-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
(cherry picked from commit b5c705013d)

* fix test

Signed-off-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
(cherry picked from commit adff7ef309)

* fix comments

Signed-off-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
(cherry picked from commit cf4afee0d8)

* add compatible logic for old controller

Signed-off-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
(cherry picked from commit 3f5a6d33b9)

* modify minimal

Signed-off-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
(cherry picked from commit b020725112)

Co-authored-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
2022-06-24 14:13:17 +08:00
github-actions[bot]
ba5a726854 [Backport release-1.4] Fix: fixed the problems of display definition in web and support displaying WorkflowStep and Policy (#4241)
* Fix: fixed the problems of display definition in web

Some ComponentDefinitions, TraitDefinitions, WorkflowDefinitions
failed to show the usage in web browser

Signed-off-by: Zheng Xi Zhou <zhengxi.zzx@alibaba-inc.com>
(cherry picked from commit c8985ccc3e)

* set printable type for {}

Signed-off-by: Zheng Xi Zhou <zhengxi.zzx@alibaba-inc.com>
(cherry picked from commit 87efb357b2)

* support WorkflowSteps and Policies

Signed-off-by: Zheng Xi Zhou <zhengxi.zzx@alibaba-inc.com>
(cherry picked from commit 8fb83152bc)

Co-authored-by: Zheng Xi Zhou <zhengxi.zzx@alibaba-inc.com>
2022-06-23 19:46:51 +08:00
github-actions[bot]
ffb9d06427 Fix: json-patch & json-merge-patch open result (#4229)
Signed-off-by: Somefive <yd219913@alibaba-inc.com>
(cherry picked from commit 4ea3ca8d5b)

Co-authored-by: Somefive <yd219913@alibaba-inc.com>
2022-06-22 20:05:19 +08:00
barnettZQG
819dc26ace Chore: change the acr registry address (#4214) (#4216)
Signed-off-by: barnettZQG <barnett.zqg@gmail.com>
2022-06-22 14:46:13 +08:00
115 changed files with 1820 additions and 553 deletions

89
.github/workflows/chart.yaml vendored Normal file
View File

@@ -0,0 +1,89 @@
name: Publish Chart
on:
push:
tags:
- "v*"
workflow_dispatch: { }
env:
BUCKET: ${{ secrets.OSS_BUCKET }}
ENDPOINT: ${{ secrets.OSS_ENDPOINT }}
ACCESS_KEY: ${{ secrets.OSS_ACCESS_KEY }}
ACCESS_KEY_SECRET: ${{ secrets.OSS_ACCESS_KEY_SECRET }}
ARTIFACT_HUB_REPOSITORY_ID: ${{ secrets.ARTIFACT_HUB_REPOSITORY_ID }}
jobs:
publish-charts:
env:
HELM_CHARTS_DIR: charts
HELM_CHART: charts/vela-core
MINIMAL_HELM_CHART: charts/vela-minimal
LEGACY_HELM_CHART: legacy/charts/vela-core-legacy
VELA_ROLLOUT_HELM_CHART: runtime/rollout/charts
LOCAL_OSS_DIRECTORY: .oss/
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@master
- name: Get git revision
id: vars
shell: bash
run: |
echo "::set-output name=git_revision::$(git rev-parse --short HEAD)"
- name: Install Helm
uses: azure/setup-helm@v1
with:
version: v3.4.0
- name: Setup node
uses: actions/setup-node@v2
with:
node-version: '14'
- name: Generate helm doc
run: |
make helm-doc-gen
- name: Prepare legacy chart
run: |
rsync -r $LEGACY_HELM_CHART $HELM_CHARTS_DIR
rsync -r $HELM_CHART/* $LEGACY_HELM_CHART --exclude=Chart.yaml --exclude=crds
- name: Prepare vela chart
run: |
rsync -r $VELA_ROLLOUT_HELM_CHART $HELM_CHARTS_DIR
- name: Get the version
id: get_version
run: |
VERSION=${GITHUB_REF#refs/tags/}
echo ::set-output name=VERSION::${VERSION}
- name: Tag helm chart image
run: |
image_tag=${{ steps.get_version.outputs.VERSION }}
chart_version=${{ steps.get_version.outputs.VERSION }}
sed -i "s/latest/${image_tag}/g" $HELM_CHART/values.yaml
sed -i "s/latest/${image_tag}/g" $MINIMAL_HELM_CHART/values.yaml
sed -i "s/latest/${image_tag}/g" $LEGACY_HELM_CHART/values.yaml
sed -i "s/latest/${image_tag}/g" $VELA_ROLLOUT_HELM_CHART/values.yaml
chart_smever=${chart_version#"v"}
sed -i "s/0.1.0/$chart_smever/g" $HELM_CHART/Chart.yaml
sed -i "s/0.1.0/$chart_smever/g" $MINIMAL_HELM_CHART/Chart.yaml
sed -i "s/0.1.0/$chart_smever/g" $LEGACY_HELM_CHART/Chart.yaml
sed -i "s/0.1.0/$chart_smever/g" $VELA_ROLLOUT_HELM_CHART/Chart.yaml
- name: Install ossutil
run: wget http://gosspublic.alicdn.com/ossutil/1.7.0/ossutil64 && chmod +x ossutil64 && mv ossutil64 ossutil
- name: Configure Alibaba Cloud OSSUTIL
run: ./ossutil --config-file .ossutilconfig config -i ${ACCESS_KEY} -k ${ACCESS_KEY_SECRET} -e ${ENDPOINT} -c .ossutilconfig
- name: sync cloud to local
run: ./ossutil --config-file .ossutilconfig sync oss://$BUCKET/core $LOCAL_OSS_DIRECTORY
- name: add artifacthub stuff to the repo
run: |
rsync $HELM_CHART/README.md $LEGACY_HELM_CHART/README.md
rsync $HELM_CHART/README.md $VELA_ROLLOUT_HELM_CHART/README.md
sed -i "s/ARTIFACT_HUB_REPOSITORY_ID/$ARTIFACT_HUB_REPOSITORY_ID/g" hack/artifacthub/artifacthub-repo.yml
rsync hack/artifacthub/artifacthub-repo.yml $LOCAL_OSS_DIRECTORY
- name: Package helm charts
run: |
helm package $HELM_CHART --destination $LOCAL_OSS_DIRECTORY
helm package $MINIMAL_HELM_CHART --destination $LOCAL_OSS_DIRECTORY
helm package $LEGACY_HELM_CHART --destination $LOCAL_OSS_DIRECTORY
helm package $VELA_ROLLOUT_HELM_CHART --destination $LOCAL_OSS_DIRECTORY
helm repo index --url https://$BUCKET.$ENDPOINT/core $LOCAL_OSS_DIRECTORY
- name: sync local to cloud
run: ./ossutil --config-file .ossutilconfig sync $LOCAL_OSS_DIRECTORY oss://$BUCKET/core -f

View File

@@ -8,11 +8,8 @@ on:
workflow_dispatch: {}
env:
BUCKET: ${{ secrets.OSS_BUCKET }}
ENDPOINT: ${{ secrets.OSS_ENDPOINT }}
ACCESS_KEY: ${{ secrets.OSS_ACCESS_KEY }}
ACCESS_KEY_SECRET: ${{ secrets.OSS_ACCESS_KEY_SECRET }}
ARTIFACT_HUB_REPOSITORY_ID: ${{ secrets.ARTIFACT_HUB_REPOSITORY_ID }}
jobs:
publish-core-images:
@@ -47,8 +44,8 @@ jobs:
- name: Login Alibaba Cloud ACR
uses: docker/login-action@v1
with:
registry: kubevela-registry.cn-hangzhou.cr.aliyuncs.com
username: ${{ secrets.ACR_USERNAME }}@aliyun-inner.com
registry: ${{ secrets.ACR_DOMAIN }}
username: ${{ secrets.ACR_USERNAME }}
password: ${{ secrets.ACR_PASSWORD }}
- uses: docker/setup-qemu-action@v1
- uses: docker/setup-buildx-action@v1
@@ -72,7 +69,7 @@ jobs:
tags: |-
docker.io/oamdev/vela-core:${{ steps.get_version.outputs.VERSION }}
ghcr.io/${{ github.repository_owner }}/oamdev/vela-core:${{ steps.get_version.outputs.VERSION }}
kubevela-registry.cn-hangzhou.cr.aliyuncs.com/oamdev/vela-core:${{ steps.get_version.outputs.VERSION }}
${{ secrets.ACR_DOMAIN }}/oamdev/vela-core:${{ steps.get_version.outputs.VERSION }}
- uses: docker/build-push-action@v2
name: Build & Pushing CLI for Dockerhub, GHCR and ACR
@@ -91,7 +88,7 @@ jobs:
tags: |-
docker.io/oamdev/vela-cli:${{ steps.get_version.outputs.VERSION }}
ghcr.io/${{ github.repository_owner }}/oamdev/vela-cli:${{ steps.get_version.outputs.VERSION }}
kubevela-registry.cn-hangzhou.cr.aliyuncs.com/oamdev/vela-cli:${{ steps.get_version.outputs.VERSION }}
${{ secrets.ACR_DOMAIN }}/oamdev/vela-cli:${{ steps.get_version.outputs.VERSION }}
publish-addon-images:
runs-on: ubuntu-latest
@@ -125,8 +122,8 @@ jobs:
- name: Login Alibaba Cloud ACR
uses: docker/login-action@v1
with:
registry: kubevela-registry.cn-hangzhou.cr.aliyuncs.com
username: ${{ secrets.ACR_USERNAME }}@aliyun-inner.com
registry: ${{ secrets.ACR_DOMAIN }}
username: ${{ secrets.ACR_USERNAME }}
password: ${{ secrets.ACR_PASSWORD }}
- uses: docker/setup-qemu-action@v1
- uses: docker/setup-buildx-action@v1
@@ -150,7 +147,7 @@ jobs:
tags: |-
docker.io/oamdev/vela-apiserver:${{ steps.get_version.outputs.VERSION }}
ghcr.io/${{ github.repository_owner }}/oamdev/vela-apiserver:${{ steps.get_version.outputs.VERSION }}
kubevela-registry.cn-hangzhou.cr.aliyuncs.com/oamdev/vela-apiserver:${{ steps.get_version.outputs.VERSION }}
${{ secrets.ACR_DOMAIN }}/oamdev/vela-apiserver:${{ steps.get_version.outputs.VERSION }}
- uses: docker/build-push-action@v2
name: Build & Pushing runtime rollout Dockerhub, GHCR and ACR
@@ -169,91 +166,7 @@ jobs:
tags: |-
docker.io/oamdev/vela-rollout:${{ steps.get_version.outputs.VERSION }}
ghcr.io/${{ github.repository_owner }}/oamdev/vela-rollout:${{ steps.get_version.outputs.VERSION }}
kubevela-registry.cn-hangzhou.cr.aliyuncs.com/oamdev/vela-rollout:${{ steps.get_version.outputs.VERSION }}
publish-charts:
env:
HELM_CHARTS_DIR: charts
HELM_CHART: charts/vela-core
MINIMAL_HELM_CHART: charts/vela-minimal
LEGACY_HELM_CHART: legacy/charts/vela-core-legacy
VELA_ROLLOUT_HELM_CHART: runtime/rollout/charts
LOCAL_OSS_DIRECTORY: .oss/
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@master
- name: Get git revision
id: vars
shell: bash
run: |
echo "::set-output name=git_revision::$(git rev-parse --short HEAD)"
- name: Install Helm
uses: azure/setup-helm@v1
with:
version: v3.4.0
- name: Setup node
uses: actions/setup-node@v2
with:
node-version: '14'
- name: Generate helm doc
run: |
make helm-doc-gen
- name: Prepare legacy chart
run: |
rsync -r $LEGACY_HELM_CHART $HELM_CHARTS_DIR
rsync -r $HELM_CHART/* $LEGACY_HELM_CHART --exclude=Chart.yaml --exclude=crds
- name: Prepare vela chart
run: |
rsync -r $VELA_ROLLOUT_HELM_CHART $HELM_CHARTS_DIR
- uses: oprypin/find-latest-tag@v1
with:
repository: oam-dev/kubevela
releases-only: true
id: latest_tag
- name: Tag helm chart image
run: |
latest_repo_tag=${{ steps.latest_tag.outputs.tag }}
sub="."
major="$(cut -d"$sub" -f1 <<<"$latest_repo_tag")"
minor="$(cut -d"$sub" -f2 <<<"$latest_repo_tag")"
patch="0"
current_repo_tag="$major.$minor.$patch"
image_tag=${GITHUB_REF#refs/tags/}
chart_version=$latest_repo_tag
if [[ ${GITHUB_REF} == "refs/heads/master" ]]; then
image_tag=latest
chart_version=${current_repo_tag}-nightly-build
fi
sed -i "s/latest/${image_tag}/g" $HELM_CHART/values.yaml
sed -i "s/latest/${image_tag}/g" $MINIMAL_HELM_CHART/values.yaml
sed -i "s/latest/${image_tag}/g" $LEGACY_HELM_CHART/values.yaml
sed -i "s/latest/${image_tag}/g" $VELA_ROLLOUT_HELM_CHART/values.yaml
chart_smever=${chart_version#"v"}
sed -i "s/0.1.0/$chart_smever/g" $HELM_CHART/Chart.yaml
sed -i "s/0.1.0/$chart_smever/g" $MINIMAL_HELM_CHART/Chart.yaml
sed -i "s/0.1.0/$chart_smever/g" $LEGACY_HELM_CHART/Chart.yaml
sed -i "s/0.1.0/$chart_smever/g" $VELA_ROLLOUT_HELM_CHART/Chart.yaml
- name: Install ossutil
run: wget http://gosspublic.alicdn.com/ossutil/1.7.0/ossutil64 && chmod +x ossutil64 && mv ossutil64 ossutil
- name: Configure Alibaba Cloud OSSUTIL
run: ./ossutil --config-file .ossutilconfig config -i ${ACCESS_KEY} -k ${ACCESS_KEY_SECRET} -e ${ENDPOINT} -c .ossutilconfig
- name: sync cloud to local
run: ./ossutil --config-file .ossutilconfig sync oss://$BUCKET/core $LOCAL_OSS_DIRECTORY
- name: add artifacthub stuff to the repo
run: |
rsync $HELM_CHART/README.md $LEGACY_HELM_CHART/README.md
rsync $HELM_CHART/README.md $VELA_ROLLOUT_HELM_CHART/README.md
sed -i "s/ARTIFACT_HUB_REPOSITORY_ID/$ARTIFACT_HUB_REPOSITORY_ID/g" hack/artifacthub/artifacthub-repo.yml
rsync hack/artifacthub/artifacthub-repo.yml $LOCAL_OSS_DIRECTORY
- name: Package helm charts
run: |
helm package $HELM_CHART --destination $LOCAL_OSS_DIRECTORY
helm package $MINIMAL_HELM_CHART --destination $LOCAL_OSS_DIRECTORY
helm package $LEGACY_HELM_CHART --destination $LOCAL_OSS_DIRECTORY
helm package $VELA_ROLLOUT_HELM_CHART --destination $LOCAL_OSS_DIRECTORY
helm repo index --url https://$BUCKET.$ENDPOINT/core $LOCAL_OSS_DIRECTORY
- name: sync local to cloud
run: ./ossutil --config-file .ossutilconfig sync $LOCAL_OSS_DIRECTORY oss://$BUCKET/core -f
${{ secrets.ACR_DOMAIN }}/oamdev/vela-rollout:${{ steps.get_version.outputs.VERSION }}
publish-capabilities:
env:

View File

@@ -123,6 +123,11 @@ jobs:
- name: sync the latest version file
if: ${{ !contains(env.VELA_VERSION,'alpha') && !contains(env.VELA_VERSION,'beta') }}
run: |
LATEST_VERSION=$(curl -fsSl https://static.kubevela.net/binary/vela/latest_version)
verlte() {
[ "$1" = "`echo -e "$1\n$2" | sort -V | head -n1`" ]
}
verlte ${{ env.VELA_VERSION }} $LATEST_VERSION && echo "${{ env.VELA_VERSION }} <= $LATEST_VERSION, skip update" && exit 0
echo ${{ env.VELA_VERSION }} > ./latest_version
./ossutil --config-file .ossutilconfig cp -u ./latest_version oss://$BUCKET/binary/vela/latest_version

View File

@@ -4,7 +4,7 @@ apiVersion: core.oam.dev/v1beta1
kind: TraitDefinition
metadata:
annotations:
definition.oam.dev/description: affinity specify affinity and tolerationon K8s pod for your workload which follows the pod spec in path 'spec.template'.
definition.oam.dev/description: Affinity specifies affinity and toleration K8s pod for your workload which follows the pod spec in path 'spec.template'.
labels:
custom.definition.oam.dev/ui-hidden: "true"
name: affinity

View File

@@ -196,14 +196,14 @@ spec:
// +usage=Specifies a source the value of this var should come from
valueFrom?: {
// +usage=Selects a key of a secret in the pod's namespace
secretKeyRef: {
secretKeyRef?: {
// +usage=The name of the secret in the pod's namespace to select from
name: string
// +usage=The key of the secret to select from. Must be a valid secret key
key: string
}
// +usage=Selects a key of a config map in the pod's namespace
configMapKeyRef: {
configMapKeyRef?: {
// +usage=The name of the config map in the pod's namespace to select from
name: string
// +usage=The key of the config map to select from. Must be a valid secret key

View File

@@ -43,7 +43,7 @@ spec:
volumeMounts: [{
name: parameter.mountName
mountPath: parameter.initMountPath
}]
}] + parameter.extraVolumeMounts
}]
// +patchKey=name
volumes: [{
@@ -97,5 +97,13 @@ spec:
// +usage=Specify the mount path of init container
initMountPath: string
// +usage=Specify the extra volume mounts for the init container
extraVolumeMounts: [...{
// +usage=The name of the volume to be mounted
name: string
// +usage=The mountPath for mount in the init container
mountPath: string
}]
}

View File

@@ -14,12 +14,18 @@ spec:
cue:
template: |
#K8sObject: {
apiVersion: string
kind: string
metadata: {
name: string
...
}
// +usage=The resource type for the Kubernetes objects
resource?: string
// +usage=The group name for the Kubernetes objects
group?: string
// +usage=If specified, fetch the Kubernetes objects with the name, exclusive to labelSelector
name?: string
// +usage=If specified, fetch the Kubernetes objects from the namespace. Otherwise, fetch from the application's namespace.
namespace?: string
// +usage=If specified, fetch the Kubernetes objects from the cluster. Otherwise, fetch from the local cluster.
cluster?: string
// +usage=If specified, fetch the Kubernetes objects according to the label selector, exclusive to name
labelSelector?: [string]: string
...
}
output: parameter.objects[0]
@@ -30,7 +36,12 @@ spec:
}
}
}
parameter: objects: [...#K8sObject]
parameter: {
// +usage=If specified, application will fetch native Kubernetes objects according to the object description
objects?: [...#K8sObject]
// +usage=If specified, the objects in the urls will be loaded.
urls?: [...string]
}
status:
customStatus: |-
if context.output.apiVersion == "apps/v1" && context.output.kind == "Deployment" {

View File

@@ -14,10 +14,114 @@ spec:
schematic:
cue:
template: |
#Privileges: {
// +usage=Specify the verbs to be allowed for the resource
verbs: [...string]
// +usage=Specify the apiGroups of the resource
apiGroups?: [...string]
// +usage=Specify the resources to be allowed
resources?: [...string]
// +usage=Specify the resourceNames to be allowed
resourceNames?: [...string]
// +usage=Specify the resource url to be allowed
nonResourceURLs?: [...string]
// +usage=Specify the scope of the privileges, default to be namespace scope
scope: *"namespace" | "cluster"
}
parameter: {
// +usage=Specify the name of ServiceAccount
name: string
// +usage=Specify whether to create new ServiceAccount or not
create: *false | bool
// +usage=Specify the privileges of the ServiceAccount, if not empty, RoleBindings(ClusterRoleBindings) will be created
privileges?: [...#Privileges]
}
// +patchStrategy=retainKeys
patch: spec: template: spec: serviceAccountName: parameter.name
_clusterPrivileges: [ for p in parameter.privileges if p.scope == "cluster" {p}]
_namespacePrivileges: [ for p in parameter.privileges if p.scope == "namespace" {p}]
outputs: {
if parameter.create {
"service-account": {
apiVersion: "v1"
kind: "ServiceAccount"
metadata: name: parameter.name
}
}
if parameter.privileges != _|_ {
if len(_clusterPrivileges) > 0 {
"cluster-role": {
apiVersion: "rbac.authorization.k8s.io/v1"
kind: "ClusterRole"
metadata: name: "\(context.namespace):\(parameter.name)"
rules: [ for p in _clusterPrivileges {
verbs: p.verbs
if p.apiGroups != _|_ {
apiGroups: p.apiGroups
}
if p.resources != _|_ {
resources: p.resources
}
if p.resourceNames != _|_ {
resourceNames: p.resourceNames
}
if p.nonResourceURLs != _|_ {
nonResourceURLs: p.nonResourceURLs
}
}]
}
"cluster-role-binding": {
apiVersion: "rbac.authorization.k8s.io/v1"
kind: "ClusterRoleBinding"
metadata: name: "\(context.namespace):\(parameter.name)"
roleRef: {
apiGroup: "rbac.authorization.k8s.io"
kind: "ClusterRole"
name: "\(context.namespace):\(parameter.name)"
}
subjects: [{
kind: "ServiceAccount"
name: parameter.name
namespace: "\(context.namespace)"
}]
}
}
if len(_namespacePrivileges) > 0 {
role: {
apiVersion: "rbac.authorization.k8s.io/v1"
kind: "Role"
metadata: name: parameter.name
rules: [ for p in _namespacePrivileges {
verbs: p.verbs
if p.apiGroups != _|_ {
apiGroups: p.apiGroups
}
if p.resources != _|_ {
resources: p.resources
}
if p.resourceNames != _|_ {
resourceNames: p.resourceNames
}
if p.nonResourceURLs != _|_ {
nonResourceURLs: p.nonResourceURLs
}
}]
}
"role-binding": {
apiVersion: "rbac.authorization.k8s.io/v1"
kind: "RoleBinding"
metadata: name: parameter.name
roleRef: {
apiGroup: "rbac.authorization.k8s.io"
kind: "Role"
name: parameter.name
}
subjects: [{
kind: "ServiceAccount"
name: parameter.name
}]
}
}
}
}

View File

@@ -82,6 +82,11 @@ spec:
// +usage=The key of the config map to select from. Must be a valid secret key
key: string
}
// +usage=Specify the field reference for env
fieldRef?: {
// +usage=Specify the field path for env
fieldPath: string
}
}
}]

View File

@@ -64,6 +64,9 @@ spec:
{
name: "pvc-" + v.name
mountPath: v.mountPath
if v.subPath != _|_ {
subPath: v.subPath
}
}
}
},
@@ -73,6 +76,9 @@ spec:
{
name: "configmap-" + v.name
mountPath: v.mountPath
if v.subPath != _|_ {
subPath: v.subPath
}
}
},
] | []
@@ -103,6 +109,9 @@ spec:
{
name: "secret-" + v.name
mountPath: v.mountPath
if v.subPath != _|_ {
subPath: v.subPath
}
}
},
] | []
@@ -133,6 +142,9 @@ spec:
{
name: "emptydir-" + v.name
mountPath: v.mountPath
if v.subPath != _|_ {
subPath: v.subPath
}
}
},
] | []
@@ -141,12 +153,28 @@ spec:
{
name: "pvc-" + v.name
devicePath: v.mountPath
if v.subPath != _|_ {
subPath: v.subPath
}
}
},
] | []
volumesList: pvcVolumesList + configMapVolumesList + secretVolumesList + emptyDirVolumesList
deDupVolumesArray: [
for val in [
for i, vi in volumesList {
for j, vj in volumesList if j < i && vi.name == vj.name {
_ignore: true
}
vi
},
] if val._ignore == _|_ {
val
},
]
patch: spec: template: spec: {
// +patchKey=name
volumes: pvcVolumesList + configMapVolumesList + secretVolumesList + emptyDirVolumesList
volumes: deDupVolumesArray
containers: [{
// +patchKey=name
@@ -234,6 +262,7 @@ spec:
name: string
mountOnly: *false | bool
mountPath: string
subPath?: string
volumeMode: *"Filesystem" | string
volumeName?: string
accessModes: *["ReadWriteOnce"] | [...string]
@@ -275,6 +304,7 @@ spec:
configMapKey: string
}]
mountPath?: string
subPath?: string
defaultMode: *420 | int
readOnly: *false | bool
data?: {...}
@@ -298,6 +328,7 @@ spec:
secretKey: string
}]
mountPath?: string
subPath?: string
defaultMode: *420 | int
readOnly: *false | bool
stringData?: {...}
@@ -313,6 +344,7 @@ spec:
emptyDir?: [...{
name: string
mountPath: string
subPath?: string
medium: *"" | "Memory"
}]
}

View File

@@ -149,14 +149,14 @@ spec:
// +usage=Specifies a source the value of this var should come from
valueFrom?: {
// +usage=Selects a key of a secret in the pod's namespace
secretKeyRef: {
secretKeyRef?: {
// +usage=The name of the secret in the pod's namespace to select from
name: string
// +usage=The key of the secret to select from. Must be a valid secret key
key: string
}
// +usage=Selects a key of a config map in the pod's namespace
configMapKeyRef: {
configMapKeyRef?: {
// +usage=The name of the config map in the pod's namespace to select from
name: string
// +usage=The key of the config map to select from. Must be a valid secret key

View File

@@ -20,7 +20,10 @@ spec:
for v in parameter.volumeMounts.pvc {
{
mountPath: v.mountPath
name: v.name
if v.subPath != _|_ {
subPath: v.subPath
}
name: v.name
}
},
] | []
@@ -29,7 +32,10 @@ spec:
for v in parameter.volumeMounts.configMap {
{
mountPath: v.mountPath
name: v.name
if v.subPath != _|_ {
subPath: v.subPath
}
name: v.name
}
},
] | []
@@ -38,7 +44,10 @@ spec:
for v in parameter.volumeMounts.secret {
{
mountPath: v.mountPath
name: v.name
if v.subPath != _|_ {
subPath: v.subPath
}
name: v.name
}
},
] | []
@@ -47,7 +56,10 @@ spec:
for v in parameter.volumeMounts.emptyDir {
{
mountPath: v.mountPath
name: v.name
if v.subPath != _|_ {
subPath: v.subPath
}
name: v.name
}
},
] | []
@@ -56,7 +68,10 @@ spec:
for v in parameter.volumeMounts.hostPath {
{
mountPath: v.mountPath
name: v.name
if v.subPath != _|_ {
subPath: v.subPath
}
name: v.name
}
},
] | []
@@ -119,6 +134,19 @@ spec:
},
] | []
}
volumesList: volumesArray.pvc + volumesArray.configMap + volumesArray.secret + volumesArray.emptyDir + volumesArray.hostPath
deDupVolumesArray: [
for val in [
for i, vi in volumesList {
for j, vj in volumesList if j < i && vi.name == vj.name {
_ignore: true
}
vi
},
] if val._ignore == _|_ {
val
},
]
output: {
apiVersion: "apps/v1"
kind: "Deployment"
@@ -262,7 +290,7 @@ spec:
}
if parameter["volumeMounts"] != _|_ {
volumes: volumesArray.pvc + volumesArray.configMap + volumesArray.secret + volumesArray.emptyDir + volumesArray.hostPath
volumes: deDupVolumesArray
}
}
}
@@ -375,6 +403,7 @@ spec:
pvc?: [...{
name: string
mountPath: string
subPath?: string
// +usage=The name of the PVC
claimName: string
}]
@@ -382,6 +411,7 @@ spec:
configMap?: [...{
name: string
mountPath: string
subPath?: string
defaultMode: *420 | int
cmName: string
items?: [...{
@@ -394,6 +424,7 @@ spec:
secret?: [...{
name: string
mountPath: string
subPath?: string
defaultMode: *420 | int
secretName: string
items?: [...{
@@ -406,12 +437,14 @@ spec:
emptyDir?: [...{
name: string
mountPath: string
subPath?: string
medium: *"" | "Memory"
}]
// +usage=Mount HostPath type volume
hostPath?: [...{
name: string
mountPath: string
subPath?: string
path: string
}]
}

View File

@@ -122,6 +122,7 @@ metadata:
name: {{ include "kubevela.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
controller.oam.dev/name: vela-core
{{- include "kubevela.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}

View File

@@ -4,7 +4,7 @@ apiVersion: core.oam.dev/v1beta1
kind: TraitDefinition
metadata:
annotations:
definition.oam.dev/description: affinity specify affinity and tolerationon K8s pod for your workload which follows the pod spec in path 'spec.template'.
definition.oam.dev/description: Affinity specifies affinity and toleration on K8s pod for your workload which follows the pod spec in path 'spec.template'.
labels:
custom.definition.oam.dev/ui-hidden: "true"
name: affinity

View File

@@ -196,14 +196,14 @@ spec:
// +usage=Specifies a source the value of this var should come from
valueFrom?: {
// +usage=Selects a key of a secret in the pod's namespace
secretKeyRef: {
secretKeyRef?: {
// +usage=The name of the secret in the pod's namespace to select from
name: string
// +usage=The key of the secret to select from. Must be a valid secret key
key: string
}
// +usage=Selects a key of a config map in the pod's namespace
configMapKeyRef: {
configMapKeyRef?: {
// +usage=The name of the config map in the pod's namespace to select from
name: string
// +usage=The key of the config map to select from. Must be a valid secret key

View File

@@ -43,7 +43,7 @@ spec:
volumeMounts: [{
name: parameter.mountName
mountPath: parameter.initMountPath
}]
}] + parameter.extraVolumeMounts
}]
// +patchKey=name
volumes: [{
@@ -97,5 +97,13 @@ spec:
// +usage=Specify the mount path of init container
initMountPath: string
// +usage=Specify the extra volume mounts for the init container
extraVolumeMounts: [...{
// +usage=The name of the volume to be mounted
name: string
// +usage=The mountPath for mount in the init container
mountPath: string
}]
}

View File

@@ -14,12 +14,18 @@ spec:
cue:
template: |
#K8sObject: {
apiVersion: string
kind: string
metadata: {
name: string
...
}
// +usage=The resource type for the Kubernetes objects
resource?: string
// +usage=The group name for the Kubernetes objects
group?: string
// +usage=If specified, fetch the Kubernetes objects with the name, exclusive to labelSelector
name?: string
// +usage=If specified, fetch the Kubernetes objects from the namespace. Otherwise, fetch from the application's namespace.
namespace?: string
// +usage=If specified, fetch the Kubernetes objects from the cluster. Otherwise, fetch from the local cluster.
cluster?: string
// +usage=If specified, fetch the Kubernetes objects according to the label selector, exclusive to name
labelSelector?: [string]: string
...
}
output: parameter.objects[0]
@@ -30,7 +36,12 @@ spec:
}
}
}
parameter: objects: [...#K8sObject]
parameter: {
// +usage=If specified, application will fetch native Kubernetes objects according to the object description
objects?: [...#K8sObject]
// +usage=If specified, the objects in the urls will be loaded.
urls?: [...string]
}
status:
customStatus: |-
if context.output.apiVersion == "apps/v1" && context.output.kind == "Deployment" {

View File

@@ -14,10 +14,114 @@ spec:
schematic:
cue:
template: |
#Privileges: {
// +usage=Specify the verbs to be allowed for the resource
verbs: [...string]
// +usage=Specify the apiGroups of the resource
apiGroups?: [...string]
// +usage=Specify the resources to be allowed
resources?: [...string]
// +usage=Specify the resourceNames to be allowed
resourceNames?: [...string]
// +usage=Specify the resource url to be allowed
nonResourceURLs?: [...string]
// +usage=Specify the scope of the privileges, default to be namespace scope
scope: *"namespace" | "cluster"
}
parameter: {
// +usage=Specify the name of ServiceAccount
name: string
// +usage=Specify whether to create new ServiceAccount or not
create: *false | bool
// +usage=Specify the privileges of the ServiceAccount, if not empty, RoleBindings(ClusterRoleBindings) will be created
privileges?: [...#Privileges]
}
// +patchStrategy=retainKeys
patch: spec: template: spec: serviceAccountName: parameter.name
_clusterPrivileges: [ for p in parameter.privileges if p.scope == "cluster" {p}]
_namespacePrivileges: [ for p in parameter.privileges if p.scope == "namespace" {p}]
outputs: {
if parameter.create {
"service-account": {
apiVersion: "v1"
kind: "ServiceAccount"
metadata: name: parameter.name
}
}
if parameter.privileges != _|_ {
if len(_clusterPrivileges) > 0 {
"cluster-role": {
apiVersion: "rbac.authorization.k8s.io/v1"
kind: "ClusterRole"
metadata: name: "\(context.namespace):\(parameter.name)"
rules: [ for p in _clusterPrivileges {
verbs: p.verbs
if p.apiGroups != _|_ {
apiGroups: p.apiGroups
}
if p.resources != _|_ {
resources: p.resources
}
if p.resourceNames != _|_ {
resourceNames: p.resourceNames
}
if p.nonResourceURLs != _|_ {
nonResourceURLs: p.nonResourceURLs
}
}]
}
"cluster-role-binding": {
apiVersion: "rbac.authorization.k8s.io/v1"
kind: "ClusterRoleBinding"
metadata: name: "\(context.namespace):\(parameter.name)"
roleRef: {
apiGroup: "rbac.authorization.k8s.io"
kind: "ClusterRole"
name: "\(context.namespace):\(parameter.name)"
}
subjects: [{
kind: "ServiceAccount"
name: parameter.name
namespace: "\(context.namespace)"
}]
}
}
if len(_namespacePrivileges) > 0 {
role: {
apiVersion: "rbac.authorization.k8s.io/v1"
kind: "Role"
metadata: name: parameter.name
rules: [ for p in _namespacePrivileges {
verbs: p.verbs
if p.apiGroups != _|_ {
apiGroups: p.apiGroups
}
if p.resources != _|_ {
resources: p.resources
}
if p.resourceNames != _|_ {
resourceNames: p.resourceNames
}
if p.nonResourceURLs != _|_ {
nonResourceURLs: p.nonResourceURLs
}
}]
}
"role-binding": {
apiVersion: "rbac.authorization.k8s.io/v1"
kind: "RoleBinding"
metadata: name: parameter.name
roleRef: {
apiGroup: "rbac.authorization.k8s.io"
kind: "Role"
name: parameter.name
}
subjects: [{
kind: "ServiceAccount"
name: parameter.name
}]
}
}
}
}

View File

@@ -82,6 +82,11 @@ spec:
// +usage=The key of the config map to select from. Must be a valid secret key
key: string
}
// +usage=Specify the field reference for env
fieldRef?: {
// +usage=Specify the field path for env
fieldPath: string
}
}
}]

View File

@@ -64,6 +64,9 @@ spec:
{
name: "pvc-" + v.name
mountPath: v.mountPath
if v.subPath != _|_ {
subPath: v.subPath
}
}
}
},
@@ -73,6 +76,9 @@ spec:
{
name: "configmap-" + v.name
mountPath: v.mountPath
if v.subPath != _|_ {
subPath: v.subPath
}
}
},
] | []
@@ -103,6 +109,9 @@ spec:
{
name: "secret-" + v.name
mountPath: v.mountPath
if v.subPath != _|_ {
subPath: v.subPath
}
}
},
] | []
@@ -133,6 +142,9 @@ spec:
{
name: "emptydir-" + v.name
mountPath: v.mountPath
if v.subPath != _|_ {
subPath: v.subPath
}
}
},
] | []
@@ -141,12 +153,28 @@ spec:
{
name: "pvc-" + v.name
devicePath: v.mountPath
if v.subPath != _|_ {
subPath: v.subPath
}
}
},
] | []
volumesList: pvcVolumesList + configMapVolumesList + secretVolumesList + emptyDirVolumesList
deDupVolumesArray: [
for val in [
for i, vi in volumesList {
for j, vj in volumesList if j < i && vi.name == vj.name {
_ignore: true
}
vi
},
] if val._ignore == _|_ {
val
},
]
patch: spec: template: spec: {
// +patchKey=name
volumes: pvcVolumesList + configMapVolumesList + secretVolumesList + emptyDirVolumesList
volumes: deDupVolumesArray
containers: [{
// +patchKey=name
@@ -234,6 +262,7 @@ spec:
name: string
mountOnly: *false | bool
mountPath: string
subPath?: string
volumeMode: *"Filesystem" | string
volumeName?: string
accessModes: *["ReadWriteOnce"] | [...string]
@@ -275,6 +304,7 @@ spec:
configMapKey: string
}]
mountPath?: string
subPath?: string
defaultMode: *420 | int
readOnly: *false | bool
data?: {...}
@@ -298,6 +328,7 @@ spec:
secretKey: string
}]
mountPath?: string
subPath?: string
defaultMode: *420 | int
readOnly: *false | bool
stringData?: {...}
@@ -313,6 +344,7 @@ spec:
emptyDir?: [...{
name: string
mountPath: string
subPath?: string
medium: *"" | "Memory"
}]
}

View File

@@ -149,14 +149,14 @@ spec:
// +usage=Specifies a source the value of this var should come from
valueFrom?: {
// +usage=Selects a key of a secret in the pod's namespace
secretKeyRef: {
secretKeyRef?: {
// +usage=The name of the secret in the pod's namespace to select from
name: string
// +usage=The key of the secret to select from. Must be a valid secret key
key: string
}
// +usage=Selects a key of a config map in the pod's namespace
configMapKeyRef: {
configMapKeyRef?: {
// +usage=The name of the config map in the pod's namespace to select from
name: string
// +usage=The key of the config map to select from. Must be a valid secret key

View File

@@ -20,7 +20,10 @@ spec:
for v in parameter.volumeMounts.pvc {
{
mountPath: v.mountPath
name: v.name
if v.subPath != _|_ {
subPath: v.subPath
}
name: v.name
}
},
] | []
@@ -29,7 +32,10 @@ spec:
for v in parameter.volumeMounts.configMap {
{
mountPath: v.mountPath
name: v.name
if v.subPath != _|_ {
subPath: v.subPath
}
name: v.name
}
},
] | []
@@ -38,7 +44,10 @@ spec:
for v in parameter.volumeMounts.secret {
{
mountPath: v.mountPath
name: v.name
if v.subPath != _|_ {
subPath: v.subPath
}
name: v.name
}
},
] | []
@@ -47,7 +56,10 @@ spec:
for v in parameter.volumeMounts.emptyDir {
{
mountPath: v.mountPath
name: v.name
if v.subPath != _|_ {
subPath: v.subPath
}
name: v.name
}
},
] | []
@@ -56,7 +68,10 @@ spec:
for v in parameter.volumeMounts.hostPath {
{
mountPath: v.mountPath
name: v.name
if v.subPath != _|_ {
subPath: v.subPath
}
name: v.name
}
},
] | []
@@ -119,6 +134,19 @@ spec:
},
] | []
}
volumesList: volumesArray.pvc + volumesArray.configMap + volumesArray.secret + volumesArray.emptyDir + volumesArray.hostPath
deDupVolumesArray: [
for val in [
for i, vi in volumesList {
for j, vj in volumesList if j < i && vi.name == vj.name {
_ignore: true
}
vi
},
] if val._ignore == _|_ {
val
},
]
output: {
apiVersion: "apps/v1"
kind: "Deployment"
@@ -262,7 +290,7 @@ spec:
}
if parameter["volumeMounts"] != _|_ {
volumes: volumesArray.pvc + volumesArray.configMap + volumesArray.secret + volumesArray.emptyDir + volumesArray.hostPath
volumes: deDupVolumesArray
}
}
}
@@ -375,6 +403,7 @@ spec:
pvc?: [...{
name: string
mountPath: string
subPath?: string
// +usage=The name of the PVC
claimName: string
}]
@@ -382,6 +411,7 @@ spec:
configMap?: [...{
name: string
mountPath: string
subPath?: string
defaultMode: *420 | int
cmName: string
items?: [...{
@@ -394,6 +424,7 @@ spec:
secret?: [...{
name: string
mountPath: string
subPath?: string
defaultMode: *420 | int
secretName: string
items?: [...{
@@ -406,12 +437,14 @@ spec:
emptyDir?: [...{
name: string
mountPath: string
subPath?: string
medium: *"" | "Memory"
}]
// +usage=Mount HostPath type volume
hostPath?: [...{
name: string
mountPath: string
subPath?: string
path: string
}]
}

View File

@@ -125,6 +125,7 @@ metadata:
name: {{ include "kubevela.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
controller.oam.dev/name: vela-core
{{- include "kubevela.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}

View File

@@ -19,6 +19,7 @@ package main
import (
"context"
"errors"
goflag "flag"
"fmt"
"io"
"net/http"
@@ -138,6 +139,7 @@ func main() {
flag.DurationVar(&clusterMetricsInterval, "cluster-metrics-interval", 15*time.Second, "The interval that ClusterMetricsMgr will collect metrics from clusters, default value is 15 seconds.")
flag.BoolVar(&controllerArgs.EnableCompatibility, "enable-asi-compatibility", false, "enable compatibility for asi")
flag.BoolVar(&controllerArgs.IgnoreAppWithoutControllerRequirement, "ignore-app-without-controller-version", false, "If true, application controller will not process the app without 'app.oam.dev/controller-version-require' annotation")
flag.BoolVar(&controllerArgs.IgnoreDefinitionWithoutControllerRequirement, "ignore-definition-without-controller-version", false, "If true, trait/component/workflowstep definition controller will not process the definition without 'definition.oam.dev/controller-version-require' annotation")
standardcontroller.AddOptimizeFlags()
standardcontroller.AddAdmissionFlags()
flag.IntVar(&resourcekeeper.MaxDispatchConcurrent, "max-dispatch-concurrent", 10, "Set the max dispatch concurrent number, default is 10")
@@ -146,9 +148,10 @@ func main() {
flag.IntVar(&custom.MaxWorkflowStepErrorRetryTimes, "max-workflow-step-error-retry-times", 10, "Set the max workflow step error retry times, default is 10")
utilfeature.DefaultMutableFeatureGate.AddFlag(flag.CommandLine)
flag.Parse()
// setup logging
klog.InitFlags(nil)
flag.CommandLine.AddGoFlagSet(goflag.CommandLine)
flag.Parse()
if logDebug {
_ = flag.Set("v", strconv.Itoa(int(commonconfig.LogDebug)))
}

View File

@@ -25,3 +25,7 @@ spec:
- name: my-mount
mountPath: /test
claimName: myclaim
- name: my-mount
mountPath: /test2
subPath: /sub
claimName: myclaim

View File

@@ -16,8 +16,9 @@ spec:
pvc:
- name: test1
mountPath: /test/mount/pvc
- name: test2
- name: test1
mountPath: /test/mount2/pvc
subPath: /sub
configMap:
- name: test1
mountPath: /test/mount/cm

View File

@@ -1058,21 +1058,22 @@ func Convert2SecName(name string) string {
// Installer helps addon enable, dependency-check, dispatch resources
type Installer struct {
ctx context.Context
config *rest.Config
addon *InstallPackage
cli client.Client
apply apply.Applicator
r *Registry
registryMeta map[string]SourceMeta
args map[string]interface{}
cache *Cache
dc *discovery.DiscoveryClient
ctx context.Context
config *rest.Config
addon *InstallPackage
cli client.Client
apply apply.Applicator
r *Registry
registryMeta map[string]SourceMeta
args map[string]interface{}
cache *Cache
dc *discovery.DiscoveryClient
skipVersionValidate bool
}
// NewAddonInstaller will create an installer for addon
func NewAddonInstaller(ctx context.Context, cli client.Client, discoveryClient *discovery.DiscoveryClient, apply apply.Applicator, config *rest.Config, r *Registry, args map[string]interface{}, cache *Cache) Installer {
return Installer{
func NewAddonInstaller(ctx context.Context, cli client.Client, discoveryClient *discovery.DiscoveryClient, apply apply.Applicator, config *rest.Config, r *Registry, args map[string]interface{}, cache *Cache, opts ...InstallOption) Installer {
i := Installer{
ctx: ctx,
config: config,
cli: cli,
@@ -1082,14 +1083,21 @@ func NewAddonInstaller(ctx context.Context, cli client.Client, discoveryClient *
cache: cache,
dc: discoveryClient,
}
for _, opt := range opts {
opt(&i)
}
return i
}
func (h *Installer) enableAddon(addon *InstallPackage) error {
var err error
h.addon = addon
err = checkAddonVersionMeetRequired(h.ctx, addon.SystemRequirements, h.cli, h.dc)
if err != nil {
return VersionUnMatchError{addonName: addon.Name, err: err}
if !h.skipVersionValidate {
err = checkAddonVersionMeetRequired(h.ctx, addon.SystemRequirements, h.cli, h.dc)
if err != nil {
return VersionUnMatchError{addonName: addon.Name, err: err}
}
}
if err = h.installDependency(addon); err != nil {
@@ -1445,10 +1453,23 @@ func checkSemVer(actual string, require string) (bool, error) {
}
func fetchVelaCoreImageTag(ctx context.Context, k8sClient client.Client) (string, error) {
deploy := &appsv1.Deployment{}
if err := k8sClient.Get(ctx, types2.NamespacedName{Namespace: types.DefaultKubeVelaNS, Name: types.KubeVelaControllerDeployment}, deploy); err != nil {
deployList := &appsv1.DeploymentList{}
if err := k8sClient.List(ctx, deployList, client.MatchingLabels{oam.LabelControllerName: oam.ApplicationControllerName}); err != nil {
return "", err
}
deploy := appsv1.Deployment{}
if len(deployList.Items) == 0 {
// backward-compatible logic for old versions in which the vela-core controller deployment does not have this label
if err := k8sClient.Get(ctx, types2.NamespacedName{Namespace: types.DefaultKubeVelaNS, Name: types.KubeVelaControllerDeployment}, &deploy); err != nil {
if apierrors.IsNotFound(err) {
return "", errors.New("can't find a running KubeVela instance, please install it first")
}
return "", err
}
} else {
deploy = deployList.Items[0]
}
var tag string
for _, c := range deploy.Spec.Template.Spec.Containers {
if c.Name == types.DefaultKubeVelaReleaseName {

View File

@@ -196,7 +196,7 @@ var _ = Describe("Addon func test", func() {
It("fetchVelaCoreImageTag func test", func() {
deploy = appsv1.Deployment{}
tag, err := fetchVelaCoreImageTag(ctx, k8sClient)
Expect(err).Should(util.NotFoundMatcher{})
Expect(err).ShouldNot(BeNil())
Expect(tag).Should(BeEquivalentTo(""))
Expect(yaml.Unmarshal([]byte(deployYaml), &deploy)).Should(BeNil())
@@ -217,7 +217,7 @@ var _ = Describe("Addon func test", func() {
It("checkAddonVersionMeetRequired func test", func() {
deploy = appsv1.Deployment{}
Expect(checkAddonVersionMeetRequired(ctx, &SystemRequirements{VelaVersion: ">=v1.2.1"}, k8sClient, dc)).Should(util.NotFoundMatcher{})
Expect(checkAddonVersionMeetRequired(ctx, &SystemRequirements{VelaVersion: ">=v1.2.1"}, k8sClient, dc)).ShouldNot(BeNil())
Expect(yaml.Unmarshal([]byte(deployYaml), &deploy)).Should(BeNil())
deploy.SetNamespace(types.DefaultKubeVelaNS)
Expect(k8sClient.Create(ctx, &deploy)).Should(BeNil())
@@ -408,6 +408,8 @@ kind: Deployment
metadata:
name: kubevela-vela-core
namespace: vela-system
labels:
controller.oam.dev/name: vela-core
spec:
progressDeadlineSeconds: 600
replicas: 1

View File

@@ -33,6 +33,7 @@ import (
"github.com/crossplane/crossplane-runtime/pkg/test"
"github.com/google/go-github/v32/github"
"github.com/stretchr/testify/assert"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -47,6 +48,7 @@ import (
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/oam"
version2 "github.com/oam-dev/kubevela/version"
)
@@ -791,6 +793,33 @@ func TestCheckAddonVersionMeetRequired(t *testing.T) {
MockGet: test.NewMockGetFn(nil, func(obj client.Object) error {
return nil
}),
MockList: test.NewMockListFn(nil, func(obj client.ObjectList) error {
robj := obj.(*appsv1.DeploymentList)
list := &appsv1.DeploymentList{
Items: []appsv1.Deployment{
{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
oam.LabelControllerName: oam.ApplicationControllerName,
},
},
Spec: appsv1.DeploymentSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Image: "vela-core:v1.2.5",
},
},
},
},
},
},
},
}
list.DeepCopyInto(robj)
return nil
}),
}
ctx := context.Background()
assert.NoError(t, checkAddonVersionMeetRequired(ctx, &SystemRequirements{VelaVersion: ">=1.2.4"}, k8sClient, nil))

BIN
pkg/addon/example-1.0.1.tgz Normal file

Binary file not shown.

View File

@@ -54,8 +54,8 @@ const (
)
// EnableAddon will enable addon with dependency check, source is where addon from.
func EnableAddon(ctx context.Context, name string, version string, cli client.Client, discoveryClient *discovery.DiscoveryClient, apply apply.Applicator, config *rest.Config, r Registry, args map[string]interface{}, cache *Cache) error {
h := NewAddonInstaller(ctx, cli, discoveryClient, apply, config, &r, args, cache)
func EnableAddon(ctx context.Context, name string, version string, cli client.Client, discoveryClient *discovery.DiscoveryClient, apply apply.Applicator, config *rest.Config, r Registry, args map[string]interface{}, cache *Cache, opts ...InstallOption) error {
h := NewAddonInstaller(ctx, cli, discoveryClient, apply, config, &r, args, cache, opts...)
pkg, err := h.loadInstallPackage(name, version)
if err != nil {
return err
@@ -93,7 +93,7 @@ func DisableAddon(ctx context.Context, cli client.Client, name string, config *r
}
// EnableAddonByLocalDir enable an addon from local dir
func EnableAddonByLocalDir(ctx context.Context, name string, dir string, cli client.Client, dc *discovery.DiscoveryClient, applicator apply.Applicator, config *rest.Config, args map[string]interface{}) error {
func EnableAddonByLocalDir(ctx context.Context, name string, dir string, cli client.Client, dc *discovery.DiscoveryClient, applicator apply.Applicator, config *rest.Config, args map[string]interface{}, opts ...InstallOption) error {
absDir, err := filepath.Abs(dir)
if err != nil {
return err
@@ -112,7 +112,7 @@ func EnableAddonByLocalDir(ctx context.Context, name string, dir string, cli cli
if err != nil {
return err
}
h := NewAddonInstaller(ctx, cli, dc, applicator, config, &Registry{Name: LocalAddonRegistryName}, args, nil)
h := NewAddonInstaller(ctx, cli, dc, applicator, config, &Registry{Name: LocalAddonRegistryName}, args, nil, opts...)
needEnableAddonNames, err := h.checkDependency(pkg)
if err != nil {
return err

View File

@@ -228,3 +228,11 @@ func usingAppsInfo(apps []v1beta1.Application) string {
func IsVersionRegistry(r Registry) bool {
return r.Helm != nil
}
// InstallOption define additional option for installation
type InstallOption func(installer *Installer)
// SkipValidateVersion is an InstallOption that skips validating the system version requirements
func SkipValidateVersion(installer *Installer) {
installer.skipVersionValidate = true
}

View File

@@ -32,6 +32,7 @@ const (
// SystemInfo systemInfo model
type SystemInfo struct {
BaseModel
SignedKey string `json:"signedKey"`
InstallID string `json:"installID"`
EnableCollection bool `json:"enableCollection"`
LoginType string `json:"loginType"`

View File

@@ -57,7 +57,8 @@ const (
GrantTypeRefresh = "refresh"
)
var signedKey = ""
// signedKey is the signing key used for JWT tokens
var signedKey string
// AuthenticationService is the service of authentication
type AuthenticationService interface {

View File

@@ -63,6 +63,7 @@ func (u systemInfoServiceImpl) Get(ctx context.Context) (*model.SystemInfo, erro
}
return info, nil
}
info.SignedKey = rand.String(32)
installID := rand.String(16)
info.InstallID = installID
info.EnableCollection = true
@@ -159,7 +160,7 @@ func (u systemInfoServiceImpl) Init(ctx context.Context) error {
if err != nil {
return err
}
signedKey = info.InstallID
signedKey = info.SignedKey
_, err = initDexConfig(ctx, u.KubeClient, "http://velaux.com")
return err
}

View File

@@ -18,6 +18,8 @@ package service
import (
"context"
"encoding/base64"
"strings"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -79,5 +81,9 @@ func (v *velaQLServiceImpl) QueryView(ctx context.Context, velaQL string) (*apis
log.Logger.Errorf("decode the velaQL response to json failure %s", err.Error())
return nil, bcode.ErrParseQuery2Json
}
if strings.Contains(velaQL, "collect-logs") {
enc, _ := base64.StdEncoding.DecodeString(resp["logs"].(string))
resp["logs"] = string(enc)
}
return &resp, err
}

View File

@@ -20,6 +20,7 @@ import (
"context"
"errors"
"fmt"
"strings"
"time"
"github.com/emicklei/go-restful/v3"
@@ -426,6 +427,11 @@ func (j *jfrogHandlerImpl) handle(ctx context.Context, webhookTrigger *model.App
return nil, err
}
image := fmt.Sprintf("%s/%s:%s", jfrogReq.Data.RepoKey, jfrogReq.Data.ImageName, jfrogReq.Data.Tag)
pathArray := strings.Split(jfrogReq.Data.Path, "/")
if len(pathArray) > 2 {
image = fmt.Sprintf("%s/%s:%s", jfrogReq.Data.RepoKey, strings.Join(pathArray[:len(pathArray)-2], "/"), jfrogReq.Data.Tag)
}
if jfrogReq.Data.URL != "" {
image = fmt.Sprintf("%s/%s", jfrogReq.Data.URL, image)
}
@@ -441,7 +447,7 @@ func (j *jfrogHandlerImpl) handle(ctx context.Context, webhookTrigger *model.App
TriggerType: apisv1.TriggerTypeWebhook,
Force: true,
ImageInfo: &model.ImageInfo{
Type: model.PayloadTypeHarbor,
Type: model.PayloadTypeJFrog,
Resource: &model.ImageResource{
Digest: jfrogReq.Data.Digest,
Tag: jfrogReq.Data.Tag,

View File

@@ -322,7 +322,17 @@ func genClusterCountInfo(num int) string {
return "<10"
case num < 50:
return "<50"
case num < 100:
return "<100"
case num < 150:
return "<150"
case num < 200:
return "<200"
case num < 300:
return "<300"
case num < 500:
return "<500"
default:
return ">=50"
return ">=500"
}
}

View File

@@ -211,8 +211,28 @@ func TestGenClusterCountInfo(t *testing.T) {
res: "<50",
},
{
count: 100,
res: ">=50",
count: 90,
res: "<100",
},
{
count: 137,
res: "<150",
},
{
count: 170,
res: "<200",
},
{
count: 270,
res: "<300",
},
{
count: 400,
res: "<500",
},
{
count: 520,
res: ">=500",
},
}
for _, testcase := range testcases {

View File

@@ -97,21 +97,21 @@ func (wl *Workload) EvalContext(ctx process.Context) error {
}
// EvalStatus eval workload status
func (wl *Workload) EvalStatus(ctx process.Context, cli client.Client, ns string) (string, error) {
func (wl *Workload) EvalStatus(ctx process.Context, cli client.Client, accessor util.NamespaceAccessor) (string, error) {
// if the standard workload is managed by trait always return empty message
if wl.SkipApplyWorkload {
return "", nil
}
return wl.engine.Status(ctx, cli, ns, wl.FullTemplate.CustomStatus, wl.Params)
return wl.engine.Status(ctx, cli, accessor, wl.FullTemplate.CustomStatus, wl.Params)
}
// EvalHealth eval workload health check
func (wl *Workload) EvalHealth(ctx process.Context, client client.Client, namespace string) (bool, error) {
func (wl *Workload) EvalHealth(ctx process.Context, client client.Client, accessor util.NamespaceAccessor) (bool, error) {
// if health of template is not set or standard workload is managed by trait always return true
if wl.FullTemplate.Health == "" || wl.SkipApplyWorkload {
return true, nil
}
return wl.engine.HealthCheck(ctx, client, namespace, wl.FullTemplate.Health)
return wl.engine.HealthCheck(ctx, client, accessor, wl.FullTemplate.Health)
}
// Scope defines the scope of workload
@@ -145,16 +145,16 @@ func (trait *Trait) EvalContext(ctx process.Context) error {
}
// EvalStatus eval trait status
func (trait *Trait) EvalStatus(ctx process.Context, cli client.Client, ns string) (string, error) {
return trait.engine.Status(ctx, cli, ns, trait.CustomStatusFormat, trait.Params)
func (trait *Trait) EvalStatus(ctx process.Context, cli client.Client, accessor util.NamespaceAccessor) (string, error) {
return trait.engine.Status(ctx, cli, accessor, trait.CustomStatusFormat, trait.Params)
}
// EvalHealth eval trait health check
func (trait *Trait) EvalHealth(ctx process.Context, client client.Client, namespace string) (bool, error) {
func (trait *Trait) EvalHealth(ctx process.Context, client client.Client, accessor util.NamespaceAccessor) (bool, error) {
if trait.FullTemplate.Health == "" {
return true, nil
}
return trait.engine.HealthCheck(ctx, client, namespace, trait.HealthCheckPolicy)
return trait.engine.HealthCheck(ctx, client, accessor, trait.HealthCheckPolicy)
}
// Appfile describes application

View File

@@ -365,6 +365,9 @@ func (p *Parser) parsePoliciesFromRevision(ctx context.Context, af *Appfile) (er
return err
}
for _, policy := range af.Policies {
if policy.Properties == nil && policy.Type != v1alpha1.DebugPolicyType {
return fmt.Errorf("policy %s named %s must not have empty properties", policy.Type, policy.Name)
}
switch policy.Type {
case v1alpha1.GarbageCollectPolicyType:
case v1alpha1.ApplyOncePolicyType:
@@ -390,6 +393,9 @@ func (p *Parser) parsePolicies(ctx context.Context, af *Appfile) (err error) {
return err
}
for _, policy := range af.Policies {
if policy.Properties == nil && policy.Type != v1alpha1.DebugPolicyType {
return fmt.Errorf("policy %s named %s must not have empty properties", policy.Type, policy.Name)
}
switch policy.Type {
case v1alpha1.GarbageCollectPolicyType:
case v1alpha1.ApplyOncePolicyType:

View File

@@ -243,6 +243,20 @@ spec:
image: "busybox"
`
const appfileYamlEmptyPolicy = `
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: application-sample
namespace: default
spec:
components: []
policies:
- type: garbage-collect
name: somename
properties:
`
var _ = Describe("Test application parser", func() {
It("Test we can parse an application to an appFile", func() {
o := v1beta1.Application{}
@@ -282,6 +296,14 @@ var _ = Describe("Test application parser", func() {
Expect(err).ShouldNot(HaveOccurred())
_, err = NewApplicationParser(&tclient, dm, pd).GenerateAppFile(context.TODO(), &notfound)
Expect(err).Should(HaveOccurred())
By("app with empty policy")
emptyPolicy := v1beta1.Application{}
err = yaml.Unmarshal([]byte(appfileYamlEmptyPolicy), &emptyPolicy)
Expect(err).ShouldNot(HaveOccurred())
_, err = NewApplicationParser(&tclient, dm, pd).GenerateAppFile(context.TODO(), &emptyPolicy)
Expect(err).Should(HaveOccurred())
Expect(err.Error()).Should(ContainSubstring("have empty properties"))
})
})

View File

@@ -47,6 +47,11 @@ func ContextWithUserInfo(ctx context.Context, app *v1beta1.Application) context.
return request.WithUser(ctx, GetUserInfoInAnnotation(&app.ObjectMeta))
}
// ContextClearUserInfo clear user info in context
func ContextClearUserInfo(ctx context.Context) context.Context {
return request.WithUser(ctx, nil)
}
// SetUserInfoInAnnotation set username and group from userInfo into annotations
// it will clear the existing service account annotation in avoid of permission leak
func SetUserInfoInAnnotation(obj *metav1.ObjectMeta, userInfo authv1.UserInfo) {

View File

@@ -51,7 +51,7 @@ func (c *HTTPCmd) Run(meta *registry.Meta) (res interface{}, err error) {
var (
r io.Reader
client = &http.Client{
Transport: &http.Transport{},
Transport: http.DefaultTransport,
Timeout: time.Second * 3,
}
)

View File

@@ -86,4 +86,7 @@ type Args struct {
// IgnoreAppWithoutControllerRequirement indicates that application controller will not process the app without 'app.oam.dev/controller-version-require' annotation.
IgnoreAppWithoutControllerRequirement bool
// IgnoreDefinitionWithoutControllerRequirement indicates that trait/component/workflowstep definition controller will not process the definition without 'definition.oam.dev/controller-version-require' annotation.
IgnoreDefinitionWithoutControllerRequirement bool
}

View File

@@ -136,7 +136,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
if annotations := app.GetAnnotations(); annotations == nil || annotations[oam.AnnotationKubeVelaVersion] == "" {
metav1.SetMetaDataAnnotation(&app.ObjectMeta, oam.AnnotationKubeVelaVersion, version.VelaVersion)
}
logCtx.AddTag("publish_version", app.GetAnnotations()[oam.AnnotationKubeVelaVersion])
logCtx.AddTag("publish_version", app.GetAnnotations()[oam.AnnotationPublishVersion])
appParser := appfile.NewApplicationParser(r.Client, r.dm, r.pd)
handler, err := NewAppHandler(logCtx, r, app, appParser)

View File

@@ -38,6 +38,7 @@ import (
"github.com/oam-dev/kubevela/pkg/monitor/metrics"
"github.com/oam-dev/kubevela/pkg/multicluster"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/util"
"github.com/oam-dev/kubevela/pkg/resourcekeeper"
)
@@ -215,17 +216,14 @@ func (h *AppHandler) ProduceArtifacts(ctx context.Context, comps []*types.Compon
// nolint
func (h *AppHandler) collectHealthStatus(ctx context.Context, wl *appfile.Workload, appRev *v1beta1.ApplicationRevision, overrideNamespace string) (*common.ApplicationComponentStatus, bool, error) {
namespace := h.app.Namespace
if overrideNamespace != "" {
namespace = overrideNamespace
}
accessor := util.NewApplicationResourceNamespaceAccessor(h.app.Namespace, overrideNamespace)
var (
status = common.ApplicationComponentStatus{
Name: wl.Name,
WorkloadDefinition: wl.FullTemplate.Reference.Definition,
Healthy: true,
Namespace: namespace,
Namespace: accessor.Namespace(),
Cluster: multicluster.ClusterNameInContext(ctx),
}
appName = appRev.Spec.Application.Name
@@ -235,10 +233,10 @@ func (h *AppHandler) collectHealthStatus(ctx context.Context, wl *appfile.Worklo
if wl.CapabilityCategory == types.TerraformCategory {
var configuration terraforv1beta2.Configuration
if err := h.r.Client.Get(ctx, client.ObjectKey{Name: wl.Name, Namespace: namespace}, &configuration); err != nil {
if err := h.r.Client.Get(ctx, client.ObjectKey{Name: wl.Name, Namespace: accessor.Namespace()}, &configuration); err != nil {
if kerrors.IsNotFound(err) {
var legacyConfiguration terraforv1beta1.Configuration
if err := h.r.Client.Get(ctx, client.ObjectKey{Name: wl.Name, Namespace: namespace}, &legacyConfiguration); err != nil {
if err := h.r.Client.Get(ctx, client.ObjectKey{Name: wl.Name, Namespace: accessor.Namespace()}, &legacyConfiguration); err != nil {
return nil, false, errors.WithMessagef(err, "app=%s, comp=%s, check health error", appName, wl.Name)
}
isHealth = setStatus(&status, legacyConfiguration.Status.ObservedGeneration, legacyConfiguration.Generation,
@@ -251,12 +249,12 @@ func (h *AppHandler) collectHealthStatus(ctx context.Context, wl *appfile.Worklo
appRev.Name, configuration.Status.Apply.State, configuration.Status.Apply.Message)
}
} else {
if ok, err := wl.EvalHealth(wl.Ctx, h.r.Client, namespace); !ok || err != nil {
if ok, err := wl.EvalHealth(wl.Ctx, h.r.Client, accessor); !ok || err != nil {
isHealth = false
status.Healthy = false
}
status.Message, err = wl.EvalStatus(wl.Ctx, h.r.Client, namespace)
status.Message, err = wl.EvalStatus(wl.Ctx, h.r.Client, accessor)
if err != nil {
return nil, false, errors.WithMessagef(err, "app=%s, comp=%s, evaluate workload status message error", appName, wl.Name)
}
@@ -264,24 +262,25 @@ func (h *AppHandler) collectHealthStatus(ctx context.Context, wl *appfile.Worklo
var traitStatusList []common.ApplicationTraitStatus
for _, tr := range wl.Traits {
traitOverrideNamespace := overrideNamespace
if tr.FullTemplate.TraitDefinition.Spec.ControlPlaneOnly {
namespace = appRev.GetNamespace()
traitOverrideNamespace = appRev.GetNamespace()
wl.Ctx.SetCtx(context.WithValue(wl.Ctx.GetCtx(), multicluster.ClusterContextKey, multicluster.ClusterLocalName))
}
_accessor := util.NewApplicationResourceNamespaceAccessor(h.app.Namespace, traitOverrideNamespace)
var traitStatus = common.ApplicationTraitStatus{
Type: tr.Name,
Healthy: true,
}
if ok, err := tr.EvalHealth(wl.Ctx, h.r.Client, namespace); !ok || err != nil {
if ok, err := tr.EvalHealth(wl.Ctx, h.r.Client, _accessor); !ok || err != nil {
isHealth = false
traitStatus.Healthy = false
}
traitStatus.Message, err = tr.EvalStatus(wl.Ctx, h.r.Client, namespace)
traitStatus.Message, err = tr.EvalStatus(wl.Ctx, h.r.Client, _accessor)
if err != nil {
return nil, false, errors.WithMessagef(err, "app=%s, comp=%s, trait=%s, evaluate status message error", appName, wl.Name, tr.Name)
}
traitStatusList = append(traitStatusList, traitStatus)
namespace = appRev.GetNamespace()
wl.Ctx.SetCtx(context.WithValue(wl.Ctx.GetCtx(), multicluster.ClusterContextKey, status.Cluster))
}
@@ -305,12 +304,12 @@ func setStatus(status *common.ApplicationComponentStatus, observedGeneration, ge
}
return true
}
status.Message = message
if !isLatest() || state != terraformtypes.Available {
status.Healthy = false
return false
}
status.Healthy = true
status.Message = message
return true
}

View File

@@ -41,6 +41,7 @@ import (
"github.com/oam-dev/kubevela/pkg/oam/util"
"github.com/oam-dev/kubevela/pkg/policy/envbinding"
"github.com/oam-dev/kubevela/pkg/utils"
"github.com/oam-dev/kubevela/pkg/velaql/providers/query"
"github.com/oam-dev/kubevela/pkg/workflow/providers"
"github.com/oam-dev/kubevela/pkg/workflow/providers/http"
"github.com/oam-dev/kubevela/pkg/workflow/providers/kube"
@@ -81,6 +82,7 @@ func (h *AppHandler) GenerateApplicationSteps(ctx monitorContext.Context,
terraformProvider.Install(handlerProviders, app, func(comp common.ApplicationComponent) (*appfile.Workload, error) {
return appParser.ParseWorkloadFromRevision(comp, appRev)
})
query.Install(handlerProviders, h.r.Client, nil)
var tasks []wfTypes.TaskRunner
for _, step := range af.WorkflowSteps {
@@ -188,7 +190,7 @@ func convertStepProperties(step *v1beta1.WorkflowStep, app *v1beta1.Application)
}
func checkDependsOnValidComponent(dependsOnComponentNames, allComponentNames []string) (string, bool) {
// does not depends on other components
// does not depend on other components
if dependsOnComponentNames == nil {
return "", true
}

View File

@@ -967,7 +967,7 @@ func (h historiesByComponentRevision) Less(i, j int) bool {
// UpdateApplicationRevisionStatus update application revision status
func (h *AppHandler) UpdateApplicationRevisionStatus(ctx context.Context, appRev *v1beta1.ApplicationRevision, succeed bool, wfStatus *common.WorkflowStatus) {
if appRev == nil {
if appRev == nil || DisableAllApplicationRevision {
return
}
appRev.Status.Succeeded = succeed

View File

@@ -43,17 +43,24 @@ import (
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/discoverymapper"
"github.com/oam-dev/kubevela/pkg/oam/util"
"github.com/oam-dev/kubevela/version"
)
// Reconciler reconciles a ComponentDefinition object
type Reconciler struct {
client.Client
dm discoverymapper.DiscoveryMapper
pd *packages.PackageDiscover
Scheme *runtime.Scheme
record event.Recorder
dm discoverymapper.DiscoveryMapper
pd *packages.PackageDiscover
Scheme *runtime.Scheme
record event.Recorder
options
}
type options struct {
defRevLimit int
concurrentReconciles int
ignoreDefNoCtrlReq bool
controllerVersion string
}
// Reconcile is the main logic for ComponentDefinition controller
@@ -68,6 +75,11 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
return ctrl.Result{}, client.IgnoreNotFound(err)
}
if !r.matchControllerRequirement(&componentDefinition) {
klog.InfoS("skip componentDefinition: not match the controller requirement of componentDefinition", "componentDefinition", klog.KObj(&componentDefinition))
return ctrl.Result{}, nil
}
// refresh package discover when componentDefinition is registered
if componentDefinition.Spec.Workload.Type != types.AutoDetectWorkloadDefinition {
err := utils.RefreshPackageDiscover(ctx, r.Client, r.dm, r.pd, &componentDefinition)
@@ -187,12 +199,32 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
// Setup adds a controller that reconciles ComponentDefinition.
func Setup(mgr ctrl.Manager, args oamctrl.Args) error {
r := Reconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
dm: args.DiscoveryMapper,
pd: args.PackageDiscover,
defRevLimit: args.DefRevisionLimit,
concurrentReconciles: args.ConcurrentReconciles,
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
dm: args.DiscoveryMapper,
pd: args.PackageDiscover,
options: parseOptions(args),
}
return r.SetupWithManager(mgr)
}
func parseOptions(args oamctrl.Args) options {
return options{
defRevLimit: args.DefRevisionLimit,
concurrentReconciles: args.ConcurrentReconciles,
ignoreDefNoCtrlReq: args.IgnoreDefinitionWithoutControllerRequirement,
controllerVersion: version.VelaVersion,
}
}
func (r *Reconciler) matchControllerRequirement(componentDefinition *v1beta1.ComponentDefinition) bool {
if componentDefinition.Annotations != nil {
if requireVersion, ok := componentDefinition.Annotations[oam.AnnotationControllerRequirement]; ok {
return requireVersion == r.controllerVersion
}
}
if r.ignoreDefNoCtrlReq {
return false
}
return true
}

View File

@@ -90,11 +90,13 @@ var _ = BeforeSuite(func(done Done) {
Expect(err).ToNot(HaveOccurred())
r = Reconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
dm: dm,
pd: pd,
defRevLimit: defRevisionLimit,
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
dm: dm,
pd: pd,
options: options{
defRevLimit: defRevisionLimit,
},
}
Expect(r.SetupWithManager(mgr)).ToNot(HaveOccurred())
var ctx context.Context

View File

@@ -44,6 +44,7 @@ import (
af "github.com/oam-dev/kubevela/pkg/appfile"
"github.com/oam-dev/kubevela/pkg/cue/process"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/util"
)
const (
@@ -478,7 +479,8 @@ func CUEBasedHealthCheck(ctx context.Context, c client.Client, wlRef WorkloadRef
okToCheckTrait = true
return
}
isHealthy, err := wl.EvalHealth(pCtx, c, ns)
accessor := util.NewApplicationResourceNamespaceAccessor(ns, "")
isHealthy, err := wl.EvalHealth(pCtx, c, accessor)
if err != nil {
wlHealth.HealthStatus = StatusUnhealthy
wlHealth.Diagnosis = errors.Wrap(err, errHealthCheck).Error()
@@ -490,7 +492,7 @@ func CUEBasedHealthCheck(ctx context.Context, c client.Client, wlRef WorkloadRef
// TODO(wonderflow): we should add a custom way to let the template say why it's unhealthy, only a bool flag is not enough
wlHealth.HealthStatus = StatusUnhealthy
}
wlHealth.CustomStatusMsg, err = wl.EvalStatus(pCtx, c, ns)
wlHealth.CustomStatusMsg, err = wl.EvalStatus(pCtx, c, accessor)
if err != nil {
wlHealth.Diagnosis = errors.Wrap(err, errHealthCheck).Error()
}
@@ -522,7 +524,8 @@ func CUEBasedHealthCheck(ctx context.Context, c client.Client, wlRef WorkloadRef
traits[i] = tHealth
continue
}
isHealthy, err := tr.EvalHealth(pCtx, c, ns)
accessor := util.NewApplicationResourceNamespaceAccessor("", ns)
isHealthy, err := tr.EvalHealth(pCtx, c, accessor)
if err != nil {
tHealth.HealthStatus = StatusUnhealthy
tHealth.Diagnosis = errors.Wrap(err, errHealthCheck).Error()
@@ -535,7 +538,7 @@ func CUEBasedHealthCheck(ctx context.Context, c client.Client, wlRef WorkloadRef
// TODO(wonderflow): we should add a custom way to let the template say why it's unhealthy, only a bool flag is not enough
tHealth.HealthStatus = StatusUnhealthy
}
tHealth.CustomStatusMsg, err = tr.EvalStatus(pCtx, c, ns)
tHealth.CustomStatusMsg, err = tr.EvalStatus(pCtx, c, accessor)
if err != nil {
tHealth.Diagnosis = errors.Wrap(err, errHealthCheck).Error()
}

View File

@@ -42,17 +42,24 @@ import (
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/discoverymapper"
"github.com/oam-dev/kubevela/pkg/oam/util"
"github.com/oam-dev/kubevela/version"
)
// Reconciler reconciles a TraitDefinition object
type Reconciler struct {
client.Client
dm discoverymapper.DiscoveryMapper
pd *packages.PackageDiscover
Scheme *runtime.Scheme
record event.Recorder
dm discoverymapper.DiscoveryMapper
pd *packages.PackageDiscover
Scheme *runtime.Scheme
record event.Recorder
options
}
type options struct {
defRevLimit int
concurrentReconciles int
ignoreDefNoCtrlReq bool
controllerVersion string
}
// Reconcile is the main logic for TraitDefinition controller
@@ -67,6 +74,11 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
return ctrl.Result{}, client.IgnoreNotFound(err)
}
if !r.matchControllerRequirement(&traitdefinition) {
klog.InfoS("skip traitDefinition: not match the controller requirement of traitDefinition", "traitDefinition", klog.KObj(&traitdefinition))
return ctrl.Result{}, nil
}
// this is a placeholder for finalizer here in the future
if traitdefinition.DeletionTimestamp != nil {
klog.InfoS("The TraitDefinition is being deleted", "traitDefinition", klog.KRef(req.Namespace, req.Name))
@@ -193,12 +205,32 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
// Setup adds a controller that reconciles TraitDefinition.
func Setup(mgr ctrl.Manager, args oamctrl.Args) error {
r := Reconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
dm: args.DiscoveryMapper,
pd: args.PackageDiscover,
defRevLimit: args.DefRevisionLimit,
concurrentReconciles: args.ConcurrentReconciles,
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
dm: args.DiscoveryMapper,
pd: args.PackageDiscover,
options: parseOptions(args),
}
return r.SetupWithManager(mgr)
}
func parseOptions(args oamctrl.Args) options {
return options{
defRevLimit: args.DefRevisionLimit,
concurrentReconciles: args.ConcurrentReconciles,
ignoreDefNoCtrlReq: args.IgnoreDefinitionWithoutControllerRequirement,
controllerVersion: version.VelaVersion,
}
}
func (r *Reconciler) matchControllerRequirement(traitDefinition *v1beta1.TraitDefinition) bool {
if traitDefinition.Annotations != nil {
if requireVersion, ok := traitDefinition.Annotations[oam.AnnotationControllerRequirement]; ok {
return requireVersion == r.controllerVersion
}
}
if r.ignoreDefNoCtrlReq {
return false
}
return true
}

View File

@@ -90,11 +90,13 @@ var _ = BeforeSuite(func(done Done) {
Expect(err).ToNot(HaveOccurred())
r = Reconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
dm: dm,
pd: pd,
defRevLimit: defRevisionLimit,
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
dm: dm,
pd: pd,
options: options{
defRevLimit: defRevisionLimit,
},
}
Expect(r.SetupWithManager(mgr)).ToNot(HaveOccurred())
var ctx context.Context

View File

@@ -90,11 +90,13 @@ var _ = BeforeSuite(func(done Done) {
Expect(err).ToNot(HaveOccurred())
r = Reconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
dm: dm,
pd: pd,
defRevLimit: defRevisionLimit,
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
dm: dm,
pd: pd,
options: options{
defRevLimit: defRevisionLimit,
},
}
Expect(r.SetupWithManager(mgr)).ToNot(HaveOccurred())
var ctx context.Context

View File

@@ -42,17 +42,24 @@ import (
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/discoverymapper"
"github.com/oam-dev/kubevela/pkg/oam/util"
"github.com/oam-dev/kubevela/version"
)
// Reconciler reconciles a WorkflowStepDefinition object
type Reconciler struct {
client.Client
dm discoverymapper.DiscoveryMapper
pd *packages.PackageDiscover
Scheme *runtime.Scheme
record event.Recorder
dm discoverymapper.DiscoveryMapper
pd *packages.PackageDiscover
Scheme *runtime.Scheme
record event.Recorder
options
}
type options struct {
defRevLimit int
concurrentReconciles int
ignoreDefNoCtrlReq bool
controllerVersion string
}
// Reconcile is the main logic for WorkflowStepDefinition controller
@@ -68,6 +75,11 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
return ctrl.Result{}, client.IgnoreNotFound(err)
}
if !r.matchControllerRequirement(&wfstepdefinition) {
klog.InfoS("skip workflowStepDefinition: not match the controller requirement of workflowStepDefinition", "workflowStepDefinition", klog.KObj(&wfstepdefinition))
return ctrl.Result{}, nil
}
// this is a placeholder for finalizer here in the future
if wfstepdefinition.DeletionTimestamp != nil {
return ctrl.Result{}, nil
@@ -192,11 +204,32 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
// Setup adds a controller that reconciles WorkflowStepDefinition.
func Setup(mgr ctrl.Manager, args oamctrl.Args) error {
r := Reconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
dm: args.DiscoveryMapper,
pd: args.PackageDiscover,
defRevLimit: args.DefRevisionLimit,
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
dm: args.DiscoveryMapper,
pd: args.PackageDiscover,
options: parseOptions(args),
}
return r.SetupWithManager(mgr)
}
func parseOptions(args oamctrl.Args) options {
return options{
defRevLimit: args.DefRevisionLimit,
concurrentReconciles: args.ConcurrentReconciles,
ignoreDefNoCtrlReq: args.IgnoreDefinitionWithoutControllerRequirement,
controllerVersion: version.VelaVersion,
}
}
func (r *Reconciler) matchControllerRequirement(wfstepdefinition *v1beta1.WorkflowStepDefinition) bool {
if wfstepdefinition.Annotations != nil {
if requireVersion, ok := wfstepdefinition.Annotations[oam.AnnotationControllerRequirement]; ok {
return requireVersion == r.controllerVersion
}
}
if r.ignoreDefNoCtrlReq {
return false
}
return true
}

View File

@@ -30,7 +30,7 @@ import (
"cuelang.org/go/cue/build"
"github.com/getkin/kin-openapi/openapi3"
"github.com/pkg/errors"
git "gopkg.in/src-d/go-git.v4"
"gopkg.in/src-d/go-git.v4"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -215,25 +215,26 @@ func GetOpenAPISchemaFromTerraformComponentDefinition(configuration string) ([]b
// GetTerraformConfigurationFromRemote gets Terraform Configuration(HCL)
func GetTerraformConfigurationFromRemote(name, remoteURL, remotePath string) (string, error) {
tmpPath := filepath.Join("./tmp/terraform", name)
// Check if the directory exists. If yes, remove it.
if _, err := os.Stat(tmpPath); err == nil {
err := os.RemoveAll(tmpPath)
if err != nil {
return "", errors.Wrap(err, "failed to remove the directory")
}
}
_, err := git.PlainClone(tmpPath, false, &git.CloneOptions{
URL: remoteURL,
Progress: nil,
})
userHome, err := os.UserHomeDir()
if err != nil {
return "", err
}
cachePath := filepath.Join(userHome, ".vela", "terraform", name)
// Check if the directory exists. If yes, remove it.
entities, err := os.ReadDir(cachePath)
if err != nil || len(entities) == 0 {
fmt.Printf("loading terraform module %s into %s from %s\n", name, cachePath, remoteURL)
if _, err = git.PlainClone(cachePath, false, &git.CloneOptions{
URL: remoteURL,
Progress: os.Stdout,
}); err != nil {
return "", err
}
}
tfPath := filepath.Join(tmpPath, remotePath, "variables.tf")
tfPath := filepath.Join(cachePath, remotePath, "variables.tf")
if _, err := os.Stat(tfPath); err != nil {
tfPath = filepath.Join(tmpPath, remotePath, "main.tf")
tfPath = filepath.Join(cachePath, remotePath, "main.tf")
if _, err := os.Stat(tfPath); err != nil {
return "", errors.Wrap(err, "failed to find main.tf or variables.tf in Terraform configurations of the remote repository")
}
@@ -242,10 +243,6 @@ func GetTerraformConfigurationFromRemote(name, remoteURL, remotePath string) (st
if err != nil {
return "", errors.Wrap(err, "failed to read Terraform configuration")
}
if err := os.RemoveAll(tmpPath); err != nil {
return "", err
}
return string(conf), nil
}

View File

@@ -17,24 +17,16 @@
package utils
import (
"context"
"io/ioutil"
"os"
"path/filepath"
"strings"
"testing"
. "github.com/agiledragon/gomonkey/v2"
"github.com/pkg/errors"
"gopkg.in/src-d/go-git.v4"
"gotest.tools/assert"
)
func TestGetTerraformConfigurationFromRemote(t *testing.T) {
// If you hit a panic on macOS as below, please fix it by referencing https://github.com/eisenxp/macos-golink-wrapper.
// panic: permission denied [recovered]
// panic: permission denied
type want struct {
config string
errMsg string
@@ -46,8 +38,6 @@ func TestGetTerraformConfigurationFromRemote(t *testing.T) {
path string
data []byte
variableFile string
// mockWorkingPath will create `/tmp/terraform`
mockWorkingPath bool
}
cases := map[string]struct {
args args
@@ -57,7 +47,7 @@ func TestGetTerraformConfigurationFromRemote(t *testing.T) {
args: args{
name: "valid",
url: "https://github.com/kubevela-contrib/terraform-modules.git",
path: "",
path: "unittest/",
data: []byte(`
variable "aaa" {
type = list(object({
@@ -85,7 +75,7 @@ variable "aaa" {
args: args{
name: "aws-subnet",
url: "https://github.com/kubevela-contrib/terraform-modules.git",
path: "aws/subnet",
path: "unittest/aws/subnet",
data: []byte(`
variable "aaa" {
type = list(object({
@@ -109,47 +99,20 @@ variable "aaa" {
}`,
},
},
"working path exists": {
args: args{
variableFile: "main.tf",
mockWorkingPath: true,
},
want: want{
errMsg: "failed to remove the directory",
},
},
}
for name, tc := range cases {
t.Run(name, func(t *testing.T) {
if tc.args.mockWorkingPath {
err := os.MkdirAll("./tmp/terraform", 0755)
assert.NilError(t, err)
defer os.RemoveAll("./tmp/terraform")
patch1 := ApplyFunc(os.Remove, func(_ string) error {
return errors.New("failed")
})
defer patch1.Reset()
patch2 := ApplyFunc(os.Open, func(_ string) (*os.File, error) {
return nil, errors.New("failed")
})
defer patch2.Reset()
}
patch := ApplyFunc(git.PlainCloneContext, func(ctx context.Context, path string, isBare bool, o *git.CloneOptions) (*git.Repository, error) {
var tmpPath string
if tc.args.path != "" {
tmpPath = filepath.Join("./tmp/terraform", tc.args.name, tc.args.path)
} else {
tmpPath = filepath.Join("./tmp/terraform", tc.args.name)
}
home, _ := os.UserHomeDir()
path := filepath.Join(home, ".vela", "terraform")
tmpPath := filepath.Join(path, tc.args.name, tc.args.path)
if len(tc.args.data) > 0 {
err := os.MkdirAll(tmpPath, os.ModePerm)
assert.NilError(t, err)
err = ioutil.WriteFile(filepath.Clean(filepath.Join(tmpPath, tc.args.variableFile)), tc.args.data, 0644)
assert.NilError(t, err)
return nil, nil
})
defer patch.Reset()
}
defer os.RemoveAll(tmpPath)
conf, err := GetTerraformConfigurationFromRemote(tc.args.name, tc.args.url, tc.args.path)
if tc.want.errMsg != "" {

View File

@@ -62,8 +62,8 @@ const (
// AbstractEngine defines Definition's Render interface
type AbstractEngine interface {
Complete(ctx process.Context, abstractTemplate string, params interface{}) error
HealthCheck(ctx process.Context, cli client.Client, ns string, healthPolicyTemplate string) (bool, error)
Status(ctx process.Context, cli client.Client, ns string, customStatusTemplate string, parameter interface{}) (string, error)
HealthCheck(ctx process.Context, cli client.Client, accessor util.NamespaceAccessor, healthPolicyTemplate string) (bool, error)
Status(ctx process.Context, cli client.Client, accessor util.NamespaceAccessor, customStatusTemplate string, parameter interface{}) (string, error)
}
type def struct {
@@ -151,7 +151,7 @@ func (wd *workloadDef) Complete(ctx process.Context, abstractTemplate string, pa
return nil
}
func (wd *workloadDef) getTemplateContext(ctx process.Context, cli client.Reader, ns string) (map[string]interface{}, error) {
func (wd *workloadDef) getTemplateContext(ctx process.Context, cli client.Reader, accessor util.NamespaceAccessor) (map[string]interface{}, error) {
var root = initRoot(ctx.BaseContextLabels())
var commonLabels = GetCommonLabels(ctx.BaseContextLabels())
@@ -162,7 +162,7 @@ func (wd *workloadDef) getTemplateContext(ctx process.Context, cli client.Reader
return nil, err
}
// workload main resource will have a unique label("app.oam.dev/resourceType"="WORKLOAD") in per component/app level
object, err := getResourceFromObj(ctx.GetCtx(), componentWorkload, cli, ns, util.MergeMapOverrideWithDst(map[string]string{
object, err := getResourceFromObj(ctx.GetCtx(), componentWorkload, cli, accessor.For(componentWorkload), util.MergeMapOverrideWithDst(map[string]string{
oam.LabelOAMResourceType: oam.ResourceTypeWorkload,
}, commonLabels), "")
if err != nil {
@@ -182,7 +182,7 @@ func (wd *workloadDef) getTemplateContext(ctx process.Context, cli client.Reader
return nil, err
}
// AuxiliaryWorkload will have a unique label("trait.oam.dev/resource"="name of outputs") in per component/app level
object, err := getResourceFromObj(ctx.GetCtx(), traitRef, cli, ns, util.MergeMapOverrideWithDst(map[string]string{
object, err := getResourceFromObj(ctx.GetCtx(), traitRef, cli, accessor.For(componentWorkload), util.MergeMapOverrideWithDst(map[string]string{
oam.TraitTypeLabel: AuxiliaryWorkload,
}, commonLabels), assist.Name)
if err != nil {
@@ -197,11 +197,11 @@ func (wd *workloadDef) getTemplateContext(ctx process.Context, cli client.Reader
}
// HealthCheck address health check for workload
func (wd *workloadDef) HealthCheck(ctx process.Context, cli client.Client, ns string, healthPolicyTemplate string) (bool, error) {
func (wd *workloadDef) HealthCheck(ctx process.Context, cli client.Client, accessor util.NamespaceAccessor, healthPolicyTemplate string) (bool, error) {
if healthPolicyTemplate == "" {
return true, nil
}
templateContext, err := wd.getTemplateContext(ctx, cli, ns)
templateContext, err := wd.getTemplateContext(ctx, cli, accessor)
if err != nil {
return false, errors.WithMessage(err, "get template context")
}
@@ -228,11 +228,11 @@ func checkHealth(templateContext map[string]interface{}, healthPolicyTemplate st
}
// Status get workload status by customStatusTemplate
func (wd *workloadDef) Status(ctx process.Context, cli client.Client, ns string, customStatusTemplate string, parameter interface{}) (string, error) {
func (wd *workloadDef) Status(ctx process.Context, cli client.Client, accessor util.NamespaceAccessor, customStatusTemplate string, parameter interface{}) (string, error) {
if customStatusTemplate == "" {
return "", nil
}
templateContext, err := wd.getTemplateContext(ctx, cli, ns)
templateContext, err := wd.getTemplateContext(ctx, cli, accessor)
if err != nil {
return "", errors.WithMessage(err, "get template context")
}
@@ -417,7 +417,7 @@ func initRoot(contextLabels map[string]string) map[string]interface{} {
return root
}
func (td *traitDef) getTemplateContext(ctx process.Context, cli client.Reader, ns string) (map[string]interface{}, error) {
func (td *traitDef) getTemplateContext(ctx process.Context, cli client.Reader, accessor util.NamespaceAccessor) (map[string]interface{}, error) {
var root = initRoot(ctx.BaseContextLabels())
var commonLabels = GetCommonLabels(ctx.BaseContextLabels())
@@ -431,7 +431,7 @@ func (td *traitDef) getTemplateContext(ctx process.Context, cli client.Reader, n
if err != nil {
return nil, err
}
object, err := getResourceFromObj(ctx.GetCtx(), traitRef, cli, ns, util.MergeMapOverrideWithDst(map[string]string{
object, err := getResourceFromObj(ctx.GetCtx(), traitRef, cli, accessor.For(traitRef), util.MergeMapOverrideWithDst(map[string]string{
oam.TraitTypeLabel: assist.Type,
}, commonLabels), assist.Name)
if err != nil {
@@ -446,11 +446,11 @@ func (td *traitDef) getTemplateContext(ctx process.Context, cli client.Reader, n
}
// Status get trait status by customStatusTemplate
func (td *traitDef) Status(ctx process.Context, cli client.Client, ns string, customStatusTemplate string, parameter interface{}) (string, error) {
func (td *traitDef) Status(ctx process.Context, cli client.Client, accessor util.NamespaceAccessor, customStatusTemplate string, parameter interface{}) (string, error) {
if customStatusTemplate == "" {
return "", nil
}
templateContext, err := td.getTemplateContext(ctx, cli, ns)
templateContext, err := td.getTemplateContext(ctx, cli, accessor)
if err != nil {
return "", errors.WithMessage(err, "get template context")
}
@@ -458,11 +458,11 @@ func (td *traitDef) Status(ctx process.Context, cli client.Client, ns string, cu
}
// HealthCheck address health check for trait
func (td *traitDef) HealthCheck(ctx process.Context, cli client.Client, ns string, healthPolicyTemplate string) (bool, error) {
func (td *traitDef) HealthCheck(ctx process.Context, cli client.Client, accessor util.NamespaceAccessor, healthPolicyTemplate string) (bool, error) {
if healthPolicyTemplate == "" {
return true, nil
}
templateContext, err := td.getTemplateContext(ctx, cli, ns)
templateContext, err := td.getTemplateContext(ctx, cli, accessor)
if err != nil {
return false, errors.WithMessage(err, "get template context")
}

View File

@@ -51,7 +51,7 @@ const (
ContextComponents = "components"
// ContextComponentType is the component type of current trait binding with
ContextComponentType = "componentType"
// ComponentRevisionPlaceHolder is the component revision name placeHolder, this field will be replace with real value
// ComponentRevisionPlaceHolder is the component revision name placeHolder, this field will be replaced with real value
// after component be created
ComponentRevisionPlaceHolder = "KUBEVELA_COMPONENT_REVISION_PLACEHOLDER"
// ContextDataArtifacts is used to store unstructured resources of components

View File

@@ -358,7 +358,11 @@ func jsonMergePatch(base cue.Value, patch cue.Value) (string, error) {
if err != nil {
return "", errors.Wrapf(err, "failed to merge base value and patch value by JsonMergePatch")
}
return string(merged), nil
output, err := OpenBaiscLit(string(merged))
if err != nil {
return "", errors.Wrapf(err, "failed to parse open basic lit for merged result")
}
return output, nil
}
func jsonPatch(base cue.Value, patch cue.Value) (string, error) {
@@ -379,5 +383,9 @@ func jsonPatch(base cue.Value, patch cue.Value) (string, error) {
if err != nil {
return "", errors.Wrapf(err, "failed to apply json patch")
}
return string(merged), nil
output, err := OpenBaiscLit(string(merged))
if err != nil {
return "", errors.Wrapf(err, "failed to parse open basic lit for merged result")
}
return output, nil
}

View File

@@ -718,10 +718,10 @@ func TestImports(t *testing.T) {
context: stepSessionID: "3w9qkdgn5w"`
v, err := NewValue(`
import (
"vela/op"
"vela/custom"
)
id: op.context.stepSessionID
id: custom.context.stepSessionID
`+cont, nil, cont)
assert.NilError(t, err)

View File

@@ -98,6 +98,9 @@ const (
LabelProject = "core.oam.dev/project"
LabelResourceRules = "rules.oam.dev/resources"
// LabelControllerName indicates the controller name
LabelControllerName = "controller.oam.dev/name"
)
const (
@@ -198,7 +201,7 @@ const (
// AnnotationWorkloadName indicates the managed workload's name by trait
AnnotationWorkloadName = "trait.oam.dev/workload-name"
// AnnotationControllerRequirement indicates the controller version that can process the application.
// AnnotationControllerRequirement indicates the controller version that can process the application/definition.
AnnotationControllerRequirement = "app.oam.dev/controller-version-require"
// AnnotationApplicationServiceAccountName indicates the name of the ServiceAccount to use to apply Components and run Workflow.

View File

@@ -953,3 +953,38 @@ func AsController(r *corev1.ObjectReference) metav1.OwnerReference {
ref.Controller = &c
return ref
}
// NamespaceAccessor namespace accessor for resource
type NamespaceAccessor interface {
	For(obj client.Object) string
	Namespace() string
}

// applicationResourceNamespaceAccessor resolves the namespace for resources
// dispatched by an application, honoring an optional override namespace.
type applicationResourceNamespaceAccessor struct {
	applicationNamespace string
	overrideNamespace    string
}

// For access namespace for resource: the override namespace wins, then the
// resource's own namespace, then the application namespace as the fallback.
func (a *applicationResourceNamespaceAccessor) For(obj client.Object) string {
	if ns := a.overrideNamespace; ns != "" {
		return ns
	}
	if ns := obj.GetNamespace(); ns != "" {
		return ns
	}
	return a.applicationNamespace
}

// Namespace the namespace by default (override namespace if set, otherwise
// the application namespace).
func (a *applicationResourceNamespaceAccessor) Namespace() string {
	if ns := a.overrideNamespace; ns != "" {
		return ns
	}
	return a.applicationNamespace
}

// NewApplicationResourceNamespaceAccessor create namespace accessor for resource in application
func NewApplicationResourceNamespaceAccessor(appNs, overrideNs string) NamespaceAccessor {
	return &applicationResourceNamespaceAccessor{
		applicationNamespace: appNs,
		overrideNamespace:    overrideNs,
	}
}

View File

@@ -16,7 +16,10 @@ limitations under the License.
package oam
// SystemDefinitonNamespace global value for controller and webhook system-level namespace
var (
	// SystemDefinitonNamespace global value for controller and webhook system-level namespace
	SystemDefinitonNamespace string = "vela-system"
	// ApplicationControllerName means the controller is application
	ApplicationControllerName string = "vela-core"
)

View File

@@ -81,3 +81,14 @@ func TestParseApplyOncePolicy(t *testing.T) {
r.NoError(err)
r.Equal(policySpec, spec)
}
// TestParsePolicy verifies that a policy with nil properties is skipped
// rather than parsed.
func TestParsePolicy(t *testing.T) {
	r := require.New(t)
	app := &v1beta1.Application{
		Spec: v1beta1.ApplicationSpec{
			Policies: []v1beta1.AppPolicy{{Type: "example", Name: "s", Properties: nil}},
		},
	}
	exists, err := parsePolicy(app, "example", nil)
	r.False(exists, "empty policy should not be included")
	r.NoError(err)
}

View File

@@ -34,7 +34,7 @@ const (
// GetEnvBindingPolicy extract env-binding policy with given policy name, if policy name is empty, the first env-binding policy will be used
func GetEnvBindingPolicy(app *v1beta1.Application, policyName string) (*v1alpha1.EnvBindingSpec, error) {
for _, policy := range app.Spec.Policies {
if (policy.Name == policyName || policyName == "") && policy.Type == v1alpha1.EnvBindingPolicyType {
if (policy.Name == policyName || policyName == "") && policy.Type == v1alpha1.EnvBindingPolicyType && policy.Properties != nil {
envBindingSpec := &v1alpha1.EnvBindingSpec{}
err := json.Unmarshal(policy.Properties.Raw, envBindingSpec)
return envBindingSpec, err

View File

@@ -19,6 +19,7 @@ package policy
import (
"context"
"encoding/json"
"fmt"
"github.com/pkg/errors"
errors2 "k8s.io/apimachinery/pkg/api/errors"
@@ -32,6 +33,9 @@ import (
// ParseOverridePolicyRelatedDefinitions get definitions inside override policy
func ParseOverridePolicyRelatedDefinitions(ctx context.Context, cli client.Client, app *v1beta1.Application, policy v1beta1.AppPolicy) (compDefs []*v1beta1.ComponentDefinition, traitDefs []*v1beta1.TraitDefinition, err error) {
if policy.Properties == nil {
return compDefs, traitDefs, fmt.Errorf("override policy %s must not have empty properties", policy.Name)
}
spec := &v1alpha1.OverridePolicySpec{}
if err = json.Unmarshal(policy.Properties.Raw, spec); err != nil {
return nil, nil, errors.Wrapf(err, "invalid override policy spec")

View File

@@ -62,6 +62,12 @@ func TestParseOverridePolicyRelatedDefinitions(t *testing.T) {
Policy: v1beta1.AppPolicy{Properties: &runtime.RawExtension{Raw: []byte(`{"components":[{"type":"comp","traits":[{"type":"trait-404"}]}]}`)}},
Error: "failed to get trait definition",
},
"empty-policy": {
Policy: v1beta1.AppPolicy{Properties: nil},
ComponentDefs: nil,
TraitDefs: nil,
Error: "have empty properties",
},
}
for name, tt := range testCases {
t.Run(name, func(t *testing.T) {

View File

@@ -18,6 +18,7 @@ package policy
import (
"context"
"fmt"
"github.com/pkg/errors"
utilfeature "k8s.io/apiserver/pkg/util/feature"
@@ -65,6 +66,9 @@ func GetPlacementsFromTopologyPolicies(ctx context.Context, cli client.Client, a
hasTopologyPolicy := false
for _, policy := range policies {
if policy.Type == v1alpha1.TopologyPolicyType {
if policy.Properties == nil {
return nil, fmt.Errorf("topology policy %s must not have empty properties", policy.Name)
}
hasTopologyPolicy = true
topologySpec := &v1alpha1.TopologyPolicySpec{}
if err := utils.StrictUnmarshal(policy.Properties.Raw, topologySpec); err != nil {

View File

@@ -146,6 +146,10 @@ func TestGetClusterLabelSelectorInTopology(t *testing.T) {
Inputs: []v1beta1.AppPolicy{},
Outputs: []v1alpha1.PlacementDecision{{Cluster: "local", Namespace: ""}},
},
"empty-topology-policy": {
Inputs: []v1beta1.AppPolicy{{Type: "topology", Name: "some-name", Properties: nil}},
Error: "have empty properties",
},
}
for name, tt := range testCases {
t.Run(name, func(t *testing.T) {

View File

@@ -105,6 +105,7 @@ func (h *resourceKeeper) record(ctx context.Context, manifests []*unstructured.U
}
cfg := newDispatchConfig(options...)
ctx = auth.ContextClearUserInfo(ctx)
if len(rootManifests) != 0 {
rt, err := h.getRootRT(ctx)
if err != nil {

View File

@@ -437,7 +437,7 @@ func (h *gcHandler) GarbageCollectLegacyResourceTrackers(ctx context.Context) er
}
}
for _, policy := range h.app.Spec.Policies {
if policy.Type == v1alpha1.EnvBindingPolicyType {
if policy.Type == v1alpha1.EnvBindingPolicyType && policy.Properties != nil {
spec := &v1alpha1.EnvBindingSpec{}
if err = json.Unmarshal(policy.Properties.Raw, &spec); err == nil {
for _, env := range spec.Envs {

View File

@@ -163,6 +163,10 @@ func listApplicationResourceTrackers(ctx context.Context, cli client.Client, app
}
// ListApplicationResourceTrackers list resource trackers for application with all historyRTs sorted by version number
// rootRT -> The ResourceTracker that records life-long resources. These resources will only be recycled when application is removed.
// currentRT -> The ResourceTracker that tracks the resources used by the latest version of application.
// historyRTs -> The ResourceTrackers that tracks the resources in outdated versions.
// crRT -> The ResourceTracker that tracks the component revisions created by the application.
func ListApplicationResourceTrackers(ctx context.Context, cli client.Client, app *v1beta1.Application) (rootRT *v1beta1.ResourceTracker, currentRT *v1beta1.ResourceTracker, historyRTs []*v1beta1.ResourceTracker, crRT *v1beta1.ResourceTracker, err error) {
metrics.ListResourceTrackerCounter.WithLabelValues("application").Inc()
rts, err := listApplicationResourceTrackers(ctx, cli, app)

View File

@@ -85,10 +85,7 @@ func (options *ResourceTreePrintOptions) loadResourceRows(currentRT *v1beta1.Res
if mr.Deleted {
continue
}
rows = append(rows, &resourceRow{
mr: mr.DeepCopy(),
status: resourceRowStatusUpdated,
})
rows = append(rows, buildResourceRow(mr, resourceRowStatusUpdated))
}
}
for _, rt := range historyRT {
@@ -100,10 +97,7 @@ func (options *ResourceTreePrintOptions) loadResourceRows(currentRT *v1beta1.Res
}
}
if matchedRow == nil {
rows = append(rows, &resourceRow{
mr: mr.DeepCopy(),
status: resourceRowStatusOutdated,
})
rows = append(rows, buildResourceRow(mr, resourceRowStatusOutdated))
}
}
}
@@ -410,3 +404,14 @@ func RetrieveKubeCtlGetMessageGenerator(cfg *rest.Config) (ResourceDetailRetriev
return nil
}, nil
}
// buildResourceRow converts a managed resource into a resourceRow with the
// given status, defaulting the cluster name to the local cluster when unset.
func buildResourceRow(mr v1beta1.ManagedResource, resourceStatus string) *resourceRow {
	row := &resourceRow{mr: mr.DeepCopy(), status: resourceStatus}
	if row.mr.Cluster == "" {
		row.mr.Cluster = multicluster.ClusterLocalName
	}
	return row
}

View File

@@ -22,6 +22,10 @@ import (
"github.com/stretchr/testify/require"
"k8s.io/utils/pointer"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/multicluster"
)
func TestResourceTreePrintOption_getWidthForDetails(t *testing.T) {
@@ -46,3 +50,39 @@ func TestResourceTreePrintOptions_wrapDetails(t *testing.T) {
},
options._wrapDetails(detail, 40))
}
// TestBuildResourceRow checks that buildResourceRow preserves the row status
// and defaults an empty cluster name to the local cluster.
func TestBuildResourceRow(t *testing.T) {
	r := require.New(t)
	cases := map[string]struct {
		Cluster                   string
		ResourceRowStatus         string
		ExpectedCluster           string
		ExpectedResourceRowStatus string
	}{
		"localCluster": {
			Cluster:                   "",
			ResourceRowStatus:         resourceRowStatusUpdated,
			ExpectedCluster:           multicluster.ClusterLocalName,
			ExpectedResourceRowStatus: resourceRowStatusUpdated,
		},
		"remoteCluster": {
			Cluster:                   "remoteCluster",
			ResourceRowStatus:         resourceRowStatusUpdated,
			ExpectedCluster:           "remoteCluster",
			ExpectedResourceRowStatus: resourceRowStatusUpdated,
		},
	}
	for name, tc := range cases {
		row := buildResourceRow(v1beta1.ManagedResource{
			ClusterObjectReference: common.ClusterObjectReference{
				Cluster: tc.Cluster,
			},
		}, tc.ResourceRowStatus)
		r.Equal(tc.ExpectedCluster, row.mr.Cluster, name)
		r.Equal(tc.ExpectedResourceRowStatus, row.status, name)
	}
}

View File

@@ -19,20 +19,32 @@ package stdlib
import (
"embed"
"fmt"
"os"
"path/filepath"
"strings"
"cuelang.org/go/cue/build"
"k8s.io/klog/v2"
)
// init eagerly builds the builtin CUE imports; the process cannot work
// without them, so failure is fatal.
func init() {
	imports, err := initBuiltinImports()
	if err != nil {
		klog.ErrorS(err, "Unable to init builtin imports")
		os.Exit(1)
	}
	BuiltinImports = imports
}
var (
//go:embed pkgs op.cue ql.cue
fs embed.FS
// BuiltinImports is the builtin imports for cue
BuiltinImports []*build.Instance
)
// GetPackages Get Stdlib packages
func GetPackages(tagTempl string) (map[string]string, error) {
func GetPackages() (map[string]string, error) {
files, err := fs.ReadDir("pkgs")
if err != nil {
return nil, err
@@ -63,16 +75,32 @@ func GetPackages(tagTempl string) (map[string]string, error) {
}
return map[string]string{
"vela/op": opContent + "\n" + tagTempl,
"vela/ql": qlContent + "\n" + tagTempl,
"vela/op": opContent,
"vela/ql": qlContent,
}, nil
}
// AddImportsFor install imports for build.Instance.
func AddImportsFor(inst *build.Instance, tagTempl string) error {
pkgs, err := GetPackages(tagTempl)
inst.Imports = append(inst.Imports, BuiltinImports...)
if tagTempl != "" {
p := &build.Instance{
PkgName: filepath.Base("vela/custom"),
ImportPath: "vela/custom",
}
if err := p.AddFile("-", tagTempl); err != nil {
return err
}
inst.Imports = append(inst.Imports, p)
}
return nil
}
func initBuiltinImports() ([]*build.Instance, error) {
imports := make([]*build.Instance, 0)
pkgs, err := GetPackages()
if err != nil {
return err
return nil, err
}
for path, content := range pkgs {
p := &build.Instance{
@@ -80,9 +108,9 @@ func AddImportsFor(inst *build.Instance, tagTempl string) error {
ImportPath: path,
}
if err := p.AddFile("-", content); err != nil {
return err
return nil, err
}
inst.Imports = append(inst.Imports, p)
imports = append(imports, p)
}
return nil
return imports, nil
}

View File

@@ -26,7 +26,7 @@ import (
)
func TestGetPackages(t *testing.T) {
pkgs, err := GetPackages("context: _")
pkgs, err := GetPackages()
assert.NilError(t, err)
var r cue.Runtime
for path, content := range pkgs {
@@ -36,8 +36,8 @@ func TestGetPackages(t *testing.T) {
builder := &build.Instance{}
builder.AddFile("-", `
import "vela/op"
out: op.context`)
import "vela/custom"
out: custom.context`)
err = AddImportsFor(builder, "context: id: \"xxx\"")
assert.NilError(t, err)

View File

@@ -31,7 +31,7 @@
kind: string
}
filter?: {
namespace?: *"" | string
namespace?: string
matchingLabels?: {...}
}
list?: {...}

View File

@@ -86,9 +86,9 @@
limitBytes: *null | int
}
outputs?: {
logs: string
err?: string
info: {
logs?: string
err?: string
info?: {
fromDate: string
toDate: string
}

View File

@@ -35,7 +35,6 @@ import (
)
const (
errAuthenticateProvider = "failed to authenticate Terraform cloud provider %s for %s"
errProviderExists = "terraform provider %s for %s already exists"
errDeleteProvider = "failed to delete Terraform Provider %s err: %w"
errCouldNotDeleteProvider = "the Terraform Provider %s could not be disabled because it was created by enabling a Terraform provider or was manually created"
@@ -54,7 +53,7 @@ func CreateApplication(ctx context.Context, k8sClient client.Client, name, compo
if strings.HasPrefix(componentType, types.TerraformComponentPrefix) {
existed, err := IsTerraformProviderExisted(ctx, k8sClient, name)
if err != nil {
return errors.Wrapf(err, errAuthenticateProvider, name, componentType)
return errors.Wrapf(err, errCheckProviderExistence, name)
}
if existed {
return fmt.Errorf(errProviderExists, name, componentType)

View File

@@ -22,6 +22,9 @@ import (
"strings"
"github.com/pkg/errors"
"github.com/oam-dev/kubevela/pkg/cue/model/value"
"github.com/oam-dev/kubevela/references/common"
)
// QueryView contains query data
@@ -40,6 +43,8 @@ const (
KeyWordView = "view"
// KeyWordParameter represent parameter keyword
KeyWordParameter = "parameter"
// KeyWordTemplate represents template keyword
KeyWordTemplate = "template"
// KeyWordExport represent export keyword
KeyWordExport = "export"
// DefaultExportValue is the default Export value
@@ -91,6 +96,36 @@ func ParseVelaQL(ql string) (QueryView, error) {
return qv, nil
}
// ParseVelaQLFromPath will parse a velaQL file path to QueryView.
// The view body is read from a local path or remote URL; the Export key is
// taken from the view's `export` field when present, otherwise the default
// export value is used.
func ParseVelaQLFromPath(velaQLViewPath string) (*QueryView, error) {
	body, err := common.ReadRemoteOrLocalPath(velaQLViewPath)
	if err != nil {
		// Wrap (instead of Errorf with %v) so callers keep the error chain,
		// consistent with error handling elsewhere in this package.
		return nil, errors.Wrapf(err, "read view file from %s", velaQLViewPath)
	}
	val, err := value.NewValue(string(body), nil, "")
	if err != nil {
		return nil, errors.Wrap(err, "new value for view")
	}
	// Fall back to the default export key when the view does not declare one
	// or the declared value cannot be rendered as a string.
	expStr := DefaultExportValue
	if exp, lookupErr := val.LookupValue(KeyWordExport); lookupErr == nil {
		if s, strErr := exp.String(); strErr == nil {
			expStr = s
		}
	}
	return &QueryView{
		View:      string(body),
		Parameter: nil,
		Export:    strings.Trim(strings.TrimSpace(expStr), `"`),
	}, nil
}
// ParseParameter parse parameter to map[string]interface{}
func ParseParameter(parameter string) (map[string]interface{}, error) {
parameter = strings.TrimLeft(parameter, "{")

View File

@@ -18,12 +18,12 @@ package query
import (
"bufio"
"bytes"
"context"
"encoding/base64"
"fmt"
"io"
"regexp"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
@@ -397,14 +397,6 @@ func (h *provider) GeneratorServiceEndpoints(wfctx wfContext.Context, v *value.V
return fillQueryResult(v, serviceEndpoints, "list")
}
var (
terminatedContainerNotFoundRegex = regexp.MustCompile("previous terminated container .+ in pod .+ not found")
)
func isTerminatedContainerNotFound(err error) bool {
return err != nil && terminatedContainerNotFoundRegex.MatchString(err.Error())
}
func (h *provider) CollectLogsInPod(ctx wfContext.Context, v *value.Value, act types.Action) error {
cluster, err := v.GetString("cluster")
if err != nil {
@@ -429,27 +421,29 @@ func (h *provider) CollectLogsInPod(ctx wfContext.Context, v *value.Value, act t
cliCtx := multicluster.ContextWithClusterName(context.Background(), cluster)
clientSet, err := kubernetes.NewForConfig(h.cfg)
if err != nil {
return errors.Wrapf(err, "failed to create kubernetes clientset")
return errors.Wrapf(err, "failed to create kubernetes client")
}
var defaultOutputs = make(map[string]interface{})
var errMsg string
podInst, err := clientSet.CoreV1().Pods(namespace).Get(cliCtx, pod, v1.GetOptions{})
if err != nil {
return errors.Wrapf(err, "failed to get pod")
errMsg = fmt.Sprintf("failed to get pod:%s", err.Error())
}
req := clientSet.CoreV1().Pods(namespace).GetLogs(pod, opts)
readCloser, err := req.Stream(cliCtx)
if err != nil && !isTerminatedContainerNotFound(err) {
return errors.Wrapf(err, "failed to get stream logs")
if err != nil {
errMsg = fmt.Sprintf("failed to get stream logs %s", err.Error())
}
r := bufio.NewReader(readCloser)
var b strings.Builder
var readErr error
if err == nil {
if readCloser != nil && podInst != nil {
r := bufio.NewReader(readCloser)
buffer := bytes.NewBuffer(nil)
var readErr error
defer func() {
_ = readCloser.Close()
}()
for {
s, err := r.ReadString('\n')
b.WriteString(s)
buffer.WriteString(s)
if err != nil {
if !errors.Is(err, io.EOF) {
readErr = err
@@ -457,30 +451,34 @@ func (h *provider) CollectLogsInPod(ctx wfContext.Context, v *value.Value, act t
break
}
}
} else {
readErr = err
toDate := v1.Now()
var fromDate v1.Time
// nolint
if opts.SinceTime != nil {
fromDate = *opts.SinceTime
} else if opts.SinceSeconds != nil {
fromDate = v1.NewTime(toDate.Add(time.Duration(-(*opts.SinceSeconds) * int64(time.Second))))
} else {
fromDate = podInst.CreationTimestamp
}
// the cue string can not support the special characters
logs := base64.StdEncoding.EncodeToString(buffer.Bytes())
defaultOutputs = map[string]interface{}{
"logs": logs,
"info": map[string]interface{}{
"fromDate": fromDate,
"toDate": toDate,
},
}
if readErr != nil {
errMsg = readErr.Error()
}
}
toDate := v1.Now()
var fromDate v1.Time
// nolint
if opts.SinceTime != nil {
fromDate = *opts.SinceTime
} else if opts.SinceSeconds != nil {
fromDate = v1.NewTime(toDate.Add(time.Duration(-(*opts.SinceSeconds) * int64(time.Second))))
} else {
fromDate = podInst.CreationTimestamp
if errMsg != "" {
klog.Warningf(errMsg)
defaultOutputs["err"] = errMsg
}
o := map[string]interface{}{
"logs": b.String(),
"info": map[string]interface{}{
"fromDate": fromDate,
"toDate": toDate,
},
}
if readErr != nil {
o["err"] = readErr.Error()
}
return v.FillObject(o, "outputs")
return v.FillObject(defaultOutputs, "outputs")
}
// Install register handlers to provider discover.

View File

@@ -20,6 +20,7 @@ import (
"context"
"encoding/json"
"fmt"
"strings"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -37,6 +38,7 @@ import (
"github.com/oam-dev/kubevela/pkg/utils"
"github.com/oam-dev/kubevela/pkg/utils/apply"
"github.com/oam-dev/kubevela/pkg/workflow/tasks"
"github.com/oam-dev/kubevela/pkg/workflow/tasks/template"
wfTypes "github.com/oam-dev/kubevela/pkg/workflow/types"
)
@@ -73,7 +75,7 @@ func (handler *ViewHandler) QueryView(ctx context.Context, qv QueryView) (*value
outputsTemplate := fmt.Sprintf(OutputsTemplate, qv.Export, qv.Export)
queryKey := QueryParameterKey{}
if err := json.Unmarshal([]byte(outputsTemplate), &queryKey); err != nil {
return nil, err
return nil, errors.Errorf("unmarhsal query template: %v", err)
}
handler.viewTask = v1beta1.WorkflowStep{
@@ -84,7 +86,12 @@ func (handler *ViewHandler) QueryView(ctx context.Context, qv QueryView) (*value
}
pCtx := process.NewContext(process.ContextData{})
taskDiscover := tasks.NewViewTaskDiscover(handler.pd, handler.cli, handler.cfg, handler.dispatch, handler.delete, handler.namespace, 3, pCtx)
loader := template.NewViewTemplateLoader(handler.cli, handler.namespace)
if len(strings.Split(qv.View, "\n")) > 2 {
loader = &template.EchoLoader{}
}
taskDiscover := tasks.NewViewTaskDiscover(handler.pd, handler.cli, handler.cfg, handler.dispatch, handler.delete, handler.namespace, 3, pCtx, loader)
genTask, err := taskDiscover.GetTaskGenerator(ctx, handler.viewTask.Type)
if err != nil {
return nil, err
@@ -97,14 +104,14 @@ func (handler *ViewHandler) QueryView(ctx context.Context, qv QueryView) (*value
viewCtx, err := NewViewContext()
if err != nil {
return nil, err
return nil, errors.Errorf("new view context: %v", err)
}
status, _, err := runner.Run(viewCtx, &wfTypes.TaskRunOptions{})
if err != nil {
return nil, err
return nil, errors.Errorf("run query view: %v", err)
}
if string(status.Phase) != ViewTaskPhaseSucceeded {
return nil, errors.Errorf("failed to query the view %s", status.Message)
return nil, errors.Errorf("failed to query the view: %s", status.Message)
}
return viewCtx.GetVar(qv.Export)
}

View File

@@ -96,7 +96,9 @@ func (h *ValidatingHandler) Handle(ctx context.Context, req admission.Request) a
switch req.Operation {
case admissionv1.Create:
if allErrs := h.ValidateCreate(ctx, app); len(allErrs) > 0 {
return admission.Errored(http.StatusUnprocessableEntity, mergeErrors(allErrs))
// http.StatusUnprocessableEntity will NOT report any error descriptions
// to the client, use generic http.StatusBadRequest instead.
return admission.Errored(http.StatusBadRequest, mergeErrors(allErrs))
}
case admissionv1.Update:
oldApp := &v1beta1.Application{}
@@ -105,7 +107,7 @@ func (h *ValidatingHandler) Handle(ctx context.Context, req admission.Request) a
}
if app.ObjectMeta.DeletionTimestamp.IsZero() {
if allErrs := h.ValidateUpdate(ctx, app, oldApp); len(allErrs) > 0 {
return admission.Errored(http.StatusUnprocessableEntity, mergeErrors(allErrs))
return admission.Errored(http.StatusBadRequest, mergeErrors(allErrs))
}
}
default:

View File

@@ -373,4 +373,21 @@ var _ = Describe("Test Application Validator", func() {
resp = handler.Handle(ctx, req)
Expect(resp.Allowed).Should(BeFalse())
})
It("Test Application with empty policy", func() {
req := admission.Request{
AdmissionRequest: admissionv1.AdmissionRequest{
Operation: admissionv1.Create,
Resource: metav1.GroupVersionResource{Group: "core.oam.dev", Version: "v1beta1", Resource: "applications"},
Object: runtime.RawExtension{
Raw: []byte(`
{"kind":"Application","metadata":{"name":"app-with-empty-policy-webhook-test", "namespace":"default"},
"spec":{"components":[],"policies":[{"name":"2345","type":"garbage-collect","properties":null}]}}
`),
},
},
}
resp := handler.Handle(ctx, req)
Expect(resp.Allowed).Should(BeFalse())
})
})

View File

@@ -118,13 +118,14 @@ func (h *ValidatingHandler) Handle(ctx context.Context, req admission.Request) a
if err := h.Decoder.DecodeRaw(req.AdmissionRequest.OldObject, oldApp); err != nil {
return admission.Errored(http.StatusBadRequest, err)
}
if allErrs := h.ValidateUpdate(ctx, app, oldApp); len(allErrs) > 0 {
return admission.Errored(http.StatusUnprocessableEntity, allErrs.ToAggregate())
// http.StatusUnprocessableEntity will NOT report any error descriptions
// to the client, use generic http.StatusBadRequest instead.
return admission.Errored(http.StatusBadRequest, allErrs.ToAggregate())
}
case admissionv1.Create:
if allErrs := h.ValidateCreate(ctx, app); len(allErrs) > 0 {
return admission.Errored(http.StatusUnprocessableEntity, allErrs.ToAggregate())
return admission.Errored(http.StatusBadRequest, allErrs.ToAggregate())
}
default:
// Do nothing for CONNECT

View File

@@ -30,6 +30,8 @@ import (
"github.com/oam-dev/kubevela/pkg/cue/model"
"github.com/oam-dev/kubevela/pkg/cue/model/value"
"github.com/oam-dev/kubevela/pkg/multicluster"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/util"
wfContext "github.com/oam-dev/kubevela/pkg/workflow/context"
"github.com/oam-dev/kubevela/pkg/workflow/providers"
"github.com/oam-dev/kubevela/pkg/workflow/types"
@@ -90,6 +92,12 @@ func (h *provider) Apply(ctx wfContext.Context, v *value.Value, act types.Action
}
deployCtx := multicluster.ContextWithClusterName(context.Background(), cluster)
deployCtx = auth.ContextWithUserInfo(deployCtx, h.app)
if h.app != nil {
util.AddLabels(workload, map[string]string{
oam.LabelAppName: h.app.Name,
oam.LabelAppNamespace: h.app.Namespace,
})
}
if err := h.apply(deployCtx, cluster, common.WorkflowResourceCreator, workload); err != nil {
return err
}
@@ -126,7 +134,7 @@ func (h *provider) ApplyInParallel(ctx wfContext.Context, v *value.Value, act ty
deployCtx := multicluster.ContextWithClusterName(context.Background(), cluster)
deployCtx = auth.ContextWithUserInfo(deployCtx, h.app)
if err = h.apply(deployCtx, cluster, common.WorkflowResourceCreator, workloads...); err != nil {
return v.FillObject(err, "err")
return err
}
return nil
}

View File

@@ -126,6 +126,9 @@ func overrideConfiguration(policies []v1beta1.AppPolicy, components []common.App
var err error
for _, policy := range policies {
if policy.Type == v1alpha1.OverridePolicyType {
if policy.Properties == nil {
return nil, fmt.Errorf("override policy %s must not have empty properties", policy.Name)
}
overrideSpec := &v1alpha1.OverridePolicySpec{}
if err := utils.StrictUnmarshal(policy.Properties.Raw, overrideSpec); err != nil {
return nil, errors.Wrapf(err, "failed to parse override policy %s", policy.Name)

View File

@@ -48,6 +48,14 @@ func TestOverrideConfiguration(t *testing.T) {
}},
Error: "failed to parse override policy",
},
"empty-policy": {
Policies: []v1beta1.AppPolicy{{
Name: "override-policy",
Type: "override",
Properties: nil,
}},
Error: "empty properties",
},
"normal": {
Policies: []v1beta1.AppPolicy{{
Name: "override-policy",

View File

@@ -265,7 +265,7 @@ func (t *TaskLoader) makeValue(ctx wfContext.Context, templ string, id string, p
}
contextTempl += "\n" + pCtx.ExtendedContextFile()
return value.NewValue(templ+contextTempl, t.pd, contextTempl, value.ProcessScript, value.TagFieldOrder)
return value.NewValue(templ+contextTempl, t.pd, "", value.ProcessScript, value.TagFieldOrder)
}
type executor struct {

View File

@@ -272,7 +272,7 @@ func (tr *stepGroupTaskRunner) Run(ctx wfContext.Context, options *types.TaskRun
}
// NewViewTaskDiscover will create a client for load task generator.
func NewViewTaskDiscover(pd *packages.PackageDiscover, cli client.Client, cfg *rest.Config, apply kube.Dispatcher, delete kube.Deleter, viewNs string, logLevel int, pCtx process.Context) types.TaskDiscover {
func NewViewTaskDiscover(pd *packages.PackageDiscover, cli client.Client, cfg *rest.Config, apply kube.Dispatcher, delete kube.Deleter, viewNs string, logLevel int, pCtx process.Context, loader template.Loader) types.TaskDiscover {
handlerProviders := providers.NewProviders()
// install builtin provider
@@ -282,10 +282,9 @@ func NewViewTaskDiscover(pd *packages.PackageDiscover, cli client.Client, cfg *r
http.Install(handlerProviders, cli, viewNs)
email.Install(handlerProviders)
templateLoader := template.NewViewTemplateLoader(cli, viewNs)
return &taskDiscover{
remoteTaskDiscover: custom.NewTaskLoader(templateLoader.LoadTaskTemplate, pd, handlerProviders, logLevel, pCtx),
templateLoader: templateLoader,
remoteTaskDiscover: custom.NewTaskLoader(loader.LoadTaskTemplate, pd, handlerProviders, logLevel, pCtx),
templateLoader: loader,
}
}

View File

@@ -119,3 +119,12 @@ func NewViewTemplateLoader(client client.Client, namespace string) Loader {
namespace: namespace,
}
}
// EchoLoader will load data from input as it is.
type EchoLoader struct {
}
// LoadTaskTemplate gets the echo content exactly what it is .
func (ll *EchoLoader) LoadTaskTemplate(_ context.Context, content string) (string, error) {
return content, nil
}

View File

@@ -403,14 +403,14 @@ func (w *workflow) setMetadataToContext(wfCtx wfContext.Context) error {
return wfCtx.SetVar(metadata, wfTypes.ContextKeyMetadata)
}
func (e *engine) getBackoffTimes(stepID string) (success bool, backoffTimes int) {
func (e *engine) getBackoffTimes(stepID string) int {
if v, ok := e.wfCtx.GetValueInMemory(wfTypes.ContextPrefixBackoffTimes, stepID); ok {
times, ok := v.(int)
if ok {
return true, times
return times
}
}
return false, 0
return -1
}
func (e *engine) getBackoffWaitTime() int {
@@ -418,17 +418,19 @@ func (e *engine) getBackoffWaitTime() int {
minTimes := 15
found := false
for _, step := range e.status.Steps {
success, backoffTimes := e.getBackoffTimes(step.ID)
if success && backoffTimes < minTimes {
minTimes = backoffTimes
if backoffTimes := e.getBackoffTimes(step.ID); backoffTimes > 0 {
found = true
if backoffTimes < minTimes {
minTimes = backoffTimes
}
}
if step.SubStepsStatus != nil {
for _, subStep := range step.SubStepsStatus {
success, backoffTimes := e.getBackoffTimes(subStep.ID)
if success && backoffTimes < minTimes {
minTimes = backoffTimes
if backoffTimes := e.getBackoffTimes(subStep.ID); backoffTimes > 0 {
found = true
if backoffTimes < minTimes {
minTimes = backoffTimes
}
}
}
}

View File

@@ -912,10 +912,12 @@ var _ = Describe("Test Workflow", func() {
Expect(interval).Should(BeEquivalentTo(int(0.05 * math.Pow(2, float64(i+5)))))
}
_, err = wf.ExecuteSteps(ctx, revision, runners)
Expect(err).ToNot(HaveOccurred())
interval = e.getBackoffWaitTime()
Expect(interval).Should(BeEquivalentTo(MaxWorkflowWaitBackoffTime))
for i := 0; i < 10; i++ {
_, err = wf.ExecuteSteps(ctx, revision, runners)
Expect(err).ToNot(HaveOccurred())
interval = e.getBackoffWaitTime()
Expect(interval).Should(BeEquivalentTo(MaxWorkflowWaitBackoffTime))
}
By("Test get backoff time after clean")
wfContext.CleanupMemoryStore(app.Name, app.Namespace)

View File

@@ -76,6 +76,8 @@ var addonClusters string
var verboseSatatus bool
var skipValidate bool
// NewAddonCommand create `addon` command
func NewAddonCommand(c common.Args, order string, ioStreams cmdutil.IOStreams) *cobra.Command {
cmd := &cobra.Command{
@@ -189,6 +191,7 @@ Enable addon for specific clusters, (local means control plane):
cmd.Flags().StringVarP(&addonVersion, "version", "v", "", "specify the addon version to enable")
cmd.Flags().StringVarP(&addonClusters, types.ClustersArg, "c", "", "specify the runtime-clusters to enable")
cmd.Flags().BoolVarP(&skipValidate, "skip-version-validating", "s", false, "skip validating system version requirement")
return cmd
}
@@ -285,6 +288,7 @@ Upgrade addon for specific clusters, (local means control plane):
},
}
cmd.Flags().StringVarP(&addonVersion, "version", "v", "", "specify the addon version to upgrade")
cmd.Flags().BoolVarP(&skipValidate, "skip-version-validating", "s", false, "skip validating system version requirement")
return cmd
}
@@ -362,7 +366,11 @@ func enableAddon(ctx context.Context, k8sClient client.Client, dc *discovery.Dis
}
for _, registry := range registries {
err = pkgaddon.EnableAddon(ctx, name, version, k8sClient, dc, apply.NewAPIApplicator(k8sClient), config, registry, args, nil)
var opts []pkgaddon.InstallOption
if skipValidate {
opts = append(opts, pkgaddon.SkipValidateVersion)
}
err = pkgaddon.EnableAddon(ctx, name, version, k8sClient, dc, apply.NewAPIApplicator(k8sClient), config, registry, args, nil, opts...)
if errors.Is(err, pkgaddon.ErrNotExist) {
continue
}
@@ -382,7 +390,11 @@ func enableAddon(ctx context.Context, k8sClient client.Client, dc *discovery.Dis
// enableAddonByLocal enable addon in local dir and return the addon name
func enableAddonByLocal(ctx context.Context, name string, dir string, k8sClient client.Client, dc *discovery.DiscoveryClient, config *rest.Config, args map[string]interface{}) error {
if err := pkgaddon.EnableAddonByLocalDir(ctx, name, dir, k8sClient, dc, apply.NewAPIApplicator(k8sClient), config, args); err != nil {
var opts []pkgaddon.InstallOption
if skipValidate {
opts = append(opts, pkgaddon.SkipValidateVersion)
}
if err := pkgaddon.EnableAddonByLocalDir(ctx, name, dir, k8sClient, dc, apply.NewAPIApplicator(k8sClient), config, args, opts...); err != nil {
return err
}
if err := waitApplicationRunning(k8sClient, name); err != nil {
@@ -719,8 +731,15 @@ func waitApplicationRunning(k8sClient client.Client, addonName string) error {
return client.IgnoreNotFound(err)
}
phase := app.Status.Phase
if phase == common2.ApplicationRunning {
switch app.Status.Phase {
case common2.ApplicationRunning:
return nil
case common2.ApplicationWorkflowSuspending:
fmt.Printf("Enabling suspend, please run \"vela workflow resume %s -n vela-system\" to continue", pkgaddon.Convert2AppName(addonName))
return nil
case common2.ApplicationWorkflowTerminated:
return errors.Errorf("Enabling failed, please run \"vela status %s -n vela-system\" to check the status of the addon", pkgaddon.Convert2AppName(addonName))
default:
}
timeConsumed := int(time.Since(start).Seconds())
applySpinnerNewSuffix(spinner, fmt.Sprintf("Waiting addon application running. It is now in phase: %s (timeout %d/%d seconds)...",

View File

@@ -285,7 +285,7 @@ func NewDefinitionInitCommand(c common.Args) *cobra.Command {
cmd.Flags().StringP(FlagTemplateYAML, "f", "", "Specify the template yaml file that definition will use to build the schema. If empty, a default template for the given definition type will be used.")
cmd.Flags().StringP(FlagOutput, "o", "", "Specify the output path of the generated definition. If empty, the definition will be printed in the console.")
cmd.Flags().BoolP(FlagInteractive, "i", false, "Specify whether use interactive process to help generate definitions.")
cmd.Flags().StringP(FlagProvider, "p", "", "Specify which provider the cloud resource definition belongs to. Only `alibaba`, `aws`, `azure` are supported.")
cmd.Flags().StringP(FlagProvider, "p", "", "Specify which provider the cloud resource definition belongs to. Only `alibaba`, `aws`, `azure`, `gcp`, `baidu`, `tencent`, `elastic`, `ucloud`, `vsphere` are supported.")
cmd.Flags().StringP(FlagGit, "", "", "Specify which git repository the configuration(HCL) is stored in. Valid when --provider/-p is set.")
cmd.Flags().StringP(FlagLocal, "", "", "Specify the local path of the configuration(HCL) file. Valid when --provider/-p is set.")
cmd.Flags().StringP(FlagPath, "", "", "Specify which path the configuration(HCL) is stored in the Git repository. Valid when --git is set.")
@@ -298,7 +298,7 @@ func generateTerraformTypedComponentDefinition(cmd *cobra.Command, name, kind, p
}
switch provider {
case "aws", "azure", "alibaba", "tencent", "gcp", "baidu", "elastic", "ucloud":
case "aws", "azure", "alibaba", "tencent", "gcp", "baidu", "elastic", "ucloud", "vsphere":
var terraform *commontype.Terraform
git, err := cmd.Flags().GetString(FlagGit)
@@ -374,7 +374,7 @@ func generateTerraformTypedComponentDefinition(cmd *cobra.Command, name, kind, p
}
return out.String(), nil
default:
return "", errors.Errorf("Provider `%s` is not supported. Only `alibaba`, `aws`, `azure`, `gcp`, `baidu`, `tencent`, `elastic`, `ucloud` are supported.", provider)
return "", errors.Errorf("Provider `%s` is not supported. Only `alibaba`, `aws`, `azure`, `gcp`, `baidu`, `tencent`, `elastic`, `ucloud`, `vsphere` are supported.", provider)
}
}

View File

@@ -144,13 +144,13 @@ func prepareProviderAddSubCommand(c common.Args, ioStreams cmdutil.IOStreams) ([
if len(os.Args) < 2 || os.Args[1] != "provider" {
return nil, nil
}
ctx, cancel := context.WithTimeout(context.Background(), time.Minute*1)
timeoutCtx, cancel := context.WithTimeout(context.Background(), time.Minute*1)
defer cancel()
k8sClient, err := c.GetClient()
if err != nil {
return nil, err
}
defs, err := getTerraformProviderTypes(ctx, k8sClient)
defs, err := getTerraformProviderTypes(timeoutCtx, k8sClient)
if err == nil {
cmds := make([]*cobra.Command, len(defs))
for i, d := range defs {
@@ -161,14 +161,17 @@ func prepareProviderAddSubCommand(c common.Args, ioStreams cmdutil.IOStreams) ([
Long: fmt.Sprintf("Authenticate Terraform Cloud Provider %s by creating a credential secret and a Terraform Controller Provider", providerType),
Example: fmt.Sprintf("vela provider add %s", providerType),
}
parameters, err := getParameters(ctx, k8sClient, providerType)
parameters, err := getParameters(context.Background(), k8sClient, providerType)
if err != nil {
return nil, err
}
for _, p := range parameters {
// TODO(wonderflow): make the provider default name to be unique but keep the compatiblility as some Application didn't specify the name,
// now it's “default” for every one, the name will conflict if we have more than one cloud provider.
cmd.Flags().String(p.Name, fmt.Sprint(p.Default), p.Usage)
}
cmd.RunE = func(cmd *cobra.Command, args []string) error {
newContext := context.Background()
name, err := cmd.Flags().GetString(providerNameParam)
if err != nil || name == "" {
return fmt.Errorf("must specify a name for the Terraform Cloud Provider %s", providerType)
@@ -188,8 +191,7 @@ func prepareProviderAddSubCommand(c common.Args, ioStreams cmdutil.IOStreams) ([
if err != nil {
return fmt.Errorf(errAuthenticateProvider, providerType, err)
}
if err := config.CreateApplication(ctx, k8sClient, name, providerType, string(data), config.UIParam{}); err != nil {
if err := config.CreateApplication(newContext, k8sClient, name, providerType, string(data), config.UIParam{}); err != nil {
return fmt.Errorf(errAuthenticateProvider, providerType, err)
}
ioStreams.Infof("Successfully authenticate provider %s for %s\n", name, providerType)
@@ -333,9 +335,9 @@ func prepareProviderDeleteCommand(c common.Args, ioStreams cmdutil.IOStreams) *c
if err != nil {
return err
}
ctx, cancel := context.WithTimeout(context.Background(), time.Minute*1)
timeoutCtx, cancel := context.WithTimeout(context.Background(), time.Minute*1)
defer cancel()
defs, err := getTerraformProviderTypes(ctx, k8sClient)
defs, err := getTerraformProviderTypes(timeoutCtx, k8sClient)
if len(args) < 1 {
errMsg := "must specify a Terraform Cloud Provider type"
if err == nil {
@@ -371,13 +373,13 @@ func prepareProviderDeleteSubCommand(c common.Args, ioStreams cmdutil.IOStreams)
if len(os.Args) < 2 || os.Args[1] != "provider" {
return nil, nil
}
ctx, cancel := context.WithTimeout(context.Background(), time.Minute*1)
timeoutContext, cancel := context.WithTimeout(context.Background(), time.Minute*1)
defer cancel()
k8sClient, err := c.GetClient()
if err != nil {
return nil, err
}
defs, err := getTerraformProviderTypes(ctx, k8sClient)
defs, err := getTerraformProviderTypes(timeoutContext, k8sClient)
if err == nil {
cmds := make([]*cobra.Command, len(defs))
for i, d := range defs {
@@ -388,7 +390,7 @@ func prepareProviderDeleteSubCommand(c common.Args, ioStreams cmdutil.IOStreams)
Long: fmt.Sprintf("Delete Terraform Cloud Provider %s", providerType),
Example: fmt.Sprintf("vela provider delete %s", providerType),
}
parameters, err := getParameters(ctx, k8sClient, providerType)
parameters, err := getParameters(context.Background(), k8sClient, providerType)
if err != nil {
return nil, err
}
@@ -402,7 +404,7 @@ func prepareProviderDeleteSubCommand(c common.Args, ioStreams cmdutil.IOStreams)
if err != nil || name == "" {
return fmt.Errorf("must specify a name for the Terraform Cloud Provider %s", providerType)
}
if err := config.DeleteApplication(ctx, k8sClient, name, true); err != nil {
if err := config.DeleteApplication(context.Background(), k8sClient, name, true); err != nil {
return errors.Wrapf(err, "failed to delete Terraform Cloud Provider %s", name)
}
ioStreams.Infof("Successfully delete provider %s for %s\n", name, providerType)

View File

@@ -100,12 +100,6 @@ func startReferenceDocsSite(ctx context.Context, ns string, c common.Args, ioStr
}
referenceHome := filepath.Join(home, "reference")
definitionPath := filepath.Join(referenceHome, "capabilities")
if _, err := os.Stat(definitionPath); err != nil && os.IsNotExist(err) {
if err := os.MkdirAll(definitionPath, 0750); err != nil {
return err
}
}
docsPath := filepath.Join(referenceHome, "docs")
if _, err := os.Stat(docsPath); err != nil && os.IsNotExist(err) {
if err := os.MkdirAll(docsPath, 0750); err != nil {
@@ -171,22 +165,12 @@ func startReferenceDocsSite(ctx context.Context, ns string, c common.Args, ioStr
return err
}
var capabilityPath string
switch capabilityType {
case types.TypeWorkload:
capabilityPath = plugins.WorkloadTypePath
case types.TypeTrait:
capabilityPath = plugins.TraitPath
case types.TypeScope:
case types.TypeComponentDefinition:
capabilityPath = plugins.ComponentDefinitionTypePath
case types.TypeWorkflowStep:
capabilityPath = plugins.WorkflowStepPath
default:
if capabilityType != types.TypeWorkload && capabilityType != types.TypeTrait && capabilityType != types.TypeScope &&
capabilityType != types.TypeComponentDefinition && capabilityType != types.TypeWorkflowStep {
return fmt.Errorf("unsupported type: %v", capabilityType)
}
url := fmt.Sprintf("http://127.0.0.1%s/#/%s/%s", Port, capabilityPath, capabilityName)
url := fmt.Sprintf("http://127.0.0.1%s/#/%s/%s", Port, capabilityType, capabilityName)
server := &http.Server{
Addr: Port,
Handler: http.FileServer(http.Dir(docsPath)),
@@ -227,7 +211,7 @@ func launch(server *http.Server, errChan chan<- error) {
func generateSideBar(capabilities []types.Capability, docsPath string) error {
sideBar := filepath.Join(docsPath, SideBar)
components, traits, workflowsteps := getDefinitions(capabilities)
components, traits, workflowSteps, policies := getDefinitions(capabilities)
f, err := os.Create(sideBar)
if err != nil {
return err
@@ -235,8 +219,9 @@ func generateSideBar(capabilities []types.Capability, docsPath string) error {
if _, err := f.WriteString("- Components Types\n"); err != nil {
return err
}
for _, c := range components {
if _, err := f.WriteString(fmt.Sprintf(" - [%s](%s/%s.md)\n", c, plugins.ComponentDefinitionTypePath, c)); err != nil {
if _, err := f.WriteString(fmt.Sprintf(" - [%s](%s/%s.md)\n", c, types.TypeComponentDefinition, c)); err != nil {
return err
}
}
@@ -244,15 +229,24 @@ func generateSideBar(capabilities []types.Capability, docsPath string) error {
return err
}
for _, t := range traits {
if _, err := f.WriteString(fmt.Sprintf(" - [%s](%s/%s.md)\n", t, plugins.TraitPath, t)); err != nil {
if _, err := f.WriteString(fmt.Sprintf(" - [%s](%s/%s.md)\n", t, types.TypeTrait, t)); err != nil {
return err
}
}
if _, err := f.WriteString("- Workflow Steps\n"); err != nil {
return err
}
for _, t := range workflowsteps {
if _, err := f.WriteString(fmt.Sprintf(" - [%s](%s/%s.md)\n", t, plugins.WorkflowStepPath, t)); err != nil {
for _, t := range workflowSteps {
if _, err := f.WriteString(fmt.Sprintf(" - [%s](%s/%s.md)\n", t, types.TypeWorkflowStep, t)); err != nil {
return err
}
}
if _, err := f.WriteString("- Policies\n"); err != nil {
return err
}
for _, t := range policies {
if _, err := f.WriteString(fmt.Sprintf(" - [%s](%s/%s.md)\n", t, types.TypePolicy, t)); err != nil {
return err
}
}
@@ -327,14 +321,14 @@ func generateREADME(capabilities []types.Capability, docsPath string) error {
return err
}
workloads, traits, workflowsteps := getDefinitions(capabilities)
workloads, traits, workflowSteps, policies := getDefinitions(capabilities)
if _, err := f.WriteString("## Component Types\n"); err != nil {
return err
}
for _, w := range workloads {
if _, err := f.WriteString(fmt.Sprintf(" - [%s](%s/%s.md)\n", w, plugins.ComponentDefinitionTypePath, w)); err != nil {
if _, err := f.WriteString(fmt.Sprintf(" - [%s](%s/%s.md)\n", w, types.TypeComponentDefinition, w)); err != nil {
return err
}
}
@@ -343,7 +337,7 @@ func generateREADME(capabilities []types.Capability, docsPath string) error {
}
for _, t := range traits {
if _, err := f.WriteString(fmt.Sprintf(" - [%s](%s/%s.md)\n", t, plugins.TraitPath, t)); err != nil {
if _, err := f.WriteString(fmt.Sprintf(" - [%s](%s/%s.md)\n", t, types.TypeTrait, t)); err != nil {
return err
}
}
@@ -351,16 +345,26 @@ func generateREADME(capabilities []types.Capability, docsPath string) error {
if _, err := f.WriteString("## Workflow Steps\n"); err != nil {
return err
}
for _, t := range workflowsteps {
if _, err := f.WriteString(fmt.Sprintf(" - [%s](%s/%s.md)\n", t, plugins.WorkflowStepPath, t)); err != nil {
for _, t := range workflowSteps {
if _, err := f.WriteString(fmt.Sprintf(" - [%s](%s/%s.md)\n", t, types.TypeWorkflowStep, t)); err != nil {
return err
}
}
if _, err := f.WriteString("## Policies\n"); err != nil {
return err
}
for _, t := range policies {
if _, err := f.WriteString(fmt.Sprintf(" - [%s](%s/%s.md)\n", t, types.TypePolicy, t)); err != nil {
return err
}
}
return nil
}
func getDefinitions(capabilities []types.Capability) ([]string, []string, []string) {
var components, traits, workflowSteps []string
func getDefinitions(capabilities []types.Capability) ([]string, []string, []string, []string) {
var components, traits, workflowSteps, policies []string
for _, c := range capabilities {
switch c.Type {
case types.TypeComponentDefinition:
@@ -369,12 +373,14 @@ func getDefinitions(capabilities []types.Capability) ([]string, []string, []stri
traits = append(traits, c.Name)
case types.TypeWorkflowStep:
workflowSteps = append(workflowSteps, c.Name)
case types.TypePolicy:
policies = append(policies, c.Name)
case types.TypeScope:
case types.TypeWorkload:
default:
}
}
return components, traits, workflowSteps
return components, traits, workflowSteps, policies
}
// ShowReferenceConsole will show capability reference in console

View File

@@ -27,7 +27,6 @@ import (
"github.com/stretchr/testify/assert"
"github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/references/plugins"
)
const BaseDir = "testdata"
@@ -134,9 +133,9 @@ func TestGenerateREADME(t *testing.T) {
for _, c := range tc.capabilities {
switch c.Type {
case types.TypeComponentDefinition:
assert.Contains(t, string(data), fmt.Sprintf(" - [%s](%s/%s.md)\n", c.Name, plugins.ComponentDefinitionTypePath, c.Name))
assert.Contains(t, string(data), fmt.Sprintf(" - [%s](%s/%s.md)\n", c.Name, types.TypeComponentDefinition, c.Name))
case types.TypeTrait:
assert.Contains(t, string(data), fmt.Sprintf(" - [%s](%s/%s.md)\n", c.Name, plugins.TraitPath, c.Name))
assert.Contains(t, string(data), fmt.Sprintf(" - [%s](%s/%s.md)\n", c.Name, types.TypeTrait, c.Name))
}
}
})
@@ -147,10 +146,15 @@ func TestGetWorkloadAndTraits(t *testing.T) {
type want struct {
workloads []string
traits []string
policies []string
}
workloadName := "component1"
traitName := "trait1"
scopeName := "scope1"
var (
workloadName = "component1"
traitName = "trait1"
scopeName = "scope1"
policyName = "policy1"
)
cases := map[string]struct {
reason string
@@ -187,11 +191,22 @@ func TestGetWorkloadAndTraits(t *testing.T) {
traits: nil,
},
},
"PolicyTypeCapability": {
capabilities: []types.Capability{
{
Name: policyName,
Type: types.TypePolicy,
},
},
want: want{
policies: []string{policyName},
},
},
}
for name, tc := range cases {
t.Run(name, func(t *testing.T) {
gotWorkloads, gotTraits, _ := getDefinitions(tc.capabilities)
assert.Equal(t, tc.want, want{workloads: gotWorkloads, traits: gotTraits})
gotWorkloads, gotTraits, _, gotPolicies := getDefinitions(tc.capabilities)
assert.Equal(t, tc.want, want{workloads: gotWorkloads, traits: gotTraits, policies: gotPolicies})
})
}
}

Some files were not shown because too many files have changed in this diff Show More