Compare commits

...

30 Commits

Author SHA1 Message Date
Somefive
35a84e9cbf [Backport release-1.4] Fix: gc failure cause workflow restart not working properly (#5241)
* Fix: gc failure cause workflow restart not working properly

Signed-off-by: Somefive <yd219913@alibaba-inc.com>

* Feat: switch ci machine

Signed-off-by: Somefive <yd219913@alibaba-inc.com>

* Fix: enhance test

Signed-off-by: Somefive <yd219913@alibaba-inc.com>

Signed-off-by: Somefive <yd219913@alibaba-inc.com>
2023-01-03 14:52:37 +08:00
github-actions[bot]
bf251d5039 Chore: change the package name of the readme-generator-for-helm (#4895)
Signed-off-by: barnettZQG <barnett.zqg@gmail.com>
(cherry picked from commit 8b46e6076a)

Co-authored-by: barnettZQG <barnett.zqg@gmail.com>
2022-10-20 16:38:40 +08:00
github-actions[bot]
31f0b28d96 Fix: support default value of ui schema (#4858)
Signed-off-by: Jianbo Sun <jianbo.sjb@alibaba-inc.com>
(cherry picked from commit e584a35c83)

Co-authored-by: Jianbo Sun <jianbo.sjb@alibaba-inc.com>
2022-10-13 14:45:10 +08:00
github-actions[bot]
810c47545e [Backport release-1.4] Fix: allow to read definition from user's namespace when force delete (#4788)
* Fix: allow to read definition from user's namespace when force deleting app with configuration

Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>
(cherry picked from commit 2f08c36132)

* Fix test

Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>
(cherry picked from commit 981950a14d)

* Fix wrong test

Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>
(cherry picked from commit 62863f1007)

Co-authored-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>
2022-09-27 11:58:19 +08:00
github-actions[bot]
293f38dd84 Fix: panic when properties empty (#4747)
Signed-off-by: Somefive <yd219913@alibaba-inc.com>
(cherry picked from commit 13fec3cb18)

Co-authored-by: Somefive <yd219913@alibaba-inc.com>
2022-09-19 11:18:33 +08:00
barnettZQG
1cf2cd23d2 Fix: CVE-2022-27664 (#4721) (#4724)
Signed-off-by: barnettZQG <barnett.zqg@gmail.com>

Signed-off-by: barnettZQG <barnett.zqg@gmail.com>
2022-09-14 16:15:30 +08:00
github-actions[bot]
69cf083d4a Fix: fix uninstallation continues when answer is no (#4711)
Signed-off-by: Charlie Chiang <charlie_c_0129@outlook.com>
(cherry picked from commit 81115ef6ff)

Co-authored-by: Charlie Chiang <charlie_c_0129@outlook.com>
2022-09-13 10:24:46 +08:00
Tianxin Dong
7df2b34a0b Fix: fix signedKey using platform id (#4653)
Signed-off-by: FogDong <dongtianxin.tx@alibaba-inc.com>

Signed-off-by: FogDong <dongtianxin.tx@alibaba-inc.com>
2022-08-24 19:33:24 +08:00
github-actions[bot]
2a03e16098 fix: add supported but missing provider (#4650)
Signed-off-by: Yuedong Wu <57584831+lunarwhite@users.noreply.github.com>
(cherry picked from commit fa96c917a8)

Co-authored-by: Yuedong Wu <57584831+lunarwhite@users.noreply.github.com>
2022-08-24 09:28:35 +08:00
github-actions[bot]
110927ed97 Fix: fix writing logs to file (#4588)
Signed-off-by: FogDong <dongtianxin.tx@alibaba-inc.com>
(cherry picked from commit d4b3bbf049)

Co-authored-by: FogDong <dongtianxin.tx@alibaba-inc.com>
2022-08-10 10:00:39 +08:00
github-actions[bot]
90fbfa0f81 Feat: definition support controller requirement (#4578)
Signed-off-by: yangsoon <songyang.song@alibaba-inc.com>
(cherry picked from commit 714f218f90)

Co-authored-by: yangsoon <songyang.song@alibaba-inc.com>
2022-08-08 16:07:44 +08:00
github-actions[bot]
7fb045328d [Backport release-1.4] Fix: reject applications with empty policy properties (#4565)
* Fix: reject applications with empty policies

Signed-off-by: Charlie Chiang <charlie_c_0129@outlook.com>
(cherry picked from commit 337032511e)

* Style: change err msg

Signed-off-by: Charlie Chiang <charlie_c_0129@outlook.com>
(cherry picked from commit 2bb5c0245a)

* Fix: use 400 instead of 422 to show err msg

Signed-off-by: Charlie Chiang <charlie_c_0129@outlook.com>
(cherry picked from commit 553ac92c62)

* Test: fix tests

Signed-off-by: Charlie Chiang <charlie_c_0129@outlook.com>
(cherry picked from commit 0ce352d13b)

Co-authored-by: Charlie Chiang <charlie_c_0129@outlook.com>
2022-08-05 15:04:47 +08:00
Somefive
766c5852c6 Fix: address vela-core crash due to empty policy properties (#4473) (#4480)
* Fix: fix topology core crash

Signed-off-by: Charlie Chiang <charlie_c_0129@outlook.com>

* Test: add tests

Signed-off-by: Charlie Chiang <charlie_c_0129@outlook.com>

* Fix: same problem in other places

Signed-off-by: Charlie Chiang <charlie_c_0129@outlook.com>

* Style: remove empty line

Signed-off-by: Charlie Chiang <charlie_c_0129@outlook.com>

* Feat: raise error when empty topology is used

Signed-off-by: Charlie Chiang <charlie_c_0129@outlook.com>

* Feat: raise error when empty override policy is used

Signed-off-by: Charlie Chiang <charlie_c_0129@outlook.com>

Co-authored-by: Charlie Chiang <charlie_c_0129@outlook.com>
2022-07-27 13:48:19 +08:00
github-actions[bot]
4cb9a14b18 Fix: fix logs to record the right publish version (#4476)
Signed-off-by: yangsoon <songyang.song@alibaba-inc.com>
(cherry picked from commit 4846104c8f)

Co-authored-by: yangsoon <songyang.song@alibaba-inc.com>
2022-07-27 01:13:07 +08:00
github-actions[bot]
9a1e75cf48 Fix: The apply failure error is ignored when the workflow is executed (#4461)
Signed-off-by: yangsoon <songyang.song@alibaba-inc.com>
(cherry picked from commit b1d8e6c88b)

Co-authored-by: yangsoon <songyang.song@alibaba-inc.com>
2022-07-25 22:19:26 +08:00
Tianxin Dong
192dc8966d Fix: fix backoff time after default backoff times (#4414)
Signed-off-by: FogDong <dongtianxin.tx@alibaba-inc.com>
2022-07-20 17:48:28 +08:00
Jianbo Sun
b596b70ebe Fix: addon function converted (#4411)
Signed-off-by: Jianbo Sun <jianbo.sjb@alibaba-inc.com>
2022-07-19 19:54:44 +08:00
github-actions[bot]
0cd370e867 Fix: fix volumes duplicate in list (#4390)
Signed-off-by: FogDong <dongtianxin.tx@alibaba-inc.com>
(cherry picked from commit 08fb73aa95)

Co-authored-by: FogDong <dongtianxin.tx@alibaba-inc.com>
2022-07-15 20:11:49 +08:00
github-actions[bot]
d9adc73e5c Fix: add usage comment for ref-objects (#4386)
Signed-off-by: Somefive <yd219913@alibaba-inc.com>
(cherry picked from commit 0418c83117)

Co-authored-by: Somefive <yd219913@alibaba-inc.com>
2022-07-14 18:51:05 +08:00
github-actions[bot]
4a2d9807c8 [Backport release-1.4] Fix: fail directly when app terminated (#4385)
* fail directly when app terminated

Signed-off-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
(cherry picked from commit 842c211cf6)

* support suspend

Signed-off-by: 楚岳 <wangyike.wyk@alibaba-inc.com>

fix typo

Signed-off-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
(cherry picked from commit 307ef372f1)

Co-authored-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
2022-07-14 18:50:23 +08:00
github-actions[bot]
840cb8ce58 Fix: several minor bugs (#4381)
Signed-off-by: Somefive <yd219913@alibaba-inc.com>
(cherry picked from commit c5f10a5723)

Co-authored-by: Somefive <yd219913@alibaba-inc.com>
2022-07-14 11:22:41 +08:00
github-actions[bot]
5a64fec916 Fix: abuse timeout context in terraform provider (#4375)
Signed-off-by: Jianbo Sun <jianbo.sjb@alibaba-inc.com>
(cherry picked from commit ab56b3c274)

Co-authored-by: Jianbo Sun <jianbo.sjb@alibaba-inc.com>
2022-07-13 15:48:13 +08:00
Jianbo Sun
657a374ded Fix: add the job of independently publishing chart packages (#4360) (#4361)
* Fix: add the job of independently publishing chart packages

Signed-off-by: barnettZQG <barnett.zqg@gmail.com>

* Fix: add the job of independently publishing chart packages

Signed-off-by: barnettZQG <barnett.zqg@gmail.com>

Co-authored-by: barnettZQG <barnett.zqg@gmail.com>
2022-07-12 12:31:05 +08:00
Tianxin Dong
dfe12cd9ca [Backport-1.4]: optimize imports packages to reduce 75% cpu with better performance (#4355)
* Feat: optimize imports packages

Signed-off-by: FogDong <dongtianxin.tx@alibaba-inc.com>

* fix test

Signed-off-by: FogDong <dongtianxin.tx@alibaba-inc.com>
2022-07-11 18:33:53 +08:00
github-actions[bot]
cd42f67848 Fix: init container bug (#4354)
Signed-off-by: Somefive <yd219913@alibaba-inc.com>
(cherry picked from commit 3f116c7f10)

Co-authored-by: Somefive <yd219913@alibaba-inc.com>
2022-07-11 17:03:49 +08:00
github-actions[bot]
61d2c588e3 Fix: health check use original ns if no override and original exists (#4353)
Signed-off-by: Somefive <yd219913@alibaba-inc.com>
(cherry picked from commit f7923b5ac9)

Co-authored-by: Somefive <yd219913@alibaba-inc.com>
2022-07-11 16:49:24 +08:00
github-actions[bot]
b3dad698a5 Fix: enhance sidecar & init traits (#4343)
Signed-off-by: Somefive <yd219913@alibaba-inc.com>
(cherry picked from commit dc9b18d119)

Co-authored-by: Somefive <yd219913@alibaba-inc.com>
2022-07-08 19:09:55 +08:00
github-actions[bot]
ec5159c2ca Fix: disable apprev status update when apprev disabled (#4338)
Signed-off-by: Somefive <yd219913@alibaba-inc.com>
(cherry picked from commit b4c8e3265a)

Co-authored-by: Somefive <yd219913@alibaba-inc.com>
2022-07-07 15:51:35 +08:00
github-actions[bot]
a7b2b221e0 [Backport release-1.4] Fix: more cluster system info range. (#4333)
* more collect info

Signed-off-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
(cherry picked from commit b27764e072)

* fix comments

Signed-off-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
(cherry picked from commit 57805aa844)

Co-authored-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
2022-07-06 16:15:31 +08:00
github-actions[bot]
caa495a5d9 [Backport release-1.4] Fix: ref-objects parameter with invalid field definition (#4330)
* fix: ref-objects parameter with invalid field definition

which cause validating webhook failed when use ref-objects component

Signed-off-by: jiangshantao <jiangshantao-dbg@qq.com>
(cherry picked from commit 13f328f362)

* fix: run make reviewable

Signed-off-by: jiangshantao <jiangshantao-dbg@qq.com>
(cherry picked from commit e09410af90)

Co-authored-by: jst <jst@meitu.com>
2022-07-06 14:18:17 +08:00
90 changed files with 1083 additions and 321 deletions

View File

@@ -55,7 +55,7 @@ jobs:
apiserver-unit-tests:
runs-on: aliyun
runs-on: aliyun-legacy
needs: [ detect-noop,set-k8s-matrix ]
if: needs.detect-noop.outputs.noop != 'true'
strategy:

.github/workflows/chart.yaml (new file, 89 lines)
View File

@@ -0,0 +1,89 @@
name: Publish Chart
on:
push:
tags:
- "v*"
workflow_dispatch: { }
env:
BUCKET: ${{ secrets.OSS_BUCKET }}
ENDPOINT: ${{ secrets.OSS_ENDPOINT }}
ACCESS_KEY: ${{ secrets.OSS_ACCESS_KEY }}
ACCESS_KEY_SECRET: ${{ secrets.OSS_ACCESS_KEY_SECRET }}
ARTIFACT_HUB_REPOSITORY_ID: ${{ secrets.ARTIFACT_HUB_REPOSITORY_ID }}
jobs:
publish-charts:
env:
HELM_CHARTS_DIR: charts
HELM_CHART: charts/vela-core
MINIMAL_HELM_CHART: charts/vela-minimal
LEGACY_HELM_CHART: legacy/charts/vela-core-legacy
VELA_ROLLOUT_HELM_CHART: runtime/rollout/charts
LOCAL_OSS_DIRECTORY: .oss/
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@master
- name: Get git revision
id: vars
shell: bash
run: |
echo "::set-output name=git_revision::$(git rev-parse --short HEAD)"
- name: Install Helm
uses: azure/setup-helm@v1
with:
version: v3.4.0
- name: Setup node
uses: actions/setup-node@v2
with:
node-version: '14'
- name: Generate helm doc
run: |
make helm-doc-gen
- name: Prepare legacy chart
run: |
rsync -r $LEGACY_HELM_CHART $HELM_CHARTS_DIR
rsync -r $HELM_CHART/* $LEGACY_HELM_CHART --exclude=Chart.yaml --exclude=crds
- name: Prepare vela chart
run: |
rsync -r $VELA_ROLLOUT_HELM_CHART $HELM_CHARTS_DIR
- name: Get the version
id: get_version
run: |
VERSION=${GITHUB_REF#refs/tags/}
echo ::set-output name=VERSION::${VERSION}
- name: Tag helm chart image
run: |
image_tag=${{ steps.get_version.outputs.VERSION }}
chart_version=${{ steps.get_version.outputs.VERSION }}
sed -i "s/latest/${image_tag}/g" $HELM_CHART/values.yaml
sed -i "s/latest/${image_tag}/g" $MINIMAL_HELM_CHART/values.yaml
sed -i "s/latest/${image_tag}/g" $LEGACY_HELM_CHART/values.yaml
sed -i "s/latest/${image_tag}/g" $VELA_ROLLOUT_HELM_CHART/values.yaml
chart_smever=${chart_version#"v"}
sed -i "s/0.1.0/$chart_smever/g" $HELM_CHART/Chart.yaml
sed -i "s/0.1.0/$chart_smever/g" $MINIMAL_HELM_CHART/Chart.yaml
sed -i "s/0.1.0/$chart_smever/g" $LEGACY_HELM_CHART/Chart.yaml
sed -i "s/0.1.0/$chart_smever/g" $VELA_ROLLOUT_HELM_CHART/Chart.yaml
- name: Install ossutil
run: wget http://gosspublic.alicdn.com/ossutil/1.7.0/ossutil64 && chmod +x ossutil64 && mv ossutil64 ossutil
- name: Configure Alibaba Cloud OSSUTIL
run: ./ossutil --config-file .ossutilconfig config -i ${ACCESS_KEY} -k ${ACCESS_KEY_SECRET} -e ${ENDPOINT} -c .ossutilconfig
- name: sync cloud to local
run: ./ossutil --config-file .ossutilconfig sync oss://$BUCKET/core $LOCAL_OSS_DIRECTORY
- name: add artifacthub stuff to the repo
run: |
rsync $HELM_CHART/README.md $LEGACY_HELM_CHART/README.md
rsync $HELM_CHART/README.md $VELA_ROLLOUT_HELM_CHART/README.md
sed -i "s/ARTIFACT_HUB_REPOSITORY_ID/$ARTIFACT_HUB_REPOSITORY_ID/g" hack/artifacthub/artifacthub-repo.yml
rsync hack/artifacthub/artifacthub-repo.yml $LOCAL_OSS_DIRECTORY
- name: Package helm charts
run: |
helm package $HELM_CHART --destination $LOCAL_OSS_DIRECTORY
helm package $MINIMAL_HELM_CHART --destination $LOCAL_OSS_DIRECTORY
helm package $LEGACY_HELM_CHART --destination $LOCAL_OSS_DIRECTORY
helm package $VELA_ROLLOUT_HELM_CHART --destination $LOCAL_OSS_DIRECTORY
helm repo index --url https://$BUCKET.$ENDPOINT/core $LOCAL_OSS_DIRECTORY
- name: sync local to cloud
run: ./ossutil --config-file .ossutilconfig sync $LOCAL_OSS_DIRECTORY oss://$BUCKET/core -f

View File

@@ -53,7 +53,7 @@ jobs:
e2e-multi-cluster-tests:
runs-on: aliyun
runs-on: aliyun-legacy
needs: [ detect-noop,set-k8s-matrix ]
if: needs.detect-noop.outputs.noop != 'true'
strategy:
@@ -97,7 +97,9 @@ jobs:
kubectl cluster-info
- name: Load Image to kind cluster (Hub)
run: make kind-load
run: |
make kind-load
make kind-load-runtime-cluster
- name: Cleanup for e2e tests
run: |

View File

@@ -52,7 +52,7 @@ jobs:
fi
e2e-rollout-tests:
runs-on: aliyun
runs-on: aliyun-legacy
needs: [ detect-noop,set-k8s-matrix ]
if: needs.detect-noop.outputs.noop != 'true'
strategy:

View File

@@ -52,7 +52,7 @@ jobs:
fi
e2e-tests:
runs-on: aliyun
runs-on: aliyun-legacy
needs: [ detect-noop,set-k8s-matrix ]
if: needs.detect-noop.outputs.noop != 'true'
strategy:

View File

@@ -98,7 +98,7 @@ jobs:
version: ${{ env.GOLANGCI_VERSION }}
check-diff:
runs-on: aliyun
runs-on: aliyun-legacy
needs: detect-noop
if: needs.detect-noop.outputs.noop != 'true'

View File

@@ -8,11 +8,8 @@ on:
workflow_dispatch: {}
env:
BUCKET: ${{ secrets.OSS_BUCKET }}
ENDPOINT: ${{ secrets.OSS_ENDPOINT }}
ACCESS_KEY: ${{ secrets.OSS_ACCESS_KEY }}
ACCESS_KEY_SECRET: ${{ secrets.OSS_ACCESS_KEY_SECRET }}
ARTIFACT_HUB_REPOSITORY_ID: ${{ secrets.ARTIFACT_HUB_REPOSITORY_ID }}
jobs:
publish-core-images:
@@ -171,90 +168,6 @@ jobs:
ghcr.io/${{ github.repository_owner }}/oamdev/vela-rollout:${{ steps.get_version.outputs.VERSION }}
${{ secrets.ACR_DOMAIN }}/oamdev/vela-rollout:${{ steps.get_version.outputs.VERSION }}
publish-charts:
env:
HELM_CHARTS_DIR: charts
HELM_CHART: charts/vela-core
MINIMAL_HELM_CHART: charts/vela-minimal
LEGACY_HELM_CHART: legacy/charts/vela-core-legacy
VELA_ROLLOUT_HELM_CHART: runtime/rollout/charts
LOCAL_OSS_DIRECTORY: .oss/
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@master
- name: Get git revision
id: vars
shell: bash
run: |
echo "::set-output name=git_revision::$(git rev-parse --short HEAD)"
- name: Install Helm
uses: azure/setup-helm@v1
with:
version: v3.4.0
- name: Setup node
uses: actions/setup-node@v2
with:
node-version: '14'
- name: Generate helm doc
run: |
make helm-doc-gen
- name: Prepare legacy chart
run: |
rsync -r $LEGACY_HELM_CHART $HELM_CHARTS_DIR
rsync -r $HELM_CHART/* $LEGACY_HELM_CHART --exclude=Chart.yaml --exclude=crds
- name: Prepare vela chart
run: |
rsync -r $VELA_ROLLOUT_HELM_CHART $HELM_CHARTS_DIR
- uses: oprypin/find-latest-tag@v1
with:
repository: oam-dev/kubevela
releases-only: true
id: latest_tag
- name: Tag helm chart image
run: |
latest_repo_tag=${{ steps.latest_tag.outputs.tag }}
sub="."
major="$(cut -d"$sub" -f1 <<<"$latest_repo_tag")"
minor="$(cut -d"$sub" -f2 <<<"$latest_repo_tag")"
patch="0"
current_repo_tag="$major.$minor.$patch"
image_tag=${GITHUB_REF#refs/tags/}
chart_version=$latest_repo_tag
if [[ ${GITHUB_REF} == "refs/heads/master" ]]; then
image_tag=latest
chart_version=${current_repo_tag}-nightly-build
fi
sed -i "s/latest/${image_tag}/g" $HELM_CHART/values.yaml
sed -i "s/latest/${image_tag}/g" $MINIMAL_HELM_CHART/values.yaml
sed -i "s/latest/${image_tag}/g" $LEGACY_HELM_CHART/values.yaml
sed -i "s/latest/${image_tag}/g" $VELA_ROLLOUT_HELM_CHART/values.yaml
chart_smever=${chart_version#"v"}
sed -i "s/0.1.0/$chart_smever/g" $HELM_CHART/Chart.yaml
sed -i "s/0.1.0/$chart_smever/g" $MINIMAL_HELM_CHART/Chart.yaml
sed -i "s/0.1.0/$chart_smever/g" $LEGACY_HELM_CHART/Chart.yaml
sed -i "s/0.1.0/$chart_smever/g" $VELA_ROLLOUT_HELM_CHART/Chart.yaml
- name: Install ossutil
run: wget http://gosspublic.alicdn.com/ossutil/1.7.0/ossutil64 && chmod +x ossutil64 && mv ossutil64 ossutil
- name: Configure Alibaba Cloud OSSUTIL
run: ./ossutil --config-file .ossutilconfig config -i ${ACCESS_KEY} -k ${ACCESS_KEY_SECRET} -e ${ENDPOINT} -c .ossutilconfig
- name: sync cloud to local
run: ./ossutil --config-file .ossutilconfig sync oss://$BUCKET/core $LOCAL_OSS_DIRECTORY
- name: add artifacthub stuff to the repo
run: |
rsync $HELM_CHART/README.md $LEGACY_HELM_CHART/README.md
rsync $HELM_CHART/README.md $VELA_ROLLOUT_HELM_CHART/README.md
sed -i "s/ARTIFACT_HUB_REPOSITORY_ID/$ARTIFACT_HUB_REPOSITORY_ID/g" hack/artifacthub/artifacthub-repo.yml
rsync hack/artifacthub/artifacthub-repo.yml $LOCAL_OSS_DIRECTORY
- name: Package helm charts
run: |
helm package $HELM_CHART --destination $LOCAL_OSS_DIRECTORY
helm package $MINIMAL_HELM_CHART --destination $LOCAL_OSS_DIRECTORY
helm package $LEGACY_HELM_CHART --destination $LOCAL_OSS_DIRECTORY
helm package $VELA_ROLLOUT_HELM_CHART --destination $LOCAL_OSS_DIRECTORY
helm repo index --url https://$BUCKET.$ENDPOINT/core $LOCAL_OSS_DIRECTORY
- name: sync local to cloud
run: ./ossutil --config-file .ossutilconfig sync $LOCAL_OSS_DIRECTORY oss://$BUCKET/core -f
publish-capabilities:
env:
CAPABILITY_BUCKET: kubevela-registry

View File

@@ -4,7 +4,7 @@ on:
- cron: '* * * * *'
jobs:
clean-image:
runs-on: aliyun
runs-on: aliyun-legacy
steps:
- name: Cleanup image
run: docker image prune -f

View File

@@ -83,15 +83,17 @@ endif
# load docker image to the kind cluster
kind-load: kind-load-runtime-cluster
kind-load: kind-load-rollout
docker build -t $(VELA_CORE_TEST_IMAGE) -f Dockerfile.e2e .
kind load docker-image $(VELA_CORE_TEST_IMAGE) || { echo >&2 "kind not installed or error loading image: $(VELA_CORE_TEST_IMAGE)"; exit 1; }
kind-load-runtime-cluster:
kind-load-rollout:
/bin/sh hack/e2e/build_runtime_rollout.sh
docker build -t $(VELA_RUNTIME_ROLLOUT_TEST_IMAGE) -f runtime/rollout/e2e/Dockerfile.e2e runtime/rollout/e2e/
rm -rf runtime/rollout/e2e/tmp
kind load docker-image $(VELA_RUNTIME_ROLLOUT_TEST_IMAGE) || { echo >&2 "kind not installed or error loading image: $(VELA_RUNTIME_ROLLOUT_TEST_IMAGE)"; exit 1; }
kind-load-runtime-cluster:
kind load docker-image $(VELA_RUNTIME_ROLLOUT_TEST_IMAGE) --name=$(RUNTIME_CLUSTER_NAME) || { echo >&2 "kind not installed or error loading image: $(VELA_RUNTIME_ROLLOUT_TEST_IMAGE)"; exit 1; }
# Run tests

View File

@@ -43,7 +43,7 @@ spec:
volumeMounts: [{
name: parameter.mountName
mountPath: parameter.initMountPath
}]
}] + parameter.extraVolumeMounts
}]
// +patchKey=name
volumes: [{
@@ -97,5 +97,13 @@ spec:
// +usage=Specify the mount path of init container
initMountPath: string
// +usage=Specify the extra volume mounts for the init container
extraVolumeMounts: [...{
// +usage=The name of the volume to be mounted
name: string
// +usage=The mountPath for mount in the init container
mountPath: string
}]
}

View File

@@ -14,12 +14,18 @@ spec:
cue:
template: |
#K8sObject: {
apiVersion: string
kind: string
metadata: {
name: string
...
}
// +usage=The resource type for the Kubernetes objects
resource?: string
// +usage=The group name for the Kubernetes objects
group?: string
// +usage=If specified, fetch the Kubernetes objects with the name, exclusive to labelSelector
name?: string
// +usage=If specified, fetch the Kubernetes objects from the namespace. Otherwise, fetch from the application's namespace.
namespace?: string
// +usage=If specified, fetch the Kubernetes objects from the cluster. Otherwise, fetch from the local cluster.
cluster?: string
// +usage=If specified, fetch the Kubernetes objects according to the label selector, exclusive to name
labelSelector?: [string]: string
...
}
output: parameter.objects[0]
@@ -30,7 +36,12 @@ spec:
}
}
}
parameter: objects: [...#K8sObject]
parameter: {
// +usage=If specified, application will fetch native Kubernetes objects according to the object description
objects?: [...#K8sObject]
// +usage=If specified, the objects in the urls will be loaded.
urls?: [...string]
}
status:
customStatus: |-
if context.output.apiVersion == "apps/v1" && context.output.kind == "Deployment" {

View File

@@ -63,7 +63,7 @@ spec:
resources: p.resources
}
if p.resourceNames != _|_ {
resources: p.resourceNames
resourceNames: p.resourceNames
}
if p.nonResourceURLs != _|_ {
nonResourceURLs: p.nonResourceURLs
@@ -100,7 +100,7 @@ spec:
resources: p.resources
}
if p.resourceNames != _|_ {
resources: p.resourceNames
resourceNames: p.resourceNames
}
if p.nonResourceURLs != _|_ {
nonResourceURLs: p.nonResourceURLs

View File

@@ -82,6 +82,11 @@ spec:
// +usage=The key of the config map to select from. Must be a valid secret key
key: string
}
// +usage=Specify the field reference for env
fieldRef?: {
// +usage=Specify the field path for env
fieldPath: string
}
}
}]

View File

@@ -64,6 +64,9 @@ spec:
{
name: "pvc-" + v.name
mountPath: v.mountPath
if v.subPath != _|_ {
subPath: v.subPath
}
}
}
},
@@ -73,6 +76,9 @@ spec:
{
name: "configmap-" + v.name
mountPath: v.mountPath
if v.subPath != _|_ {
subPath: v.subPath
}
}
},
] | []
@@ -103,6 +109,9 @@ spec:
{
name: "secret-" + v.name
mountPath: v.mountPath
if v.subPath != _|_ {
subPath: v.subPath
}
}
},
] | []
@@ -133,6 +142,9 @@ spec:
{
name: "emptydir-" + v.name
mountPath: v.mountPath
if v.subPath != _|_ {
subPath: v.subPath
}
}
},
] | []
@@ -141,12 +153,28 @@ spec:
{
name: "pvc-" + v.name
devicePath: v.mountPath
if v.subPath != _|_ {
subPath: v.subPath
}
}
},
] | []
volumesList: pvcVolumesList + configMapVolumesList + secretVolumesList + emptyDirVolumesList
deDupVolumesArray: [
for val in [
for i, vi in volumesList {
for j, vj in volumesList if j < i && vi.name == vj.name {
_ignore: true
}
vi
},
] if val._ignore == _|_ {
val
},
]
patch: spec: template: spec: {
// +patchKey=name
volumes: pvcVolumesList + configMapVolumesList + secretVolumesList + emptyDirVolumesList
volumes: deDupVolumesArray
containers: [{
// +patchKey=name
@@ -234,6 +262,7 @@ spec:
name: string
mountOnly: *false | bool
mountPath: string
subPath?: string
volumeMode: *"Filesystem" | string
volumeName?: string
accessModes: *["ReadWriteOnce"] | [...string]
@@ -275,6 +304,7 @@ spec:
configMapKey: string
}]
mountPath?: string
subPath?: string
defaultMode: *420 | int
readOnly: *false | bool
data?: {...}
@@ -298,6 +328,7 @@ spec:
secretKey: string
}]
mountPath?: string
subPath?: string
defaultMode: *420 | int
readOnly: *false | bool
stringData?: {...}
@@ -313,6 +344,7 @@ spec:
emptyDir?: [...{
name: string
mountPath: string
subPath?: string
medium: *"" | "Memory"
}]
}

View File

@@ -20,7 +20,10 @@ spec:
for v in parameter.volumeMounts.pvc {
{
mountPath: v.mountPath
name: v.name
if v.subPath != _|_ {
subPath: v.subPath
}
name: v.name
}
},
] | []
@@ -29,7 +32,10 @@ spec:
for v in parameter.volumeMounts.configMap {
{
mountPath: v.mountPath
name: v.name
if v.subPath != _|_ {
subPath: v.subPath
}
name: v.name
}
},
] | []
@@ -38,7 +44,10 @@ spec:
for v in parameter.volumeMounts.secret {
{
mountPath: v.mountPath
name: v.name
if v.subPath != _|_ {
subPath: v.subPath
}
name: v.name
}
},
] | []
@@ -47,7 +56,10 @@ spec:
for v in parameter.volumeMounts.emptyDir {
{
mountPath: v.mountPath
name: v.name
if v.subPath != _|_ {
subPath: v.subPath
}
name: v.name
}
},
] | []
@@ -56,7 +68,10 @@ spec:
for v in parameter.volumeMounts.hostPath {
{
mountPath: v.mountPath
name: v.name
if v.subPath != _|_ {
subPath: v.subPath
}
name: v.name
}
},
] | []
@@ -119,6 +134,19 @@ spec:
},
] | []
}
volumesList: volumesArray.pvc + volumesArray.configMap + volumesArray.secret + volumesArray.emptyDir + volumesArray.hostPath
deDupVolumesArray: [
for val in [
for i, vi in volumesList {
for j, vj in volumesList if j < i && vi.name == vj.name {
_ignore: true
}
vi
},
] if val._ignore == _|_ {
val
},
]
output: {
apiVersion: "apps/v1"
kind: "Deployment"
@@ -262,7 +290,7 @@ spec:
}
if parameter["volumeMounts"] != _|_ {
volumes: volumesArray.pvc + volumesArray.configMap + volumesArray.secret + volumesArray.emptyDir + volumesArray.hostPath
volumes: deDupVolumesArray
}
}
}
@@ -375,6 +403,7 @@ spec:
pvc?: [...{
name: string
mountPath: string
subPath?: string
// +usage=The name of the PVC
claimName: string
}]
@@ -382,6 +411,7 @@ spec:
configMap?: [...{
name: string
mountPath: string
subPath?: string
defaultMode: *420 | int
cmName: string
items?: [...{
@@ -394,6 +424,7 @@ spec:
secret?: [...{
name: string
mountPath: string
subPath?: string
defaultMode: *420 | int
secretName: string
items?: [...{
@@ -406,12 +437,14 @@ spec:
emptyDir?: [...{
name: string
mountPath: string
subPath?: string
medium: *"" | "Memory"
}]
// +usage=Mount HostPath type volume
hostPath?: [...{
name: string
mountPath: string
subPath?: string
path: string
}]
}

View File

@@ -43,7 +43,7 @@ spec:
volumeMounts: [{
name: parameter.mountName
mountPath: parameter.initMountPath
}]
}] + parameter.extraVolumeMounts
}]
// +patchKey=name
volumes: [{
@@ -97,5 +97,13 @@ spec:
// +usage=Specify the mount path of init container
initMountPath: string
// +usage=Specify the extra volume mounts for the init container
extraVolumeMounts: [...{
// +usage=The name of the volume to be mounted
name: string
// +usage=The mountPath for mount in the init container
mountPath: string
}]
}

View File

@@ -14,12 +14,18 @@ spec:
cue:
template: |
#K8sObject: {
apiVersion: string
kind: string
metadata: {
name: string
...
}
// +usage=The resource type for the Kubernetes objects
resource?: string
// +usage=The group name for the Kubernetes objects
group?: string
// +usage=If specified, fetch the Kubernetes objects with the name, exclusive to labelSelector
name?: string
// +usage=If specified, fetch the Kubernetes objects from the namespace. Otherwise, fetch from the application's namespace.
namespace?: string
// +usage=If specified, fetch the Kubernetes objects from the cluster. Otherwise, fetch from the local cluster.
cluster?: string
// +usage=If specified, fetch the Kubernetes objects according to the label selector, exclusive to name
labelSelector?: [string]: string
...
}
output: parameter.objects[0]
@@ -30,7 +36,12 @@ spec:
}
}
}
parameter: objects: [...#K8sObject]
parameter: {
// +usage=If specified, application will fetch native Kubernetes objects according to the object description
objects?: [...#K8sObject]
// +usage=If specified, the objects in the urls will be loaded.
urls?: [...string]
}
status:
customStatus: |-
if context.output.apiVersion == "apps/v1" && context.output.kind == "Deployment" {

View File

@@ -63,7 +63,7 @@ spec:
resources: p.resources
}
if p.resourceNames != _|_ {
resources: p.resourceNames
resourceNames: p.resourceNames
}
if p.nonResourceURLs != _|_ {
nonResourceURLs: p.nonResourceURLs
@@ -100,7 +100,7 @@ spec:
resources: p.resources
}
if p.resourceNames != _|_ {
resources: p.resourceNames
resourceNames: p.resourceNames
}
if p.nonResourceURLs != _|_ {
nonResourceURLs: p.nonResourceURLs

View File

@@ -82,6 +82,11 @@ spec:
// +usage=The key of the config map to select from. Must be a valid secret key
key: string
}
// +usage=Specify the field reference for env
fieldRef?: {
// +usage=Specify the field path for env
fieldPath: string
}
}
}]

View File

@@ -64,6 +64,9 @@ spec:
{
name: "pvc-" + v.name
mountPath: v.mountPath
if v.subPath != _|_ {
subPath: v.subPath
}
}
}
},
@@ -73,6 +76,9 @@ spec:
{
name: "configmap-" + v.name
mountPath: v.mountPath
if v.subPath != _|_ {
subPath: v.subPath
}
}
},
] | []
@@ -103,6 +109,9 @@ spec:
{
name: "secret-" + v.name
mountPath: v.mountPath
if v.subPath != _|_ {
subPath: v.subPath
}
}
},
] | []
@@ -133,6 +142,9 @@ spec:
{
name: "emptydir-" + v.name
mountPath: v.mountPath
if v.subPath != _|_ {
subPath: v.subPath
}
}
},
] | []
@@ -141,12 +153,28 @@ spec:
{
name: "pvc-" + v.name
devicePath: v.mountPath
if v.subPath != _|_ {
subPath: v.subPath
}
}
},
] | []
volumesList: pvcVolumesList + configMapVolumesList + secretVolumesList + emptyDirVolumesList
deDupVolumesArray: [
for val in [
for i, vi in volumesList {
for j, vj in volumesList if j < i && vi.name == vj.name {
_ignore: true
}
vi
},
] if val._ignore == _|_ {
val
},
]
patch: spec: template: spec: {
// +patchKey=name
volumes: pvcVolumesList + configMapVolumesList + secretVolumesList + emptyDirVolumesList
volumes: deDupVolumesArray
containers: [{
// +patchKey=name
@@ -234,6 +262,7 @@ spec:
name: string
mountOnly: *false | bool
mountPath: string
subPath?: string
volumeMode: *"Filesystem" | string
volumeName?: string
accessModes: *["ReadWriteOnce"] | [...string]
@@ -275,6 +304,7 @@ spec:
configMapKey: string
}]
mountPath?: string
subPath?: string
defaultMode: *420 | int
readOnly: *false | bool
data?: {...}
@@ -298,6 +328,7 @@ spec:
secretKey: string
}]
mountPath?: string
subPath?: string
defaultMode: *420 | int
readOnly: *false | bool
stringData?: {...}
@@ -313,6 +344,7 @@ spec:
emptyDir?: [...{
name: string
mountPath: string
subPath?: string
medium: *"" | "Memory"
}]
}

View File

@@ -20,7 +20,10 @@ spec:
for v in parameter.volumeMounts.pvc {
{
mountPath: v.mountPath
name: v.name
if v.subPath != _|_ {
subPath: v.subPath
}
name: v.name
}
},
] | []
@@ -29,7 +32,10 @@ spec:
for v in parameter.volumeMounts.configMap {
{
mountPath: v.mountPath
name: v.name
if v.subPath != _|_ {
subPath: v.subPath
}
name: v.name
}
},
] | []
@@ -38,7 +44,10 @@ spec:
for v in parameter.volumeMounts.secret {
{
mountPath: v.mountPath
name: v.name
if v.subPath != _|_ {
subPath: v.subPath
}
name: v.name
}
},
] | []
@@ -47,7 +56,10 @@ spec:
for v in parameter.volumeMounts.emptyDir {
{
mountPath: v.mountPath
name: v.name
if v.subPath != _|_ {
subPath: v.subPath
}
name: v.name
}
},
] | []
@@ -56,7 +68,10 @@ spec:
for v in parameter.volumeMounts.hostPath {
{
mountPath: v.mountPath
name: v.name
if v.subPath != _|_ {
subPath: v.subPath
}
name: v.name
}
},
] | []
@@ -119,6 +134,19 @@ spec:
},
] | []
}
volumesList: volumesArray.pvc + volumesArray.configMap + volumesArray.secret + volumesArray.emptyDir + volumesArray.hostPath
deDupVolumesArray: [
for val in [
for i, vi in volumesList {
for j, vj in volumesList if j < i && vi.name == vj.name {
_ignore: true
}
vi
},
] if val._ignore == _|_ {
val
},
]
output: {
apiVersion: "apps/v1"
kind: "Deployment"
@@ -262,7 +290,7 @@ spec:
}
if parameter["volumeMounts"] != _|_ {
volumes: volumesArray.pvc + volumesArray.configMap + volumesArray.secret + volumesArray.emptyDir + volumesArray.hostPath
volumes: deDupVolumesArray
}
}
}
@@ -375,6 +403,7 @@ spec:
pvc?: [...{
name: string
mountPath: string
subPath?: string
// +usage=The name of the PVC
claimName: string
}]
@@ -382,6 +411,7 @@ spec:
configMap?: [...{
name: string
mountPath: string
subPath?: string
defaultMode: *420 | int
cmName: string
items?: [...{
@@ -394,6 +424,7 @@ spec:
secret?: [...{
name: string
mountPath: string
subPath?: string
defaultMode: *420 | int
secretName: string
items?: [...{
@@ -406,12 +437,14 @@ spec:
emptyDir?: [...{
name: string
mountPath: string
subPath?: string
medium: *"" | "Memory"
}]
// +usage=Mount HostPath type volume
hostPath?: [...{
name: string
mountPath: string
subPath?: string
path: string
}]
}

View File

@@ -19,6 +19,7 @@ package main
import (
"context"
"errors"
goflag "flag"
"fmt"
"io"
"net/http"
@@ -138,6 +139,7 @@ func main() {
flag.DurationVar(&clusterMetricsInterval, "cluster-metrics-interval", 15*time.Second, "The interval that ClusterMetricsMgr will collect metrics from clusters, default value is 15 seconds.")
flag.BoolVar(&controllerArgs.EnableCompatibility, "enable-asi-compatibility", false, "enable compatibility for asi")
flag.BoolVar(&controllerArgs.IgnoreAppWithoutControllerRequirement, "ignore-app-without-controller-version", false, "If true, application controller will not process the app without 'app.oam.dev/controller-version-require' annotation")
flag.BoolVar(&controllerArgs.IgnoreDefinitionWithoutControllerRequirement, "ignore-definition-without-controller-version", false, "If true, trait/component/workflowstep definition controller will not process the definition without 'definition.oam.dev/controller-version-require' annotation")
standardcontroller.AddOptimizeFlags()
standardcontroller.AddAdmissionFlags()
flag.IntVar(&resourcekeeper.MaxDispatchConcurrent, "max-dispatch-concurrent", 10, "Set the max dispatch concurrent number, default is 10")
@@ -146,9 +148,10 @@ func main() {
flag.IntVar(&custom.MaxWorkflowStepErrorRetryTimes, "max-workflow-step-error-retry-times", 10, "Set the max workflow step error retry times, default is 10")
utilfeature.DefaultMutableFeatureGate.AddFlag(flag.CommandLine)
flag.Parse()
// setup logging
klog.InitFlags(nil)
flag.CommandLine.AddGoFlagSet(goflag.CommandLine)
flag.Parse()
if logDebug {
_ = flag.Set("v", strconv.Itoa(int(commonconfig.LogDebug)))
}
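
The hunk above wires klog's standard-library flags into the pflag-based command line before a single Parse call. A minimal, self-contained sketch of that pattern (not KubeVela's actual main; the log-debug flag and verbosity value are illustrative):

package main

import (
	goflag "flag"

	flag "github.com/spf13/pflag"
	"k8s.io/klog/v2"
)

func main() {
	// Illustrative flag, not one of vela-core's real flags.
	var logDebug bool
	flag.BoolVar(&logDebug, "log-debug", false, "enable debug-level logging")

	// klog registers its flags (-v, -logtostderr, ...) on the standard flag set;
	// merging that set into pflag makes them parseable alongside the app's flags.
	klog.InitFlags(nil)
	flag.CommandLine.AddGoFlagSet(goflag.CommandLine)
	flag.Parse()

	if logDebug {
		// Bump klog verbosity when debug logging is requested.
		_ = flag.Set("v", "3")
	}
	klog.InfoS("flags parsed", "log-debug", logDebug)
}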

View File

@@ -25,3 +25,7 @@ spec:
- name: my-mount
mountPath: /test
claimName: myclaim
- name: my-mount
mountPath: /test2
subPath: /sub
claimName: myclaim

View File

@@ -16,8 +16,9 @@ spec:
pvc:
- name: test1
mountPath: /test/mount/pvc
- name: test2
- name: test1
mountPath: /test/mount2/pvc
subPath: /sub
configMap:
- name: test1
mountPath: /test/mount/cm

go.mod (4 changed lines)
View File

@@ -281,9 +281,9 @@ require (
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect
golang.org/x/net v0.0.0-20220516155154-20f960328961 // indirect
golang.org/x/net v0.0.0-20220906165146-f3363e06e74c // indirect
golang.org/x/sync v0.0.0-20220513210516-0976fa681c29 // indirect
golang.org/x/sys v0.0.0-20220513210249-45d2b4557a2a // indirect
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 // indirect
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f // indirect

go.sum (6 changed lines)
View File

@@ -2371,8 +2371,9 @@ golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220516155154-20f960328961 h1:+W/iTMPG0EL7aW+/atntZwZrvSRIj3m3yX414dSULUU=
golang.org/x/net v0.0.0-20220516155154-20f960328961/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220906165146-f3363e06e74c h1:yKufUcDwucU5urd+50/Opbt4AYpqthk7wHpHok8f1lo=
golang.org/x/net v0.0.0-20220906165146-f3363e06e74c/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -2557,8 +2558,9 @@ golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220513210249-45d2b4557a2a h1:N2T1jUrTQE9Re6TFF5PhvEHXHCguynGhKjWVsIUt5cY=
golang.org/x/sys v0.0.0-20220513210249-45d2b4557a2a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 h1:WIoqL4EROvwiPdUtaip4VcDdpZ4kha7wBWZrbVKCIZg=
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=

View File

@@ -77,7 +77,7 @@ ifeq (, $(shell which readme-generator))
@{ \
set -e ;\
echo 'installing readme-generator-for-helm' ;\
npm install -g readme-generator-for-helm ;\
npm install -g @bitnami/readme-generator-for-helm ;\
}
else
@$(OK) readme-generator-for-helm is already installed

View File

@@ -86,7 +86,6 @@ var _ = Describe("test FindWholeAddonPackagesFromRegistry", func() {
Expect(res).To(HaveLen(1))
Expect(res[0].Name).To(Equal("velaux"))
Expect(res[0].InstallPackage).ToNot(BeNil())
Expect(res[0].APISchema).ToNot(BeNil())
})
It("should return one valid result, matching one registry", func() {
res, err := FindWholeAddonPackagesFromRegistry(context.Background(), k8sClient, []string{"velaux"}, []string{"KubeVela"})
@@ -94,7 +93,6 @@ var _ = Describe("test FindWholeAddonPackagesFromRegistry", func() {
Expect(res).To(HaveLen(1))
Expect(res[0].Name).To(Equal("velaux"))
Expect(res[0].InstallPackage).ToNot(BeNil())
Expect(res[0].APISchema).ToNot(BeNil())
})
})
@@ -113,10 +111,8 @@ var _ = Describe("test FindWholeAddonPackagesFromRegistry", func() {
Expect(res).To(HaveLen(2))
Expect(res[0].Name).To(Equal("velaux"))
Expect(res[0].InstallPackage).ToNot(BeNil())
Expect(res[0].APISchema).ToNot(BeNil())
Expect(res[1].Name).To(Equal("traefik"))
Expect(res[1].InstallPackage).ToNot(BeNil())
Expect(res[1].APISchema).ToNot(BeNil())
})
})
@@ -127,7 +123,6 @@ var _ = Describe("test FindWholeAddonPackagesFromRegistry", func() {
Expect(res).To(HaveLen(1))
Expect(res[0].Name).To(Equal("velaux"))
Expect(res[0].InstallPackage).ToNot(BeNil())
Expect(res[0].APISchema).ToNot(BeNil())
})
})
})

View File

@@ -32,6 +32,7 @@ const (
// SystemInfo systemInfo model
type SystemInfo struct {
BaseModel
SignedKey string `json:"signedKey"`
InstallID string `json:"installID"`
EnableCollection bool `json:"enableCollection"`
LoginType string `json:"loginType"`

View File

@@ -57,7 +57,8 @@ const (
GrantTypeRefresh = "refresh"
)
var signedKey = ""
// signedKey is the signed key of JWT
var signedKey string
// AuthenticationService is the service of authentication
type AuthenticationService interface {

View File

@@ -63,6 +63,7 @@ func (u systemInfoServiceImpl) Get(ctx context.Context) (*model.SystemInfo, erro
}
return info, nil
}
info.SignedKey = rand.String(32)
installID := rand.String(16)
info.InstallID = installID
info.EnableCollection = true
@@ -159,7 +160,7 @@ func (u systemInfoServiceImpl) Init(ctx context.Context) error {
if err != nil {
return err
}
signedKey = info.InstallID
signedKey = info.SignedKey
_, err = initDexConfig(ctx, u.KubeClient, "http://velaux.com")
return err
}
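
Taken together, the three hunks above replace the install ID with a dedicated random SignedKey as the JWT signing secret. A minimal sketch of how such a per-installation key could be used to issue a token; the golang-jwt/jwt/v4 import and the claims are assumptions for illustration, not the apiserver's actual token code:

package main

import (
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v4" // assumed JWT library, for illustration only
	"k8s.io/apimachinery/pkg/util/rand"
)

func main() {
	// Generated once per installation and persisted in SystemInfo.SignedKey,
	// instead of reusing the install ID as the secret.
	signedKey := rand.String(32)

	claims := jwt.RegisteredClaims{
		Subject:   "admin",
		ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Hour)),
	}
	token, err := jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString([]byte(signedKey))
	if err != nil {
		panic(err)
	}
	fmt.Println(token)
}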

View File

@@ -322,7 +322,17 @@ func genClusterCountInfo(num int) string {
return "<10"
case num < 50:
return "<50"
case num < 100:
return "<100"
case num < 150:
return "<150"
case num < 200:
return "<200"
case num < 300:
return "<300"
case num < 500:
return "<500"
default:
return ">=50"
return ">=500"
}
}

View File

@@ -211,8 +211,28 @@ func TestGenClusterCountInfo(t *testing.T) {
res: "<50",
},
{
count: 100,
res: ">=50",
count: 90,
res: "<100",
},
{
count: 137,
res: "<150",
},
{
count: 170,
res: "<200",
},
{
count: 270,
res: "<300",
},
{
count: 400,
res: "<500",
},
{
count: 520,
res: ">=500",
},
}
for _, testcase := range testcases {

View File

@@ -82,8 +82,8 @@ func (c Condition) Validate() error {
if c.JSONKey == "" {
return fmt.Errorf("the json key of the condition can not be empty")
}
if c.Action != "enable" && c.Action != "disable" {
return fmt.Errorf("the action of the condition must be enable or disable")
if c.Action != "enable" && c.Action != "disable" && c.Action != "" {
return fmt.Errorf("the action of the condition only supports enable, disable or leave it empty")
}
if c.Op != "" && !StringsContain([]string{"==", "!=", "in"}, c.Op) {
return fmt.Errorf("the op of the condition must be `==` 、`!=` and `in`")

View File

@@ -97,21 +97,21 @@ func (wl *Workload) EvalContext(ctx process.Context) error {
}
// EvalStatus eval workload status
func (wl *Workload) EvalStatus(ctx process.Context, cli client.Client, ns string) (string, error) {
func (wl *Workload) EvalStatus(ctx process.Context, cli client.Client, accessor util.NamespaceAccessor) (string, error) {
// if the standard workload is managed by trait always return empty message
if wl.SkipApplyWorkload {
return "", nil
}
return wl.engine.Status(ctx, cli, ns, wl.FullTemplate.CustomStatus, wl.Params)
return wl.engine.Status(ctx, cli, accessor, wl.FullTemplate.CustomStatus, wl.Params)
}
// EvalHealth eval workload health check
func (wl *Workload) EvalHealth(ctx process.Context, client client.Client, namespace string) (bool, error) {
func (wl *Workload) EvalHealth(ctx process.Context, client client.Client, accessor util.NamespaceAccessor) (bool, error) {
// if health of template is not set or standard workload is managed by trait always return true
if wl.FullTemplate.Health == "" || wl.SkipApplyWorkload {
return true, nil
}
return wl.engine.HealthCheck(ctx, client, namespace, wl.FullTemplate.Health)
return wl.engine.HealthCheck(ctx, client, accessor, wl.FullTemplate.Health)
}
// Scope defines the scope of workload
@@ -145,16 +145,16 @@ func (trait *Trait) EvalContext(ctx process.Context) error {
}
// EvalStatus eval trait status
func (trait *Trait) EvalStatus(ctx process.Context, cli client.Client, ns string) (string, error) {
return trait.engine.Status(ctx, cli, ns, trait.CustomStatusFormat, trait.Params)
func (trait *Trait) EvalStatus(ctx process.Context, cli client.Client, accessor util.NamespaceAccessor) (string, error) {
return trait.engine.Status(ctx, cli, accessor, trait.CustomStatusFormat, trait.Params)
}
// EvalHealth eval trait health check
func (trait *Trait) EvalHealth(ctx process.Context, client client.Client, namespace string) (bool, error) {
func (trait *Trait) EvalHealth(ctx process.Context, client client.Client, accessor util.NamespaceAccessor) (bool, error) {
if trait.FullTemplate.Health == "" {
return true, nil
}
return trait.engine.HealthCheck(ctx, client, namespace, trait.HealthCheckPolicy)
return trait.engine.HealthCheck(ctx, client, accessor, trait.HealthCheckPolicy)
}
// Appfile describes application

View File

@@ -365,6 +365,9 @@ func (p *Parser) parsePoliciesFromRevision(ctx context.Context, af *Appfile) (er
return err
}
for _, policy := range af.Policies {
if policy.Properties == nil && policy.Type != v1alpha1.DebugPolicyType {
return fmt.Errorf("policy %s named %s must not have empty properties", policy.Type, policy.Name)
}
switch policy.Type {
case v1alpha1.GarbageCollectPolicyType:
case v1alpha1.ApplyOncePolicyType:
@@ -390,6 +393,9 @@ func (p *Parser) parsePolicies(ctx context.Context, af *Appfile) (err error) {
return err
}
for _, policy := range af.Policies {
if policy.Properties == nil && policy.Type != v1alpha1.DebugPolicyType {
return fmt.Errorf("policy %s named %s must not have empty properties", policy.Type, policy.Name)
}
switch policy.Type {
case v1alpha1.GarbageCollectPolicyType:
case v1alpha1.ApplyOncePolicyType:

View File

@@ -243,6 +243,20 @@ spec:
image: "busybox"
`
const appfileYamlEmptyPolicy = `
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: application-sample
namespace: default
spec:
components: []
policies:
- type: garbage-collect
name: somename
properties:
`
var _ = Describe("Test application parser", func() {
It("Test we can parse an application to an appFile", func() {
o := v1beta1.Application{}
@@ -282,6 +296,14 @@ var _ = Describe("Test application parser", func() {
Expect(err).ShouldNot(HaveOccurred())
_, err = NewApplicationParser(&tclient, dm, pd).GenerateAppFile(context.TODO(), &notfound)
Expect(err).Should(HaveOccurred())
By("app with empty policy")
emptyPolicy := v1beta1.Application{}
err = yaml.Unmarshal([]byte(appfileYamlEmptyPolicy), &emptyPolicy)
Expect(err).ShouldNot(HaveOccurred())
_, err = NewApplicationParser(&tclient, dm, pd).GenerateAppFile(context.TODO(), &emptyPolicy)
Expect(err).Should(HaveOccurred())
Expect(err.Error()).Should(ContainSubstring("have empty properties"))
})
})

View File

@@ -86,4 +86,7 @@ type Args struct {
// IgnoreAppWithoutControllerRequirement indicates that application controller will not process the app without 'app.oam.dev/controller-version-require' annotation.
IgnoreAppWithoutControllerRequirement bool
// IgnoreDefinitionWithoutControllerRequirement indicates that trait/component/workflowstep definition controller will not process the definition without 'definition.oam.dev/controller-version-require' annotation.
IgnoreDefinitionWithoutControllerRequirement bool
}

View File

@@ -136,7 +136,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
if annotations := app.GetAnnotations(); annotations == nil || annotations[oam.AnnotationKubeVelaVersion] == "" {
metav1.SetMetaDataAnnotation(&app.ObjectMeta, oam.AnnotationKubeVelaVersion, version.VelaVersion)
}
logCtx.AddTag("publish_version", app.GetAnnotations()[oam.AnnotationKubeVelaVersion])
logCtx.AddTag("publish_version", app.GetAnnotations()[oam.AnnotationPublishVersion])
appParser := appfile.NewApplicationParser(r.Client, r.dm, r.pd)
handler, err := NewAppHandler(logCtx, r, app, appParser)
@@ -307,6 +307,11 @@ func (r *Reconciler) gcResourceTrackers(logCtx monitorContext.Context, handler *
}))
defer subCtx.Commit("finish gc resourceTrackers")
statusUpdater := r.updateStatus
if isPatch {
statusUpdater = r.patchStatus
}
var options []resourcekeeper.GCOption
if !gcOutdated {
options = append(options, resourcekeeper.DisableMarkStageGCOption{}, resourcekeeper.DisableGCComponentRevisionOption{}, resourcekeeper.DisableLegacyGCOption{})
@@ -314,8 +319,10 @@ func (r *Reconciler) gcResourceTrackers(logCtx monitorContext.Context, handler *
finished, waiting, err := handler.resourceKeeper.GarbageCollect(logCtx, options...)
if err != nil {
logCtx.Error(err, "Failed to gc resourcetrackers")
r.Recorder.Event(handler.app, event.Warning(velatypes.ReasonFailedGC, err))
return r.endWithNegativeCondition(logCtx, handler.app, condition.ReconcileError(err), phase)
cond := condition.Deleting()
cond.Message = fmt.Sprintf("error encountered during garbage collection: %s", err.Error())
handler.app.Status.SetConditions(cond)
return r.result(statusUpdater(logCtx, handler.app, phase)).ret()
}
if !finished {
logCtx.Info("GarbageCollecting resourcetrackers unfinished")
@@ -324,13 +331,13 @@ func (r *Reconciler) gcResourceTrackers(logCtx monitorContext.Context, handler *
cond.Message = fmt.Sprintf("Waiting for %s to delete. (At least %d resources are deleting.)", waiting[0].DisplayName(), len(waiting))
}
handler.app.Status.SetConditions(cond)
return r.result(r.patchStatus(logCtx, handler.app, phase)).requeue(baseGCBackoffWaitTime).ret()
return r.result(statusUpdater(logCtx, handler.app, phase)).requeue(baseGCBackoffWaitTime).ret()
}
logCtx.Info("GarbageCollected resourcetrackers")
if !isPatch {
return r.result(r.updateStatus(logCtx, handler.app, common.ApplicationRunningWorkflow)).ret()
phase = common.ApplicationRunningWorkflow
}
return r.result(r.patchStatus(logCtx, handler.app, phase)).ret()
return r.result(statusUpdater(logCtx, handler.app, phase)).ret()
}
type reconcileResult struct {

View File

@@ -38,6 +38,7 @@ import (
"github.com/oam-dev/kubevela/pkg/monitor/metrics"
"github.com/oam-dev/kubevela/pkg/multicluster"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/util"
"github.com/oam-dev/kubevela/pkg/resourcekeeper"
)
@@ -215,17 +216,14 @@ func (h *AppHandler) ProduceArtifacts(ctx context.Context, comps []*types.Compon
// nolint
func (h *AppHandler) collectHealthStatus(ctx context.Context, wl *appfile.Workload, appRev *v1beta1.ApplicationRevision, overrideNamespace string) (*common.ApplicationComponentStatus, bool, error) {
namespace := h.app.Namespace
if overrideNamespace != "" {
namespace = overrideNamespace
}
accessor := util.NewApplicationResourceNamespaceAccessor(h.app.Namespace, overrideNamespace)
var (
status = common.ApplicationComponentStatus{
Name: wl.Name,
WorkloadDefinition: wl.FullTemplate.Reference.Definition,
Healthy: true,
Namespace: namespace,
Namespace: accessor.Namespace(),
Cluster: multicluster.ClusterNameInContext(ctx),
}
appName = appRev.Spec.Application.Name
@@ -235,10 +233,10 @@ func (h *AppHandler) collectHealthStatus(ctx context.Context, wl *appfile.Worklo
if wl.CapabilityCategory == types.TerraformCategory {
var configuration terraforv1beta2.Configuration
if err := h.r.Client.Get(ctx, client.ObjectKey{Name: wl.Name, Namespace: namespace}, &configuration); err != nil {
if err := h.r.Client.Get(ctx, client.ObjectKey{Name: wl.Name, Namespace: accessor.Namespace()}, &configuration); err != nil {
if kerrors.IsNotFound(err) {
var legacyConfiguration terraforv1beta1.Configuration
if err := h.r.Client.Get(ctx, client.ObjectKey{Name: wl.Name, Namespace: namespace}, &legacyConfiguration); err != nil {
if err := h.r.Client.Get(ctx, client.ObjectKey{Name: wl.Name, Namespace: accessor.Namespace()}, &legacyConfiguration); err != nil {
return nil, false, errors.WithMessagef(err, "app=%s, comp=%s, check health error", appName, wl.Name)
}
isHealth = setStatus(&status, legacyConfiguration.Status.ObservedGeneration, legacyConfiguration.Generation,
@@ -251,12 +249,12 @@ func (h *AppHandler) collectHealthStatus(ctx context.Context, wl *appfile.Worklo
appRev.Name, configuration.Status.Apply.State, configuration.Status.Apply.Message)
}
} else {
if ok, err := wl.EvalHealth(wl.Ctx, h.r.Client, namespace); !ok || err != nil {
if ok, err := wl.EvalHealth(wl.Ctx, h.r.Client, accessor); !ok || err != nil {
isHealth = false
status.Healthy = false
}
status.Message, err = wl.EvalStatus(wl.Ctx, h.r.Client, namespace)
status.Message, err = wl.EvalStatus(wl.Ctx, h.r.Client, accessor)
if err != nil {
return nil, false, errors.WithMessagef(err, "app=%s, comp=%s, evaluate workload status message error", appName, wl.Name)
}
@@ -264,24 +262,25 @@ func (h *AppHandler) collectHealthStatus(ctx context.Context, wl *appfile.Worklo
var traitStatusList []common.ApplicationTraitStatus
for _, tr := range wl.Traits {
traitOverrideNamespace := overrideNamespace
if tr.FullTemplate.TraitDefinition.Spec.ControlPlaneOnly {
namespace = appRev.GetNamespace()
traitOverrideNamespace = appRev.GetNamespace()
wl.Ctx.SetCtx(context.WithValue(wl.Ctx.GetCtx(), multicluster.ClusterContextKey, multicluster.ClusterLocalName))
}
_accessor := util.NewApplicationResourceNamespaceAccessor(h.app.Namespace, traitOverrideNamespace)
var traitStatus = common.ApplicationTraitStatus{
Type: tr.Name,
Healthy: true,
}
if ok, err := tr.EvalHealth(wl.Ctx, h.r.Client, namespace); !ok || err != nil {
if ok, err := tr.EvalHealth(wl.Ctx, h.r.Client, _accessor); !ok || err != nil {
isHealth = false
traitStatus.Healthy = false
}
traitStatus.Message, err = tr.EvalStatus(wl.Ctx, h.r.Client, namespace)
traitStatus.Message, err = tr.EvalStatus(wl.Ctx, h.r.Client, _accessor)
if err != nil {
return nil, false, errors.WithMessagef(err, "app=%s, comp=%s, trait=%s, evaluate status message error", appName, wl.Name, tr.Name)
}
traitStatusList = append(traitStatusList, traitStatus)
namespace = appRev.GetNamespace()
wl.Ctx.SetCtx(context.WithValue(wl.Ctx.GetCtx(), multicluster.ClusterContextKey, status.Cluster))
}

View File

@@ -41,6 +41,7 @@ import (
"github.com/oam-dev/kubevela/pkg/oam/util"
"github.com/oam-dev/kubevela/pkg/policy/envbinding"
"github.com/oam-dev/kubevela/pkg/utils"
"github.com/oam-dev/kubevela/pkg/velaql/providers/query"
"github.com/oam-dev/kubevela/pkg/workflow/providers"
"github.com/oam-dev/kubevela/pkg/workflow/providers/http"
"github.com/oam-dev/kubevela/pkg/workflow/providers/kube"
@@ -81,6 +82,7 @@ func (h *AppHandler) GenerateApplicationSteps(ctx monitorContext.Context,
terraformProvider.Install(handlerProviders, app, func(comp common.ApplicationComponent) (*appfile.Workload, error) {
return appParser.ParseWorkloadFromRevision(comp, appRev)
})
query.Install(handlerProviders, h.r.Client, nil)
var tasks []wfTypes.TaskRunner
for _, step := range af.WorkflowSteps {

View File

@@ -967,7 +967,7 @@ func (h historiesByComponentRevision) Less(i, j int) bool {
// UpdateApplicationRevisionStatus update application revision status
func (h *AppHandler) UpdateApplicationRevisionStatus(ctx context.Context, appRev *v1beta1.ApplicationRevision, succeed bool, wfStatus *common.WorkflowStatus) {
if appRev == nil {
if appRev == nil || DisableAllApplicationRevision {
return
}
appRev.Status.Succeeded = succeed

View File

@@ -43,17 +43,24 @@ import (
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/discoverymapper"
"github.com/oam-dev/kubevela/pkg/oam/util"
"github.com/oam-dev/kubevela/version"
)
// Reconciler reconciles a ComponentDefinition object
type Reconciler struct {
client.Client
dm discoverymapper.DiscoveryMapper
pd *packages.PackageDiscover
Scheme *runtime.Scheme
record event.Recorder
dm discoverymapper.DiscoveryMapper
pd *packages.PackageDiscover
Scheme *runtime.Scheme
record event.Recorder
options
}
type options struct {
defRevLimit int
concurrentReconciles int
ignoreDefNoCtrlReq bool
controllerVersion string
}
// Reconcile is the main logic for ComponentDefinition controller
@@ -68,6 +75,11 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
return ctrl.Result{}, client.IgnoreNotFound(err)
}
if !r.matchControllerRequirement(&componentDefinition) {
klog.InfoS("skip componentDefinition: not match the controller requirement of componentDefinition", "componentDefinition", klog.KObj(&componentDefinition))
return ctrl.Result{}, nil
}
// refresh package discover when componentDefinition is registered
if componentDefinition.Spec.Workload.Type != types.AutoDetectWorkloadDefinition {
err := utils.RefreshPackageDiscover(ctx, r.Client, r.dm, r.pd, &componentDefinition)
@@ -187,12 +199,32 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
// Setup adds a controller that reconciles ComponentDefinition.
func Setup(mgr ctrl.Manager, args oamctrl.Args) error {
r := Reconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
dm: args.DiscoveryMapper,
pd: args.PackageDiscover,
defRevLimit: args.DefRevisionLimit,
concurrentReconciles: args.ConcurrentReconciles,
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
dm: args.DiscoveryMapper,
pd: args.PackageDiscover,
options: parseOptions(args),
}
return r.SetupWithManager(mgr)
}
func parseOptions(args oamctrl.Args) options {
return options{
defRevLimit: args.DefRevisionLimit,
concurrentReconciles: args.ConcurrentReconciles,
ignoreDefNoCtrlReq: args.IgnoreDefinitionWithoutControllerRequirement,
controllerVersion: version.VelaVersion,
}
}
func (r *Reconciler) matchControllerRequirement(componentDefinition *v1beta1.ComponentDefinition) bool {
if componentDefinition.Annotations != nil {
if requireVersion, ok := componentDefinition.Annotations[oam.AnnotationControllerRequirement]; ok {
return requireVersion == r.controllerVersion
}
}
if r.ignoreDefNoCtrlReq {
return false
}
return true
}
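
The new matchControllerRequirement gate (mirrored in the TraitDefinition controller below) reconciles a definition only if its controller-version annotation matches the running controller, or, unless the ignore flag is set, if it carries no such annotation at all. A condensed, standalone sketch of that decision:

package main

import "fmt"

// Annotation key quoted from the flag description above.
const annotationControllerRequirement = "definition.oam.dev/controller-version-require"

func matchControllerRequirement(annotations map[string]string, controllerVersion string, ignoreDefNoCtrlReq bool) bool {
	if required, ok := annotations[annotationControllerRequirement]; ok {
		return required == controllerVersion
	}
	// No requirement on the definition: reconcile unless told to ignore such definitions.
	return !ignoreDefNoCtrlReq
}

func main() {
	fmt.Println(matchControllerRequirement(map[string]string{annotationControllerRequirement: "v1.4.0"}, "v1.4.0", true)) // true: versions match
	fmt.Println(matchControllerRequirement(nil, "v1.4.0", false))                                                         // true: no requirement, flag off
	fmt.Println(matchControllerRequirement(nil, "v1.4.0", true))                                                          // false: no requirement, flag on
}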

View File

@@ -90,11 +90,13 @@ var _ = BeforeSuite(func(done Done) {
Expect(err).ToNot(HaveOccurred())
r = Reconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
dm: dm,
pd: pd,
defRevLimit: defRevisionLimit,
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
dm: dm,
pd: pd,
options: options{
defRevLimit: defRevisionLimit,
},
}
Expect(r.SetupWithManager(mgr)).ToNot(HaveOccurred())
var ctx context.Context

View File

@@ -44,6 +44,7 @@ import (
af "github.com/oam-dev/kubevela/pkg/appfile"
"github.com/oam-dev/kubevela/pkg/cue/process"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/util"
)
const (
@@ -478,7 +479,8 @@ func CUEBasedHealthCheck(ctx context.Context, c client.Client, wlRef WorkloadRef
okToCheckTrait = true
return
}
isHealthy, err := wl.EvalHealth(pCtx, c, ns)
accessor := util.NewApplicationResourceNamespaceAccessor(ns, "")
isHealthy, err := wl.EvalHealth(pCtx, c, accessor)
if err != nil {
wlHealth.HealthStatus = StatusUnhealthy
wlHealth.Diagnosis = errors.Wrap(err, errHealthCheck).Error()
@@ -490,7 +492,7 @@ func CUEBasedHealthCheck(ctx context.Context, c client.Client, wlRef WorkloadRef
// TODO(wonderflow): we should add a custom way to let the template say why it's unhealthy, only a bool flag is not enough
wlHealth.HealthStatus = StatusUnhealthy
}
wlHealth.CustomStatusMsg, err = wl.EvalStatus(pCtx, c, ns)
wlHealth.CustomStatusMsg, err = wl.EvalStatus(pCtx, c, accessor)
if err != nil {
wlHealth.Diagnosis = errors.Wrap(err, errHealthCheck).Error()
}
@@ -522,7 +524,8 @@ func CUEBasedHealthCheck(ctx context.Context, c client.Client, wlRef WorkloadRef
traits[i] = tHealth
continue
}
isHealthy, err := tr.EvalHealth(pCtx, c, ns)
accessor := util.NewApplicationResourceNamespaceAccessor("", ns)
isHealthy, err := tr.EvalHealth(pCtx, c, accessor)
if err != nil {
tHealth.HealthStatus = StatusUnhealthy
tHealth.Diagnosis = errors.Wrap(err, errHealthCheck).Error()
@@ -535,7 +538,7 @@ func CUEBasedHealthCheck(ctx context.Context, c client.Client, wlRef WorkloadRef
// TODO(wonderflow): we should add a custom way to let the template say why it's unhealthy, only a bool flag is not enough
tHealth.HealthStatus = StatusUnhealthy
}
tHealth.CustomStatusMsg, err = tr.EvalStatus(pCtx, c, ns)
tHealth.CustomStatusMsg, err = tr.EvalStatus(pCtx, c, accessor)
if err != nil {
tHealth.Diagnosis = errors.Wrap(err, errHealthCheck).Error()
}

View File

@@ -42,17 +42,24 @@ import (
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/discoverymapper"
"github.com/oam-dev/kubevela/pkg/oam/util"
"github.com/oam-dev/kubevela/version"
)
// Reconciler reconciles a TraitDefinition object
type Reconciler struct {
client.Client
dm discoverymapper.DiscoveryMapper
pd *packages.PackageDiscover
Scheme *runtime.Scheme
record event.Recorder
dm discoverymapper.DiscoveryMapper
pd *packages.PackageDiscover
Scheme *runtime.Scheme
record event.Recorder
options
}
type options struct {
defRevLimit int
concurrentReconciles int
ignoreDefNoCtrlReq bool
controllerVersion string
}
// Reconcile is the main logic for TraitDefinition controller
@@ -67,6 +74,11 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
return ctrl.Result{}, client.IgnoreNotFound(err)
}
if !r.matchControllerRequirement(&traitdefinition) {
klog.InfoS("skip traitDefinition: not match the controller requirement of traitDefinition", "traitDefinition", klog.KObj(&traitdefinition))
return ctrl.Result{}, nil
}
// this is a placeholder for finalizer here in the future
if traitdefinition.DeletionTimestamp != nil {
klog.InfoS("The TraitDefinition is being deleted", "traitDefinition", klog.KRef(req.Namespace, req.Name))
@@ -193,12 +205,32 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
// Setup adds a controller that reconciles TraitDefinition.
func Setup(mgr ctrl.Manager, args oamctrl.Args) error {
r := Reconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
dm: args.DiscoveryMapper,
pd: args.PackageDiscover,
defRevLimit: args.DefRevisionLimit,
concurrentReconciles: args.ConcurrentReconciles,
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
dm: args.DiscoveryMapper,
pd: args.PackageDiscover,
options: parseOptions(args),
}
return r.SetupWithManager(mgr)
}
func parseOptions(args oamctrl.Args) options {
return options{
defRevLimit: args.DefRevisionLimit,
concurrentReconciles: args.ConcurrentReconciles,
ignoreDefNoCtrlReq: args.IgnoreDefinitionWithoutControllerRequirement,
controllerVersion: version.VelaVersion,
}
}
func (r *Reconciler) matchControllerRequirement(traitDefinition *v1beta1.TraitDefinition) bool {
if traitDefinition.Annotations != nil {
if requireVersion, ok := traitDefinition.Annotations[oam.AnnotationControllerRequirement]; ok {
return requireVersion == r.controllerVersion
}
}
if r.ignoreDefNoCtrlReq {
return false
}
return true
}

View File

@@ -90,11 +90,13 @@ var _ = BeforeSuite(func(done Done) {
Expect(err).ToNot(HaveOccurred())
r = Reconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
dm: dm,
pd: pd,
defRevLimit: defRevisionLimit,
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
dm: dm,
pd: pd,
options: options{
defRevLimit: defRevisionLimit,
},
}
Expect(r.SetupWithManager(mgr)).ToNot(HaveOccurred())
var ctx context.Context

View File

@@ -90,11 +90,13 @@ var _ = BeforeSuite(func(done Done) {
Expect(err).ToNot(HaveOccurred())
r = Reconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
dm: dm,
pd: pd,
defRevLimit: defRevisionLimit,
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
dm: dm,
pd: pd,
options: options{
defRevLimit: defRevisionLimit,
},
}
Expect(r.SetupWithManager(mgr)).ToNot(HaveOccurred())
var ctx context.Context

View File

@@ -42,17 +42,24 @@ import (
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/discoverymapper"
"github.com/oam-dev/kubevela/pkg/oam/util"
"github.com/oam-dev/kubevela/version"
)
// Reconciler reconciles a WorkflowStepDefinition object
type Reconciler struct {
client.Client
dm discoverymapper.DiscoveryMapper
pd *packages.PackageDiscover
Scheme *runtime.Scheme
record event.Recorder
dm discoverymapper.DiscoveryMapper
pd *packages.PackageDiscover
Scheme *runtime.Scheme
record event.Recorder
options
}
type options struct {
defRevLimit int
concurrentReconciles int
ignoreDefNoCtrlReq bool
controllerVersion string
}
// Reconcile is the main logic for WorkflowStepDefinition controller
@@ -68,6 +75,11 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
return ctrl.Result{}, client.IgnoreNotFound(err)
}
if !r.matchControllerRequirement(&wfstepdefinition) {
klog.InfoS("skip workflowStepDefinition: not match the controller requirement of workflowStepDefinition", "workflowStepDefinition", klog.KObj(&wfstepdefinition))
return ctrl.Result{}, nil
}
// this is a placeholder for finalizer here in the future
if wfstepdefinition.DeletionTimestamp != nil {
return ctrl.Result{}, nil
@@ -192,11 +204,32 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
// Setup adds a controller that reconciles WorkflowStepDefinition.
func Setup(mgr ctrl.Manager, args oamctrl.Args) error {
r := Reconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
dm: args.DiscoveryMapper,
pd: args.PackageDiscover,
defRevLimit: args.DefRevisionLimit,
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
dm: args.DiscoveryMapper,
pd: args.PackageDiscover,
options: parseOptions(args),
}
return r.SetupWithManager(mgr)
}
func parseOptions(args oamctrl.Args) options {
return options{
defRevLimit: args.DefRevisionLimit,
concurrentReconciles: args.ConcurrentReconciles,
ignoreDefNoCtrlReq: args.IgnoreDefinitionWithoutControllerRequirement,
controllerVersion: version.VelaVersion,
}
}
func (r *Reconciler) matchControllerRequirement(wfstepdefinition *v1beta1.WorkflowStepDefinition) bool {
if wfstepdefinition.Annotations != nil {
if requireVersion, ok := wfstepdefinition.Annotations[oam.AnnotationControllerRequirement]; ok {
return requireVersion == r.controllerVersion
}
}
if r.ignoreDefNoCtrlReq {
return false
}
return true
}

View File

@@ -62,8 +62,8 @@ const (
// AbstractEngine defines Definition's Render interface
type AbstractEngine interface {
Complete(ctx process.Context, abstractTemplate string, params interface{}) error
HealthCheck(ctx process.Context, cli client.Client, ns string, healthPolicyTemplate string) (bool, error)
Status(ctx process.Context, cli client.Client, ns string, customStatusTemplate string, parameter interface{}) (string, error)
HealthCheck(ctx process.Context, cli client.Client, accessor util.NamespaceAccessor, healthPolicyTemplate string) (bool, error)
Status(ctx process.Context, cli client.Client, accessor util.NamespaceAccessor, customStatusTemplate string, parameter interface{}) (string, error)
}
type def struct {
@@ -151,7 +151,7 @@ func (wd *workloadDef) Complete(ctx process.Context, abstractTemplate string, pa
return nil
}
func (wd *workloadDef) getTemplateContext(ctx process.Context, cli client.Reader, ns string) (map[string]interface{}, error) {
func (wd *workloadDef) getTemplateContext(ctx process.Context, cli client.Reader, accessor util.NamespaceAccessor) (map[string]interface{}, error) {
var root = initRoot(ctx.BaseContextLabels())
var commonLabels = GetCommonLabels(ctx.BaseContextLabels())
@@ -162,7 +162,7 @@ func (wd *workloadDef) getTemplateContext(ctx process.Context, cli client.Reader
return nil, err
}
// workload main resource will have a unique label("app.oam.dev/resourceType"="WORKLOAD") in per component/app level
object, err := getResourceFromObj(ctx.GetCtx(), componentWorkload, cli, ns, util.MergeMapOverrideWithDst(map[string]string{
object, err := getResourceFromObj(ctx.GetCtx(), componentWorkload, cli, accessor.For(componentWorkload), util.MergeMapOverrideWithDst(map[string]string{
oam.LabelOAMResourceType: oam.ResourceTypeWorkload,
}, commonLabels), "")
if err != nil {
@@ -182,7 +182,7 @@ func (wd *workloadDef) getTemplateContext(ctx process.Context, cli client.Reader
return nil, err
}
// AuxiliaryWorkload will have a unique label("trait.oam.dev/resource"="name of outputs") in per component/app level
object, err := getResourceFromObj(ctx.GetCtx(), traitRef, cli, ns, util.MergeMapOverrideWithDst(map[string]string{
object, err := getResourceFromObj(ctx.GetCtx(), traitRef, cli, accessor.For(componentWorkload), util.MergeMapOverrideWithDst(map[string]string{
oam.TraitTypeLabel: AuxiliaryWorkload,
}, commonLabels), assist.Name)
if err != nil {
@@ -197,11 +197,11 @@ func (wd *workloadDef) getTemplateContext(ctx process.Context, cli client.Reader
}
// HealthCheck address health check for workload
func (wd *workloadDef) HealthCheck(ctx process.Context, cli client.Client, ns string, healthPolicyTemplate string) (bool, error) {
func (wd *workloadDef) HealthCheck(ctx process.Context, cli client.Client, accessor util.NamespaceAccessor, healthPolicyTemplate string) (bool, error) {
if healthPolicyTemplate == "" {
return true, nil
}
templateContext, err := wd.getTemplateContext(ctx, cli, ns)
templateContext, err := wd.getTemplateContext(ctx, cli, accessor)
if err != nil {
return false, errors.WithMessage(err, "get template context")
}
@@ -228,11 +228,11 @@ func checkHealth(templateContext map[string]interface{}, healthPolicyTemplate st
}
// Status get workload status by customStatusTemplate
func (wd *workloadDef) Status(ctx process.Context, cli client.Client, ns string, customStatusTemplate string, parameter interface{}) (string, error) {
func (wd *workloadDef) Status(ctx process.Context, cli client.Client, accessor util.NamespaceAccessor, customStatusTemplate string, parameter interface{}) (string, error) {
if customStatusTemplate == "" {
return "", nil
}
templateContext, err := wd.getTemplateContext(ctx, cli, ns)
templateContext, err := wd.getTemplateContext(ctx, cli, accessor)
if err != nil {
return "", errors.WithMessage(err, "get template context")
}
@@ -417,7 +417,7 @@ func initRoot(contextLabels map[string]string) map[string]interface{} {
return root
}
func (td *traitDef) getTemplateContext(ctx process.Context, cli client.Reader, ns string) (map[string]interface{}, error) {
func (td *traitDef) getTemplateContext(ctx process.Context, cli client.Reader, accessor util.NamespaceAccessor) (map[string]interface{}, error) {
var root = initRoot(ctx.BaseContextLabels())
var commonLabels = GetCommonLabels(ctx.BaseContextLabels())
@@ -431,7 +431,7 @@ func (td *traitDef) getTemplateContext(ctx process.Context, cli client.Reader, n
if err != nil {
return nil, err
}
object, err := getResourceFromObj(ctx.GetCtx(), traitRef, cli, ns, util.MergeMapOverrideWithDst(map[string]string{
object, err := getResourceFromObj(ctx.GetCtx(), traitRef, cli, accessor.For(traitRef), util.MergeMapOverrideWithDst(map[string]string{
oam.TraitTypeLabel: assist.Type,
}, commonLabels), assist.Name)
if err != nil {
@@ -446,11 +446,11 @@ func (td *traitDef) getTemplateContext(ctx process.Context, cli client.Reader, n
}
// Status get trait status by customStatusTemplate
func (td *traitDef) Status(ctx process.Context, cli client.Client, ns string, customStatusTemplate string, parameter interface{}) (string, error) {
func (td *traitDef) Status(ctx process.Context, cli client.Client, accessor util.NamespaceAccessor, customStatusTemplate string, parameter interface{}) (string, error) {
if customStatusTemplate == "" {
return "", nil
}
templateContext, err := td.getTemplateContext(ctx, cli, ns)
templateContext, err := td.getTemplateContext(ctx, cli, accessor)
if err != nil {
return "", errors.WithMessage(err, "get template context")
}
@@ -458,11 +458,11 @@ func (td *traitDef) Status(ctx process.Context, cli client.Client, ns string, cu
}
// HealthCheck address health check for trait
func (td *traitDef) HealthCheck(ctx process.Context, cli client.Client, ns string, healthPolicyTemplate string) (bool, error) {
func (td *traitDef) HealthCheck(ctx process.Context, cli client.Client, accessor util.NamespaceAccessor, healthPolicyTemplate string) (bool, error) {
if healthPolicyTemplate == "" {
return true, nil
}
templateContext, err := td.getTemplateContext(ctx, cli, ns)
templateContext, err := td.getTemplateContext(ctx, cli, accessor)
if err != nil {
return false, errors.WithMessage(err, "get template context")
}

View File

@@ -718,10 +718,10 @@ func TestImports(t *testing.T) {
context: stepSessionID: "3w9qkdgn5w"`
v, err := NewValue(`
import (
"vela/op"
"vela/custom"
)
id: op.context.stepSessionID
id: custom.context.stepSessionID
`+cont, nil, cont)
assert.NilError(t, err)

View File

@@ -201,7 +201,7 @@ const (
// AnnotationWorkloadName indicates the managed workload's name by trait
AnnotationWorkloadName = "trait.oam.dev/workload-name"
// AnnotationControllerRequirement indicates the controller version that can process the application.
// AnnotationControllerRequirement indicates the controller version that can process the application/definition.
AnnotationControllerRequirement = "app.oam.dev/controller-version-require"
// AnnotationApplicationServiceAccountName indicates the name of the ServiceAccount to use to apply Components and run Workflow.

View File

@@ -953,3 +953,38 @@ func AsController(r *corev1.ObjectReference) metav1.OwnerReference {
ref.Controller = &c
return ref
}
// NamespaceAccessor namespace accessor for resource
type NamespaceAccessor interface {
For(obj client.Object) string
Namespace() string
}
type applicationResourceNamespaceAccessor struct {
applicationNamespace string
overrideNamespace string
}
// For access namespace for resource
func (accessor *applicationResourceNamespaceAccessor) For(obj client.Object) string {
if accessor.overrideNamespace != "" {
return accessor.overrideNamespace
}
if originalNamespace := obj.GetNamespace(); originalNamespace != "" {
return originalNamespace
}
return accessor.applicationNamespace
}
// Namespace the namespace by default
func (accessor *applicationResourceNamespaceAccessor) Namespace() string {
if accessor.overrideNamespace != "" {
return accessor.overrideNamespace
}
return accessor.applicationNamespace
}
// NewApplicationResourceNamespaceAccessor create namespace accessor for resource in application
func NewApplicationResourceNamespaceAccessor(appNs, overrideNs string) NamespaceAccessor {
return &applicationResourceNamespaceAccessor{applicationNamespace: appNs, overrideNamespace: overrideNs}
}
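A short sketch of how the accessor is meant to be used (the namespaces are made up for illustration); the resolution order follows directly from the two methods above: override namespace, then the object's own namespace, then the application namespace.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"

	"github.com/oam-dev/kubevela/pkg/oam/util"
)

func main() {
	accessor := util.NewApplicationResourceNamespaceAccessor("app-ns", "")

	cm := &corev1.ConfigMap{}
	cm.SetNamespace("explicit-ns")
	fmt.Println(accessor.For(cm))                  // explicit-ns: the object already carries a namespace
	fmt.Println(accessor.For(&corev1.ConfigMap{})) // app-ns: fall back to the application namespace
	fmt.Println(accessor.Namespace())              // app-ns: default namespace when no override is set

	override := util.NewApplicationResourceNamespaceAccessor("app-ns", "forced-ns")
	fmt.Println(override.For(cm)) // forced-ns: an override namespace always wins
}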

View File

@@ -81,3 +81,14 @@ func TestParseApplyOncePolicy(t *testing.T) {
r.NoError(err)
r.Equal(policySpec, spec)
}
func TestParsePolicy(t *testing.T) {
r := require.New(t)
// Test skipping empty policy
app := &v1beta1.Application{Spec: v1beta1.ApplicationSpec{
Policies: []v1beta1.AppPolicy{{Type: "example", Name: "s", Properties: nil}},
}}
exists, err := parsePolicy(app, "example", nil)
r.False(exists, "empty policy should not be included")
r.NoError(err)
}
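The same nil-properties guard recurs in the env-binding, topology, and override policy parsers below. As a standalone sketch (the helper name parseSpec and the error wording are illustrative), the pattern is simply to reject or skip a policy before dereferencing Properties.Raw:

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
)

// parseSpec guards against a policy with nil Properties before touching
// Properties.Raw, which previously caused a panic.
func parseSpec(name string, properties *runtime.RawExtension, out interface{}) error {
	if properties == nil {
		return fmt.Errorf("policy %s must not have empty properties", name)
	}
	return json.Unmarshal(properties.Raw, out)
}

func main() {
	var spec map[string]interface{}
	fmt.Println(parseSpec("topology-local", nil, &spec)) // error: empty properties
	fmt.Println(parseSpec("topology-local", &runtime.RawExtension{Raw: []byte(`{"clusters":["local"]}`)}, &spec)) // <nil>
	fmt.Println(spec["clusters"]) // [local]
}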

View File

@@ -34,7 +34,7 @@ const (
// GetEnvBindingPolicy extract env-binding policy with given policy name, if policy name is empty, the first env-binding policy will be used
func GetEnvBindingPolicy(app *v1beta1.Application, policyName string) (*v1alpha1.EnvBindingSpec, error) {
for _, policy := range app.Spec.Policies {
if (policy.Name == policyName || policyName == "") && policy.Type == v1alpha1.EnvBindingPolicyType {
if (policy.Name == policyName || policyName == "") && policy.Type == v1alpha1.EnvBindingPolicyType && policy.Properties != nil {
envBindingSpec := &v1alpha1.EnvBindingSpec{}
err := json.Unmarshal(policy.Properties.Raw, envBindingSpec)
return envBindingSpec, err

View File

@@ -19,6 +19,7 @@ package policy
import (
"context"
"encoding/json"
"fmt"
"github.com/pkg/errors"
errors2 "k8s.io/apimachinery/pkg/api/errors"
@@ -32,6 +33,9 @@ import (
// ParseOverridePolicyRelatedDefinitions get definitions inside override policy
func ParseOverridePolicyRelatedDefinitions(ctx context.Context, cli client.Client, app *v1beta1.Application, policy v1beta1.AppPolicy) (compDefs []*v1beta1.ComponentDefinition, traitDefs []*v1beta1.TraitDefinition, err error) {
if policy.Properties == nil {
return compDefs, traitDefs, fmt.Errorf("override policy %s must not have empty properties", policy.Name)
}
spec := &v1alpha1.OverridePolicySpec{}
if err = json.Unmarshal(policy.Properties.Raw, spec); err != nil {
return nil, nil, errors.Wrapf(err, "invalid override policy spec")

View File

@@ -62,6 +62,12 @@ func TestParseOverridePolicyRelatedDefinitions(t *testing.T) {
Policy: v1beta1.AppPolicy{Properties: &runtime.RawExtension{Raw: []byte(`{"components":[{"type":"comp","traits":[{"type":"trait-404"}]}]}`)}},
Error: "failed to get trait definition",
},
"empty-policy": {
Policy: v1beta1.AppPolicy{Properties: nil},
ComponentDefs: nil,
TraitDefs: nil,
Error: "have empty properties",
},
}
for name, tt := range testCases {
t.Run(name, func(t *testing.T) {

View File

@@ -18,6 +18,7 @@ package policy
import (
"context"
"fmt"
"github.com/pkg/errors"
utilfeature "k8s.io/apiserver/pkg/util/feature"
@@ -65,6 +66,9 @@ func GetPlacementsFromTopologyPolicies(ctx context.Context, cli client.Client, a
hasTopologyPolicy := false
for _, policy := range policies {
if policy.Type == v1alpha1.TopologyPolicyType {
if policy.Properties == nil {
return nil, fmt.Errorf("topology policy %s must not have empty properties", policy.Name)
}
hasTopologyPolicy = true
topologySpec := &v1alpha1.TopologyPolicySpec{}
if err := utils.StrictUnmarshal(policy.Properties.Raw, topologySpec); err != nil {

View File

@@ -146,6 +146,10 @@ func TestGetClusterLabelSelectorInTopology(t *testing.T) {
Inputs: []v1beta1.AppPolicy{},
Outputs: []v1alpha1.PlacementDecision{{Cluster: "local", Namespace: ""}},
},
"empty-topology-policy": {
Inputs: []v1beta1.AppPolicy{{Type: "topology", Name: "some-name", Properties: nil}},
Error: "have empty properties",
},
}
for name, tt := range testCases {
t.Run(name, func(t *testing.T) {

View File

@@ -437,7 +437,7 @@ func (h *gcHandler) GarbageCollectLegacyResourceTrackers(ctx context.Context) er
}
}
for _, policy := range h.app.Spec.Policies {
if policy.Type == v1alpha1.EnvBindingPolicyType {
if policy.Type == v1alpha1.EnvBindingPolicyType && policy.Properties != nil {
spec := &v1alpha1.EnvBindingSpec{}
if err = json.Unmarshal(policy.Properties.Raw, &spec); err == nil {
for _, env := range spec.Envs {

View File

@@ -163,6 +163,10 @@ func listApplicationResourceTrackers(ctx context.Context, cli client.Client, app
}
// ListApplicationResourceTrackers list resource trackers for application with all historyRTs sorted by version number
// rootRT -> The ResourceTracker that records life-long resources. These resources will only be recycled when application is removed.
// currentRT -> The ResourceTracker that tracks the resources used by the latest version of application.
// historyRTs -> The ResourceTrackers that tracks the resources in outdated versions.
// crRT -> The ResourceTracker that tracks the component revisions created by the application.
func ListApplicationResourceTrackers(ctx context.Context, cli client.Client, app *v1beta1.Application) (rootRT *v1beta1.ResourceTracker, currentRT *v1beta1.ResourceTracker, historyRTs []*v1beta1.ResourceTracker, crRT *v1beta1.ResourceTracker, err error) {
metrics.ListResourceTrackerCounter.WithLabelValues("application").Inc()
rts, err := listApplicationResourceTrackers(ctx, cli, app)

View File

@@ -19,20 +19,32 @@ package stdlib
import (
"embed"
"fmt"
"os"
"path/filepath"
"strings"
"cuelang.org/go/cue/build"
"k8s.io/klog/v2"
)
func init() {
var err error
BuiltinImports, err = initBuiltinImports()
if err != nil {
klog.ErrorS(err, "Unable to init builtin imports")
os.Exit(1)
}
}
var (
//go:embed pkgs op.cue ql.cue
fs embed.FS
// BuiltinImports is the builtin imports for cue
BuiltinImports []*build.Instance
)
// GetPackages Get Stdlib packages
func GetPackages(tagTempl string) (map[string]string, error) {
func GetPackages() (map[string]string, error) {
files, err := fs.ReadDir("pkgs")
if err != nil {
return nil, err
@@ -63,16 +75,32 @@ func GetPackages(tagTempl string) (map[string]string, error) {
}
return map[string]string{
"vela/op": opContent + "\n" + tagTempl,
"vela/ql": qlContent + "\n" + tagTempl,
"vela/op": opContent,
"vela/ql": qlContent,
}, nil
}
// AddImportsFor install imports for build.Instance.
func AddImportsFor(inst *build.Instance, tagTempl string) error {
pkgs, err := GetPackages(tagTempl)
inst.Imports = append(inst.Imports, BuiltinImports...)
if tagTempl != "" {
p := &build.Instance{
PkgName: filepath.Base("vela/custom"),
ImportPath: "vela/custom",
}
if err := p.AddFile("-", tagTempl); err != nil {
return err
}
inst.Imports = append(inst.Imports, p)
}
return nil
}
func initBuiltinImports() ([]*build.Instance, error) {
imports := make([]*build.Instance, 0)
pkgs, err := GetPackages()
if err != nil {
return err
return nil, err
}
for path, content := range pkgs {
p := &build.Instance{
@@ -80,9 +108,9 @@ func AddImportsFor(inst *build.Instance, tagTempl string) error {
ImportPath: path,
}
if err := p.AddFile("-", content); err != nil {
return err
return nil, err
}
inst.Imports = append(inst.Imports, p)
imports = append(imports, p)
}
return nil
return imports, nil
}
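With this refactor the stdlib packages are compiled once into BuiltinImports at init time, and a non-empty tagTempl is no longer appended to vela/op and vela/ql but shipped as a separate vela/custom instance. A usage sketch, assuming the package import path github.com/oam-dev/kubevela/pkg/stdlib and an arbitrary tag template:

package main

import (
	"fmt"

	"cuelang.org/go/cue/build"

	"github.com/oam-dev/kubevela/pkg/stdlib"
)

func main() {
	inst := &build.Instance{}
	// Any CUE that references vela/custom works for this sketch.
	if err := inst.AddFile("-", `
import "vela/custom"

out: custom.context.id`); err != nil {
		panic(err)
	}
	// AddImportsFor appends the pre-built stdlib.BuiltinImports and, because
	// tagTempl is non-empty, an extra "vela/custom" instance carrying it.
	if err := stdlib.AddImportsFor(inst, `context: id: "example"`); err != nil {
		panic(err)
	}
	fmt.Println(len(inst.Imports)) // builtin packages (vela/op, vela/ql) plus vela/custom
}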

View File

@@ -26,7 +26,7 @@ import (
)
func TestGetPackages(t *testing.T) {
pkgs, err := GetPackages("context: _")
pkgs, err := GetPackages()
assert.NilError(t, err)
var r cue.Runtime
for path, content := range pkgs {
@@ -36,8 +36,8 @@ func TestGetPackages(t *testing.T) {
builder := &build.Instance{}
builder.AddFile("-", `
import "vela/op"
out: op.context`)
import "vela/custom"
out: custom.context`)
err = AddImportsFor(builder, "context: id: \"xxx\"")
assert.NilError(t, err)

View File

@@ -31,7 +31,7 @@
kind: string
}
filter?: {
namespace?: *"" | string
namespace?: string
matchingLabels?: {...}
}
list?: {...}

View File

@@ -96,7 +96,9 @@ func (h *ValidatingHandler) Handle(ctx context.Context, req admission.Request) a
switch req.Operation {
case admissionv1.Create:
if allErrs := h.ValidateCreate(ctx, app); len(allErrs) > 0 {
return admission.Errored(http.StatusUnprocessableEntity, mergeErrors(allErrs))
// http.StatusUnprocessableEntity will NOT report any error descriptions
// to the client, use generic http.StatusBadRequest instead.
return admission.Errored(http.StatusBadRequest, mergeErrors(allErrs))
}
case admissionv1.Update:
oldApp := &v1beta1.Application{}
@@ -105,7 +107,7 @@ func (h *ValidatingHandler) Handle(ctx context.Context, req admission.Request) a
}
if app.ObjectMeta.DeletionTimestamp.IsZero() {
if allErrs := h.ValidateUpdate(ctx, app, oldApp); len(allErrs) > 0 {
return admission.Errored(http.StatusUnprocessableEntity, mergeErrors(allErrs))
return admission.Errored(http.StatusBadRequest, mergeErrors(allErrs))
}
}
default:

View File

@@ -373,4 +373,21 @@ var _ = Describe("Test Application Validator", func() {
resp = handler.Handle(ctx, req)
Expect(resp.Allowed).Should(BeFalse())
})
It("Test Application with empty policy", func() {
req := admission.Request{
AdmissionRequest: admissionv1.AdmissionRequest{
Operation: admissionv1.Create,
Resource: metav1.GroupVersionResource{Group: "core.oam.dev", Version: "v1beta1", Resource: "applications"},
Object: runtime.RawExtension{
Raw: []byte(`
{"kind":"Application","metadata":{"name":"app-with-empty-policy-webhook-test", "namespace":"default"},
"spec":{"components":[],"policies":[{"name":"2345","type":"garbage-collect","properties":null}]}}
`),
},
},
}
resp := handler.Handle(ctx, req)
Expect(resp.Allowed).Should(BeFalse())
})
})

View File

@@ -118,13 +118,14 @@ func (h *ValidatingHandler) Handle(ctx context.Context, req admission.Request) a
if err := h.Decoder.DecodeRaw(req.AdmissionRequest.OldObject, oldApp); err != nil {
return admission.Errored(http.StatusBadRequest, err)
}
if allErrs := h.ValidateUpdate(ctx, app, oldApp); len(allErrs) > 0 {
return admission.Errored(http.StatusUnprocessableEntity, allErrs.ToAggregate())
// http.StatusUnprocessableEntity will NOT report any error descriptions
// to the client, use generic http.StatusBadRequest instead.
return admission.Errored(http.StatusBadRequest, allErrs.ToAggregate())
}
case admissionv1.Create:
if allErrs := h.ValidateCreate(ctx, app); len(allErrs) > 0 {
return admission.Errored(http.StatusUnprocessableEntity, allErrs.ToAggregate())
return admission.Errored(http.StatusBadRequest, allErrs.ToAggregate())
}
default:
// Do nothing for CONNECT

View File

@@ -30,6 +30,8 @@ import (
"github.com/oam-dev/kubevela/pkg/cue/model"
"github.com/oam-dev/kubevela/pkg/cue/model/value"
"github.com/oam-dev/kubevela/pkg/multicluster"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/util"
wfContext "github.com/oam-dev/kubevela/pkg/workflow/context"
"github.com/oam-dev/kubevela/pkg/workflow/providers"
"github.com/oam-dev/kubevela/pkg/workflow/types"
@@ -90,6 +92,12 @@ func (h *provider) Apply(ctx wfContext.Context, v *value.Value, act types.Action
}
deployCtx := multicluster.ContextWithClusterName(context.Background(), cluster)
deployCtx = auth.ContextWithUserInfo(deployCtx, h.app)
if h.app != nil {
util.AddLabels(workload, map[string]string{
oam.LabelAppName: h.app.Name,
oam.LabelAppNamespace: h.app.Namespace,
})
}
if err := h.apply(deployCtx, cluster, common.WorkflowResourceCreator, workload); err != nil {
return err
}
@@ -126,7 +134,7 @@ func (h *provider) ApplyInParallel(ctx wfContext.Context, v *value.Value, act ty
deployCtx := multicluster.ContextWithClusterName(context.Background(), cluster)
deployCtx = auth.ContextWithUserInfo(deployCtx, h.app)
if err = h.apply(deployCtx, cluster, common.WorkflowResourceCreator, workloads...); err != nil {
return v.FillObject(err, "err")
return err
}
return nil
}

View File

@@ -126,6 +126,9 @@ func overrideConfiguration(policies []v1beta1.AppPolicy, components []common.App
var err error
for _, policy := range policies {
if policy.Type == v1alpha1.OverridePolicyType {
if policy.Properties == nil {
return nil, fmt.Errorf("override policy %s must not have empty properties", policy.Name)
}
overrideSpec := &v1alpha1.OverridePolicySpec{}
if err := utils.StrictUnmarshal(policy.Properties.Raw, overrideSpec); err != nil {
return nil, errors.Wrapf(err, "failed to parse override policy %s", policy.Name)

View File

@@ -48,6 +48,14 @@ func TestOverrideConfiguration(t *testing.T) {
}},
Error: "failed to parse override policy",
},
"empty-policy": {
Policies: []v1beta1.AppPolicy{{
Name: "override-policy",
Type: "override",
Properties: nil,
}},
Error: "empty properties",
},
"normal": {
Policies: []v1beta1.AppPolicy{{
Name: "override-policy",

View File

@@ -168,7 +168,9 @@ func (g *DeployPreApproveWorkflowStepGenerator) Generate(app *v1beta1.Applicatio
for _, step := range existingSteps {
if step.Type == "deploy" && !lastSuspend {
props := DeployWorkflowStepSpec{}
_ = utils.StrictUnmarshal(step.Properties.Raw, &props)
if step.Properties != nil {
_ = utils.StrictUnmarshal(step.Properties.Raw, &props)
}
if props.Auto != nil && !*props.Auto {
steps = append(steps, v1beta1.WorkflowStep{
Name: "manual-approve-" + step.Name,

View File

@@ -265,7 +265,7 @@ func (t *TaskLoader) makeValue(ctx wfContext.Context, templ string, id string, p
}
contextTempl += "\n" + pCtx.ExtendedContextFile()
return value.NewValue(templ+contextTempl, t.pd, contextTempl, value.ProcessScript, value.TagFieldOrder)
return value.NewValue(templ+contextTempl, t.pd, "", value.ProcessScript, value.TagFieldOrder)
}
type executor struct {

View File

@@ -403,14 +403,14 @@ func (w *workflow) setMetadataToContext(wfCtx wfContext.Context) error {
return wfCtx.SetVar(metadata, wfTypes.ContextKeyMetadata)
}
func (e *engine) getBackoffTimes(stepID string) (success bool, backoffTimes int) {
func (e *engine) getBackoffTimes(stepID string) int {
if v, ok := e.wfCtx.GetValueInMemory(wfTypes.ContextPrefixBackoffTimes, stepID); ok {
times, ok := v.(int)
if ok {
return true, times
return times
}
}
return false, 0
return -1
}
func (e *engine) getBackoffWaitTime() int {
@@ -418,17 +418,19 @@ func (e *engine) getBackoffWaitTime() int {
minTimes := 15
found := false
for _, step := range e.status.Steps {
success, backoffTimes := e.getBackoffTimes(step.ID)
if success && backoffTimes < minTimes {
minTimes = backoffTimes
if backoffTimes := e.getBackoffTimes(step.ID); backoffTimes > 0 {
found = true
if backoffTimes < minTimes {
minTimes = backoffTimes
}
}
if step.SubStepsStatus != nil {
for _, subStep := range step.SubStepsStatus {
success, backoffTimes := e.getBackoffTimes(subStep.ID)
if success && backoffTimes < minTimes {
minTimes = backoffTimes
if backoffTimes := e.getBackoffTimes(subStep.ID); backoffTimes > 0 {
found = true
if backoffTimes < minTimes {
minTimes = backoffTimes
}
}
}
}
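A standalone sketch of the new contract (step IDs and counts are invented; the real method reads the workflow context's in-memory store): getBackoffTimes now returns -1 when no count has been recorded, and the wait-time scan only lets positive counts participate in the minimum.

package main

import "fmt"

// getBackoff mirrors the new getBackoffTimes contract: the recorded count for
// the step, or -1 when nothing has been recorded yet.
func getBackoff(recorded map[string]int, stepID string) int {
	if times, ok := recorded[stepID]; ok {
		return times
	}
	return -1
}

func main() {
	recorded := map[string]int{"step-a": 3, "step-b": 7}
	minTimes := 15
	found := false
	for _, id := range []string{"step-a", "step-b", "step-c"} {
		// Only positive counts take part in the minimum, matching the new
		// `backoffTimes > 0` guard; "step-c" yields -1 and is skipped.
		if backoffTimes := getBackoff(recorded, id); backoffTimes > 0 {
			found = true
			if backoffTimes < minTimes {
				minTimes = backoffTimes
			}
		}
	}
	fmt.Println(found, minTimes) // true 3
}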

View File

@@ -912,10 +912,12 @@ var _ = Describe("Test Workflow", func() {
Expect(interval).Should(BeEquivalentTo(int(0.05 * math.Pow(2, float64(i+5)))))
}
_, err = wf.ExecuteSteps(ctx, revision, runners)
Expect(err).ToNot(HaveOccurred())
interval = e.getBackoffWaitTime()
Expect(interval).Should(BeEquivalentTo(MaxWorkflowWaitBackoffTime))
for i := 0; i < 10; i++ {
_, err = wf.ExecuteSteps(ctx, revision, runners)
Expect(err).ToNot(HaveOccurred())
interval = e.getBackoffWaitTime()
Expect(interval).Should(BeEquivalentTo(MaxWorkflowWaitBackoffTime))
}
By("Test get backoff time after clean")
wfContext.CleanupMemoryStore(app.Name, app.Namespace)

View File

@@ -731,8 +731,15 @@ func waitApplicationRunning(k8sClient client.Client, addonName string) error {
return client.IgnoreNotFound(err)
}
phase := app.Status.Phase
if phase == common2.ApplicationRunning {
switch app.Status.Phase {
case common2.ApplicationRunning:
return nil
case common2.ApplicationWorkflowSuspending:
fmt.Printf("Enabling suspend, please run \"vela workflow resume %s -n vela-system\" to continue", pkgaddon.Convert2AppName(addonName))
return nil
case common2.ApplicationWorkflowTerminated:
return errors.Errorf("Enabling failed, please run \"vela status %s -n vela-system\" to check the status of the addon", pkgaddon.Convert2AppName(addonName))
default:
}
timeConsumed := int(time.Since(start).Seconds())
applySpinnerNewSuffix(spinner, fmt.Sprintf("Waiting addon application running. It is now in phase: %s (timeout %d/%d seconds)...",

View File

@@ -165,7 +165,7 @@ var _ = Describe("Addon status or info", func() {
Expect(ds.DeleteRegistry(context.Background(), "KubeVela")).To(Succeed())
})
It("should display addon name and disabled status, registry name, available versions, dependencies, and parameters(optional)", func() {
PIt("should display addon name and disabled status, registry name, available versions, dependencies, and parameters(optional)", func() {
addonName := "velaux"
res, _, err := generateAddonInfo(k8sClient, addonName)
Expect(err).Should(BeNil())

View File

@@ -285,7 +285,7 @@ func NewDefinitionInitCommand(c common.Args) *cobra.Command {
cmd.Flags().StringP(FlagTemplateYAML, "f", "", "Specify the template yaml file that definition will use to build the schema. If empty, a default template for the given definition type will be used.")
cmd.Flags().StringP(FlagOutput, "o", "", "Specify the output path of the generated definition. If empty, the definition will be printed in the console.")
cmd.Flags().BoolP(FlagInteractive, "i", false, "Specify whether use interactive process to help generate definitions.")
cmd.Flags().StringP(FlagProvider, "p", "", "Specify which provider the cloud resource definition belongs to. Only `alibaba`, `aws`, `azure` are supported.")
cmd.Flags().StringP(FlagProvider, "p", "", "Specify which provider the cloud resource definition belongs to. Only `alibaba`, `aws`, `azure`, `gcp`, `baidu`, `tencent`, `elastic`, `ucloud`, `vsphere` are supported.")
cmd.Flags().StringP(FlagGit, "", "", "Specify which git repository the configuration(HCL) is stored in. Valid when --provider/-p is set.")
cmd.Flags().StringP(FlagLocal, "", "", "Specify the local path of the configuration(HCL) file. Valid when --provider/-p is set.")
cmd.Flags().StringP(FlagPath, "", "", "Specify which path the configuration(HCL) is stored in the Git repository. Valid when --git is set.")
@@ -298,7 +298,7 @@ func generateTerraformTypedComponentDefinition(cmd *cobra.Command, name, kind, p
}
switch provider {
case "aws", "azure", "alibaba", "tencent", "gcp", "baidu", "elastic", "ucloud":
case "aws", "azure", "alibaba", "tencent", "gcp", "baidu", "elastic", "ucloud", "vsphere":
var terraform *commontype.Terraform
git, err := cmd.Flags().GetString(FlagGit)
@@ -374,7 +374,7 @@ func generateTerraformTypedComponentDefinition(cmd *cobra.Command, name, kind, p
}
return out.String(), nil
default:
return "", errors.Errorf("Provider `%s` is not supported. Only `alibaba`, `aws`, `azure`, `gcp`, `baidu`, `tencent`, `elastic`, `ucloud` are supported.", provider)
return "", errors.Errorf("Provider `%s` is not supported. Only `alibaba`, `aws`, `azure`, `gcp`, `baidu`, `tencent`, `elastic`, `ucloud`, `vsphere` are supported.", provider)
}
}

View File

@@ -144,13 +144,13 @@ func prepareProviderAddSubCommand(c common.Args, ioStreams cmdutil.IOStreams) ([
if len(os.Args) < 2 || os.Args[1] != "provider" {
return nil, nil
}
ctx, cancel := context.WithTimeout(context.Background(), time.Minute*1)
timeoutCtx, cancel := context.WithTimeout(context.Background(), time.Minute*1)
defer cancel()
k8sClient, err := c.GetClient()
if err != nil {
return nil, err
}
defs, err := getTerraformProviderTypes(ctx, k8sClient)
defs, err := getTerraformProviderTypes(timeoutCtx, k8sClient)
if err == nil {
cmds := make([]*cobra.Command, len(defs))
for i, d := range defs {
@@ -161,7 +161,7 @@ func prepareProviderAddSubCommand(c common.Args, ioStreams cmdutil.IOStreams) ([
Long: fmt.Sprintf("Authenticate Terraform Cloud Provider %s by creating a credential secret and a Terraform Controller Provider", providerType),
Example: fmt.Sprintf("vela provider add %s", providerType),
}
parameters, err := getParameters(ctx, k8sClient, providerType)
parameters, err := getParameters(context.Background(), k8sClient, providerType)
if err != nil {
return nil, err
}
@@ -335,9 +335,9 @@ func prepareProviderDeleteCommand(c common.Args, ioStreams cmdutil.IOStreams) *c
if err != nil {
return err
}
ctx, cancel := context.WithTimeout(context.Background(), time.Minute*1)
timeoutCtx, cancel := context.WithTimeout(context.Background(), time.Minute*1)
defer cancel()
defs, err := getTerraformProviderTypes(ctx, k8sClient)
defs, err := getTerraformProviderTypes(timeoutCtx, k8sClient)
if len(args) < 1 {
errMsg := "must specify a Terraform Cloud Provider type"
if err == nil {
@@ -373,13 +373,13 @@ func prepareProviderDeleteSubCommand(c common.Args, ioStreams cmdutil.IOStreams)
if len(os.Args) < 2 || os.Args[1] != "provider" {
return nil, nil
}
ctx, cancel := context.WithTimeout(context.Background(), time.Minute*1)
timeoutContext, cancel := context.WithTimeout(context.Background(), time.Minute*1)
defer cancel()
k8sClient, err := c.GetClient()
if err != nil {
return nil, err
}
defs, err := getTerraformProviderTypes(ctx, k8sClient)
defs, err := getTerraformProviderTypes(timeoutContext, k8sClient)
if err == nil {
cmds := make([]*cobra.Command, len(defs))
for i, d := range defs {
@@ -390,7 +390,7 @@ func prepareProviderDeleteSubCommand(c common.Args, ioStreams cmdutil.IOStreams)
Long: fmt.Sprintf("Delete Terraform Cloud Provider %s", providerType),
Example: fmt.Sprintf("vela provider delete %s", providerType),
}
parameters, err := getParameters(ctx, k8sClient, providerType)
parameters, err := getParameters(context.Background(), k8sClient, providerType)
if err != nil {
return nil, err
}
@@ -404,7 +404,7 @@ func prepareProviderDeleteSubCommand(c common.Args, ioStreams cmdutil.IOStreams)
if err != nil || name == "" {
return fmt.Errorf("must specify a name for the Terraform Cloud Provider %s", providerType)
}
if err := config.DeleteApplication(ctx, k8sClient, name, true); err != nil {
if err := config.DeleteApplication(context.Background(), k8sClient, name, true); err != nil {
return errors.Wrapf(err, "failed to delete Terraform Cloud Provider %s", name)
}
ioStreams.Infof("Successfully delete provider %s for %s\n", name, providerType)

View File

@@ -17,6 +17,7 @@ limitations under the License.
package cli
import (
"bufio"
"context"
"fmt"
"time"
@@ -48,11 +49,15 @@ type UnInstallArgs struct {
Namespace string
Detail bool
force bool
cancel bool
}
// NewUnInstallCommand creates `uninstall` command to uninstall vela core
func NewUnInstallCommand(c common.Args, order string, ioStreams util.IOStreams) *cobra.Command {
unInstallArgs := &UnInstallArgs{Args: c, userInput: NewUserInput(), helmHelper: helm.NewHelper()}
unInstallArgs := &UnInstallArgs{Args: c, userInput: &UserInput{
Writer: ioStreams.Out,
Reader: bufio.NewReader(ioStreams.In),
}, helmHelper: helm.NewHelper()}
cmd := &cobra.Command{
Use: "uninstall",
Short: "Uninstalls KubeVela from a Kubernetes cluster",
@@ -60,8 +65,8 @@ func NewUnInstallCommand(c common.Args, order string, ioStreams util.IOStreams)
Long: "Uninstalls KubeVela from a Kubernetes cluster.",
Args: cobra.ExactArgs(0),
PreRunE: func(cmd *cobra.Command, args []string) error {
userConfirmation := unInstallArgs.userInput.AskBool("Would you like to uninstall KubeVela from this cluster?", &UserInputOptions{AssumeYes: assumeYes})
if !userConfirmation {
unInstallArgs.cancel = unInstallArgs.userInput.AskBool("Would you like to uninstall KubeVela from this cluster?", &UserInputOptions{AssumeYes: assumeYes})
if !unInstallArgs.cancel {
return nil
}
kubeClient, err := c.GetClient()
@@ -98,6 +103,9 @@ func NewUnInstallCommand(c common.Args, order string, ioStreams util.IOStreams)
return nil
},
RunE: func(cmd *cobra.Command, args []string) error {
if !unInstallArgs.cancel {
return nil
}
ioStreams.Info("Starting to uninstall KubeVela")
restConfig, err := c.GetConfig()
if err != nil {

View File

@@ -19,14 +19,20 @@ package cli
import (
"context"
"fmt"
"os"
"strings"
"testing"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/stretchr/testify/assert"
"sigs.k8s.io/yaml"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/oam/util"
"github.com/oam-dev/kubevela/pkg/utils/common"
pkgutils "github.com/oam-dev/kubevela/pkg/utils/util"
)
var _ = Describe("Test Install Command", func() {
@@ -63,6 +69,17 @@ var _ = Describe("Test Install Command", func() {
})
})
func TestUninstall(t *testing.T) {
// Test answering NO when prompted. Should just exit.
cmd := NewUnInstallCommand(common.Args{}, "", pkgutils.IOStreams{
Out: os.Stdout,
In: strings.NewReader("n\n"),
})
cmd.SetArgs([]string{})
err := cmd.Execute()
assert.Nil(t, err, "should just exit if answer is no")
}
var fluxcdYaml = `
apiVersion: core.oam.dev/v1beta1
kind: Application

View File

@@ -233,14 +233,17 @@ func prepareToForceDeleteTerraformComponents(ctx context.Context, k8sClient clie
for _, c := range app.Spec.Components {
var def corev1beta1.ComponentDefinition
if err := k8sClient.Get(ctx, client.ObjectKey{Name: c.Type, Namespace: types.DefaultKubeVelaNS}, &def); err != nil {
return err
if !apierrors.IsNotFound(err) {
return err
}
if err := k8sClient.Get(ctx, client.ObjectKey{Name: c.Type, Namespace: namespace}, &def); err != nil {
return err
}
}
if def.Spec.Schematic != nil && def.Spec.Schematic.Terraform != nil {
var conf terraformapi.Configuration
if err := k8sClient.Get(ctx, client.ObjectKey{Name: c.Name, Namespace: namespace}, &conf); err != nil {
if !apierrors.IsNotFound(err) {
return err
}
return err
}
conf.Spec.ForceDelete = &forceDelete
if err := k8sClient.Update(ctx, &conf); err != nil {

View File

@@ -19,7 +19,7 @@ import (
"context"
"testing"
terraformapi "github.com/oam-dev/terraform-controller/api/v1beta1"
terraformapi "github.com/oam-dev/terraform-controller/api/v1beta2"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -55,7 +55,7 @@ func TestPrepareToForceDeleteTerraformComponents(t *testing.T) {
def1 := &v1beta1.ComponentDefinition{
TypeMeta: metav1.TypeMeta{
Kind: "ComponentDefinition",
APIVersion: "core.oam.dev/v1beta1",
APIVersion: "core.oam.dev/v1beta2",
},
ObjectMeta: metav1.ObjectMeta{
Name: "d1",
@@ -75,6 +75,16 @@ func TestPrepareToForceDeleteTerraformComponents(t *testing.T) {
Namespace: "default",
},
}
userNamespace := "another-namespace"
def2 := def1.DeepCopy()
def2.SetNamespace(userNamespace)
app2 := app1.DeepCopy()
app2.SetNamespace(userNamespace)
app2.SetName("app2")
conf2 := conf1.DeepCopy()
conf2.SetNamespace(userNamespace)
k8sClient1 := fake.NewClientBuilder().WithScheme(s).WithObjects(app1, def1, conf1).Build()
k8sClient2 := fake.NewClientBuilder().Build()
@@ -83,6 +93,7 @@ func TestPrepareToForceDeleteTerraformComponents(t *testing.T) {
k8sClient4 := fake.NewClientBuilder().WithScheme(s).WithObjects(app1, def1).Build()
k8sClient5 := fake.NewClientBuilder().WithScheme(s).WithObjects(app2, def2, conf2).Build()
type args struct {
k8sClient client.Client
namespace string
@@ -141,16 +152,27 @@ func TestPrepareToForceDeleteTerraformComponents(t *testing.T) {
"app1",
},
want: want{
errMsg: "no kind is registered for the type",
errMsg: "configurations.terraform.core.oam.dev \"c1\" not found",
},
},
"can read definition from application namespace": {
args: args{
k8sClient5,
userNamespace,
"app2",
},
want: want{},
},
}
for name, tc := range testcases {
t.Run(name, func(t *testing.T) {
err := prepareToForceDeleteTerraformComponents(ctx, tc.args.k8sClient, tc.args.namespace, tc.args.name)
if err != nil || tc.want.errMsg != "" {
if err != nil {
assert.NotEmpty(t, tc.want.errMsg)
assert.Contains(t, err.Error(), tc.want.errMsg)
} else {
assert.Empty(t, tc.want.errMsg)
}
})
}

View File

@@ -28,6 +28,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
kubevelatypes "github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/utils"
@@ -501,5 +502,95 @@ var _ = Describe("Test multicluster scenario", func() {
g.Expect(kerrors.IsNotFound(err)).Should(BeTrue())
}, 2*time.Minute).Should(Succeed())
})
It("Test application with failed gc and restart workflow", func() {
By("duplicate cluster")
secret := &corev1.Secret{}
const secretName = "disconnection-test"
Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: kubevelatypes.DefaultKubeVelaNS, Name: WorkerClusterName}, secret)).Should(Succeed())
secret.SetName(secretName)
secret.SetResourceVersion("")
Expect(k8sClient.Create(hubCtx, secret)).Should(Succeed())
defer func() {
_ = k8sClient.Delete(hubCtx, secret)
}()
By("create cluster normally")
bs, err := os.ReadFile("./testdata/app/app-disconnection-test.yaml")
Expect(err).Should(Succeed())
app := &v1beta1.Application{}
Expect(yaml.Unmarshal(bs, app)).Should(Succeed())
app.SetNamespace(namespace)
Expect(k8sClient.Create(hubCtx, app)).Should(Succeed())
key := client.ObjectKeyFromObject(app)
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, key, app)).Should(Succeed())
g.Expect(app.Status.Phase).Should(Equal(common.ApplicationRunning))
}).WithTimeout(30 * time.Second).WithPolling(2 * time.Second).Should(Succeed())
By("disconnect cluster")
Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: kubevelatypes.DefaultKubeVelaNS, Name: secretName}, secret)).Should(Succeed())
secret.Data["tls.crt"] = []byte("-")
Expect(k8sClient.Update(hubCtx, secret)).Should(Succeed())
By("update application")
Expect(k8sClient.Get(hubCtx, key, app)).Should(Succeed())
app.Spec.Policies = nil
Expect(k8sClient.Update(hubCtx, app)).Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, key, app)).Should(Succeed())
g.Expect(app.Status.ObservedGeneration).Should(Equal(app.Generation))
g.Expect(app.Status.Phase).Should(Equal(common.ApplicationRunning))
rts := &v1beta1.ResourceTrackerList{}
g.Expect(k8sClient.List(hubCtx, rts, client.MatchingLabels{oam.LabelAppName: key.Name, oam.LabelAppNamespace: key.Namespace})).Should(Succeed())
cnt := 0
for _, item := range rts.Items {
if item.Spec.Type == v1beta1.ResourceTrackerTypeVersioned {
cnt++
}
}
g.Expect(cnt).Should(Equal(2))
}).WithTimeout(30 * time.Second).WithPolling(2 * time.Second).Should(Succeed())
By("try update application again")
Expect(k8sClient.Get(hubCtx, key, app)).Should(Succeed())
if app.Annotations == nil {
app.Annotations = map[string]string{}
}
app.Annotations[oam.AnnotationPublishVersion] = "test"
Expect(k8sClient.Update(hubCtx, app)).Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, key, app)).Should(Succeed())
g.Expect(app.Status.LatestRevision).ShouldNot(BeNil())
g.Expect(app.Status.LatestRevision.Revision).Should(Equal(int64(3)))
g.Expect(app.Status.ObservedGeneration).Should(Equal(app.Generation))
g.Expect(app.Status.Phase).Should(Equal(common.ApplicationRunning))
}).WithTimeout(1 * time.Minute).WithPolling(2 * time.Second).Should(Succeed())
By("clear disconnection cluster secret")
Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: kubevelatypes.DefaultKubeVelaNS, Name: secretName}, secret)).Should(Succeed())
Expect(k8sClient.Delete(hubCtx, secret)).Should(Succeed())
By("update application again")
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, key, app)).Should(Succeed())
app.Annotations[oam.AnnotationPublishVersion] = "test2"
g.Expect(k8sClient.Update(hubCtx, app)).Should(Succeed())
}).WithTimeout(10 * time.Second).WithPolling(2 * time.Second).Should(Succeed())
By("wait gc application completed")
Eventually(func(g Gomega) {
rts := &v1beta1.ResourceTrackerList{}
g.Expect(k8sClient.List(hubCtx, rts, client.MatchingLabels{oam.LabelAppName: key.Name, oam.LabelAppNamespace: key.Namespace})).Should(Succeed())
cnt := 0
for _, item := range rts.Items {
if item.Spec.Type == v1beta1.ResourceTrackerTypeVersioned {
cnt++
}
}
g.Expect(cnt).Should(Equal(1))
}).WithTimeout(3 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())
})
})
})

View File

@@ -0,0 +1,17 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: app-disconnection-test
spec:
components:
- type: k8s-objects
name: app-dis-cm
properties:
objects:
- apiVersion: v1
kind: ConfigMap
policies:
- type: topology
name: disconnection-test
properties:
clusters: ["disconnection-test"]

View File

@@ -55,12 +55,18 @@
}
template: {
#K8sObject: {
apiVersion: string
kind: string
metadata: {
name: string
...
}
// +usage=The resource type for the Kubernetes objects
resource?: string
// +usage=The group name for the Kubernetes objects
group?: string
// +usage=If specified, fetch the Kubernetes objects with the name, exclusive to labelSelector
name?: string
// +usage=If specified, fetch the Kubernetes objects from the namespace. Otherwise, fetch from the application's namespace.
namespace?: string
// +usage=If specified, fetch the Kubernetes objects from the cluster. Otherwise, fetch from the local cluster.
cluster?: string
// +usage=If specified, fetch the Kubernetes objects according to the label selector, exclusive to name
labelSelector?: [string]: string
...
}
@@ -74,6 +80,9 @@ template: {
}
}
parameter: {
objects: [...#K8sObject]
// +usage=If specified, application will fetch native Kubernetes objects according to the object description
objects?: [...#K8sObject]
// +usage=If specified, the objects in the urls will be loaded.
urls?: [...string]
}
}

View File

@@ -57,7 +57,10 @@ template: {
for v in parameter.volumeMounts.pvc {
{
mountPath: v.mountPath
name: v.name
if v.subPath != _|_ {
subPath: v.subPath
}
name: v.name
}
},
] | []
@@ -66,7 +69,10 @@ template: {
for v in parameter.volumeMounts.configMap {
{
mountPath: v.mountPath
name: v.name
if v.subPath != _|_ {
subPath: v.subPath
}
name: v.name
}
},
] | []
@@ -75,7 +81,10 @@ template: {
for v in parameter.volumeMounts.secret {
{
mountPath: v.mountPath
name: v.name
if v.subPath != _|_ {
subPath: v.subPath
}
name: v.name
}
},
] | []
@@ -84,7 +93,10 @@ template: {
for v in parameter.volumeMounts.emptyDir {
{
mountPath: v.mountPath
name: v.name
if v.subPath != _|_ {
subPath: v.subPath
}
name: v.name
}
},
] | []
@@ -93,7 +105,10 @@ template: {
for v in parameter.volumeMounts.hostPath {
{
mountPath: v.mountPath
name: v.name
if v.subPath != _|_ {
subPath: v.subPath
}
name: v.name
}
},
] | []
@@ -160,6 +175,20 @@ template: {
] | []
}
volumesList: volumesArray.pvc + volumesArray.configMap + volumesArray.secret + volumesArray.emptyDir + volumesArray.hostPath
deDupVolumesArray: [
for val in [
for i, vi in volumesList {
for j, vj in volumesList if j < i && vi.name == vj.name {
_ignore: true
}
vi
},
] if val._ignore == _|_ {
val
},
]
output: {
apiVersion: "apps/v1"
kind: "Deployment"
@@ -305,7 +334,7 @@ template: {
}
if parameter["volumeMounts"] != _|_ {
volumes: volumesArray.pvc + volumesArray.configMap + volumesArray.secret + volumesArray.emptyDir + volumesArray.hostPath
volumes: deDupVolumesArray
}
}
}
@@ -421,6 +450,7 @@ template: {
pvc?: [...{
name: string
mountPath: string
subPath?: string
// +usage=The name of the PVC
claimName: string
}]
@@ -428,6 +458,7 @@ template: {
configMap?: [...{
name: string
mountPath: string
subPath?: string
defaultMode: *420 | int
cmName: string
items?: [...{
@@ -440,6 +471,7 @@ template: {
secret?: [...{
name: string
mountPath: string
subPath?: string
defaultMode: *420 | int
secretName: string
items?: [...{
@@ -452,12 +484,14 @@ template: {
emptyDir?: [...{
name: string
mountPath: string
subPath?: string
medium: *"" | "Memory"
}]
// +usage=Mount HostPath type volume
hostPath?: [...{
name: string
mountPath: string
subPath?: string
path: string
}]
}

View File

@@ -38,7 +38,7 @@ template: {
volumeMounts: [{
name: parameter.mountName
mountPath: parameter.initMountPath
}]
}] + parameter.extraVolumeMounts
}]
// +patchKey=name
volumes: [{
@@ -92,5 +92,13 @@ template: {
// +usage=Specify the mount path of init container
initMountPath: string
// +usage=Specify the extra volume mounts for the init container
extraVolumeMounts: [...{
// +usage=The name of the volume to be mounted
name: string
// +usage=The mountPath for mount in the init container
mountPath: string
}]
}
}

View File

@@ -59,7 +59,7 @@ template: {
resources: p.resources
}
if p.resourceNames != _|_ {
resources: p.resourceNames
resourceNames: p.resourceNames
}
if p.nonResourceURLs != _|_ {
nonResourceURLs: p.nonResourceURLs
@@ -96,7 +96,7 @@ template: {
resources: p.resources
}
if p.resourceNames != _|_ {
resources: p.resourceNames
resourceNames: p.resourceNames
}
if p.nonResourceURLs != _|_ {
nonResourceURLs: p.nonResourceURLs

View File

@@ -77,6 +77,11 @@ template: {
// +usage=The key of the config map to select from. Must be a valid secret key
key: string
}
// +usage=Specify the field reference for env
fieldRef?: {
// +usage=Specify the field path for env
fieldPath: string
}
}
}]

View File

@@ -65,6 +65,9 @@ template: {
{
name: "pvc-" + v.name
mountPath: v.mountPath
if v.subPath != _|_ {
subPath: v.subPath
}
}
}
},
@@ -75,6 +78,9 @@ template: {
{
name: "configmap-" + v.name
mountPath: v.mountPath
if v.subPath != _|_ {
subPath: v.subPath
}
}
},
] | []
@@ -108,6 +114,9 @@ template: {
{
name: "secret-" + v.name
mountPath: v.mountPath
if v.subPath != _|_ {
subPath: v.subPath
}
}
},
] | []
@@ -141,6 +150,9 @@ template: {
{
name: "emptydir-" + v.name
mountPath: v.mountPath
if v.subPath != _|_ {
subPath: v.subPath
}
}
},
] | []
@@ -150,13 +162,30 @@ template: {
{
name: "pvc-" + v.name
devicePath: v.mountPath
if v.subPath != _|_ {
subPath: v.subPath
}
}
},
] | []
volumesList: pvcVolumesList + configMapVolumesList + secretVolumesList + emptyDirVolumesList
deDupVolumesArray: [
for val in [
for i, vi in volumesList {
for j, vj in volumesList if j < i && vi.name == vj.name {
_ignore: true
}
vi
},
] if val._ignore == _|_ {
val
},
]
patch: spec: template: spec: {
// +patchKey=name
volumes: pvcVolumesList + configMapVolumesList + secretVolumesList + emptyDirVolumesList
volumes: deDupVolumesArray
containers: [{
// +patchKey=name
@@ -248,6 +277,7 @@ template: {
name: string
mountOnly: *false | bool
mountPath: string
subPath?: string
volumeMode: *"Filesystem" | string
volumeName?: string
accessModes: *["ReadWriteOnce"] | [...string]
@@ -289,6 +319,7 @@ template: {
configMapKey: string
}]
mountPath?: string
subPath?: string
defaultMode: *420 | int
readOnly: *false | bool
data?: {...}
@@ -312,6 +343,7 @@ template: {
secretKey: string
}]
mountPath?: string
subPath?: string
defaultMode: *420 | int
readOnly: *false | bool
stringData?: {...}
@@ -327,6 +359,7 @@ template: {
emptyDir?: [...{
name: string
mountPath: string
subPath?: string
medium: *"" | "Memory"
}]
}