Compare commits


21 Commits

Author SHA1 Message Date
github-actions[bot]
4c525f8e5d [Backport release-1.5] Fix: allow to read definition from user's namespace when force delete (#4789)
* Fix: allow to read definition from user's namespace when force deleting app with configuration

Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>
(cherry picked from commit 2f08c36132)

* Fix test

Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>
(cherry picked from commit 981950a14d)

* Fix wrong test

Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>
(cherry picked from commit 62863f1007)

Co-authored-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>
2022-09-27 11:58:44 +08:00
github-actions[bot]
bdf71bb290 [Backport release-1.5] Fix: memory leak of the apiserver (#4777)
* Fix: memory leak of the apiserver

Signed-off-by: barnettZQG <barnett.zqg@gmail.com>
(cherry picked from commit 0a8a70730f)

* Fix: listen to the context done event

Signed-off-by: barnettZQG <barnett.zqg@gmail.com>
(cherry picked from commit dfb81224cb)

* Fix: remove the shutdown code

Signed-off-by: barnettZQG <barnett.zqg@gmail.com>
(cherry picked from commit a331b2c54a)

Co-authored-by: barnettZQG <barnett.zqg@gmail.com>
2022-09-23 17:14:10 +08:00
Somefive
5873ba4c47 [Backport 1.5] Fix: gc legacy rt with regularization (#4768)
* Fix: gc legacy rt with regularization

Signed-off-by: Somefive <yd219913@alibaba-inc.com>

* Test: add test

Signed-off-by: Somefive <yd219913@alibaba-inc.com>

Signed-off-by: Somefive <yd219913@alibaba-inc.com>
2022-09-23 16:42:47 +08:00
github-actions[bot]
9ded3c9d3e Update definition.go (#4767)
fix bug, use labels to replace annotation

(cherry picked from commit 8f395d843c)

Co-authored-by: Hair1ossTeenager <45008570+Hair1ossTeenager@users.noreply.github.com>
2022-09-21 10:29:37 +08:00
github-actions[bot]
56c2827669 Fix: auth lack perm for rollout (#4764)
Signed-off-by: Somefive <yd219913@alibaba-inc.com>
(cherry picked from commit b538850eec)

Co-authored-by: Somefive <yd219913@alibaba-inc.com>
2022-09-20 20:35:20 +08:00
Somefive
b6a7d8621f Fix: disable workflow rerun when app spec struct change (#4754)
Signed-off-by: Somefive <yd219913@alibaba-inc.com>

Signed-off-by: Somefive <yd219913@alibaba-inc.com>
2022-09-20 15:43:38 +08:00
barnettZQG
b80f673f69 Fix: query the resource duplicately (#4714) (#4750)
* Fix: query the resource duplicately

Signed-off-by: barnettZQG <barnett.zqg@gmail.com>

* Fix: add an e2e test case

Signed-off-by: barnettZQG <barnett.zqg@gmail.com>

Signed-off-by: barnettZQG <barnett.zqg@gmail.com>

Signed-off-by: barnettZQG <barnett.zqg@gmail.com>
2022-09-19 16:14:34 +08:00
barnettZQG
e14dd09872 Fix: nil pointer dereference (#4735)
Signed-off-by: barnettZQG <barnett.zqg@gmail.com>

Signed-off-by: barnettZQG <barnett.zqg@gmail.com>
2022-09-19 15:45:28 +08:00
qiaozp
e31bacbb05 Fix: show command caused by wrong backport (#4749)
* Fix show command caused by wrong backport

Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>

* fmt

Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>

Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>
2022-09-19 12:07:39 +08:00
github-actions[bot]
e602c0420b Fix: panic when properties empty (#4748)
Signed-off-by: Somefive <yd219913@alibaba-inc.com>
(cherry picked from commit 13fec3cb18)

Co-authored-by: Somefive <yd219913@alibaba-inc.com>
2022-09-19 11:18:55 +08:00
github-actions[bot]
68e3c7cfc7 [Backport release-1.5] Fix: wrong endpoint for LoadBalancer type service (#4738)
* Fix: wrong endpoint for LoadBalancer type service

Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>

Fix test

Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>

fix test

Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>
(cherry picked from commit 2d511b7ae8)

* fix test

Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>
(cherry picked from commit cbea03fb27)

* fix cli test

Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>
(cherry picked from commit 145ea3a237)

Co-authored-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>
2022-09-16 16:58:07 +08:00
github-actions[bot]
a5b8cfc9fb [Backport release-1.5] Fix: vela show panic for component markdown format (#4726)
* Fix: vela show panic for component markdown format

Signed-off-by: qiaozp <qiaozhongpei.qzp@alibaba-inc.com>
(cherry picked from commit 71f08ccfc7)

* move code

Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>
(cherry picked from commit 2b64a9e8bc)

Co-authored-by: qiaozp <qiaozhongpei.qzp@alibaba-inc.com>
2022-09-15 09:55:03 +08:00
barnettZQG
fce308d234 Fix: CVE-2022-27664 (#4721) (#4723)
Signed-off-by: barnettZQG <barnett.zqg@gmail.com>

Signed-off-by: barnettZQG <barnett.zqg@gmail.com>
2022-09-14 16:15:13 +08:00
github-actions[bot]
3c9f359e60 [Backport release-1.5] Fix: the workflow records do not delete if the driver is MongoDB (#4722)
* Fix: the workflow records do not delete if the driver is MongoDB

Signed-off-by: barnettZQG <barnett.zqg@gmail.com>
(cherry picked from commit 284197ef09)

* Fix: change the unit test case

Signed-off-by: barnettZQG <barnett.zqg@gmail.com>
(cherry picked from commit 934c04b511)

Co-authored-by: barnettZQG <barnett.zqg@gmail.com>
2022-09-14 14:29:11 +08:00
github-actions[bot]
e5b0149ce5 Fix: fix uninstallation continues when answer is no (#4712)
Signed-off-by: Charlie Chiang <charlie_c_0129@outlook.com>
(cherry picked from commit 81115ef6ff)

Co-authored-by: Charlie Chiang <charlie_c_0129@outlook.com>
2022-09-13 10:25:08 +08:00
github-actions[bot]
83fd9edcfe Feat: add nodeport in webservice (#4698)
Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>
(cherry picked from commit 6579063fcf)

Co-authored-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>
2022-09-07 11:42:37 +08:00
github-actions[bot]
f986073273 [Backport release-1.5] Feat: request token when vela auth gen-kubeconfig (#4692)
* Feat: request token when vela auth gen-kubeconfig

Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>
(cherry picked from commit 34014c3643)

* 1.24 test

Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>
(cherry picked from commit 6e961765be)

* fix test

Signed-off-by: qiaozp <qiaozhongpei.qzp@alibaba-inc.com>
(cherry picked from commit 4acc0ed64f)

* fix test

Signed-off-by: qiaozp <qiaozhongpei.qzp@alibaba-inc.com>
(cherry picked from commit adfb0a2548)

* format

Signed-off-by: qiaozp <qiaozhongpei.qzp@alibaba-inc.com>
(cherry picked from commit 6f2b9538b6)

* more test on 1.24

Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>
(cherry picked from commit d91ce9ea6a)

* rollback some logic and fix test

Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>
(cherry picked from commit ba9a25ab2e)

* fix

Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>
(cherry picked from commit 8fcb9f2ac2)

Co-authored-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>
2022-09-05 17:36:10 +08:00
github-actions[bot]
17872f9705 fix enable addon cannot update definition bug (#4686)
Signed-off-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
(cherry picked from commit b5cd806efa)

Co-authored-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
2022-09-02 17:47:42 +08:00
qiaozp
e463dbf1fb Chore: Add v1.23 to regular CI test (#4673) (#4677)
* Chore: use higher k8s version in CI

Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>

* 1.23

Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>

* egress-selector-mode=disabled

Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>

* egress-selector-mode=disabled

Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>

* fix

Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>

* add hub args

Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>

* fix e2e test

Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>

* fix multicluster test

Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>

* fix multicluster test

Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>

* add all egress

Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>

* fix test

Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>

* version matrix

Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>

* cal egress arg

Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>

* fix

Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>

* fix

Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>

* regular v1.23 and restict concurency

Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>

* fix

Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>

Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>

Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>
2022-09-01 19:10:18 +08:00
qiaozp
44142c4c70 Chore: Migrate to k3d in CI & trim redundant steps (#4652) (#4676)
Fix: flaky CI test (#4669)
Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>

Signed-off-by: Qiaozp <qiaozhongpei.qzp@alibaba-inc.com>
2022-09-01 17:22:02 +08:00
github-actions[bot]
e460638799 Fix: unknown field cluster (#4674)
Signed-off-by: barnettZQG <barnett.zqg@gmail.com>
(cherry picked from commit 77a22b0d72)

Co-authored-by: barnettZQG <barnett.zqg@gmail.com>
2022-08-31 17:45:36 +08:00
72 changed files with 831 additions and 397 deletions


@@ -19,9 +19,8 @@ env:
# Common versions
GO_VERSION: '1.17'
GOLANGCI_VERSION: 'v1.38'
KIND_VERSION: 'v0.7.0'
KIND_IMAGE_VERSION: '[\"v1.20.7\"]'
KIND_IMAGE_VERSIONS: '[\"v1.18.20\",\"v1.20.7\",\"v1.22.7\"]'
K3D_IMAGE_VERSION: '[\"v1.20\",\"v1.24\"]'
K3D_IMAGE_VERSIONS: '[\"v1.20\",\"v1.24\"]'
jobs:
@@ -48,9 +47,9 @@ jobs:
run: |
if [[ "${{ github.ref }}" == refs/tags/v* ]]; then
echo "pushing tag: ${{ github.ref_name }}"
echo "::set-output name=matrix::${{ env.KIND_IMAGE_VERSIONS }}"
echo "::set-output name=matrix::${{ env.K3D_IMAGE_VERSIONS }}"
else
echo "::set-output name=matrix::${{ env.KIND_IMAGE_VERSION }}"
echo "::set-output name=matrix::${{ env.K3D_IMAGE_VERSION }}"
fi
apiserver-unit-tests:
@@ -111,6 +110,9 @@ jobs:
strategy:
matrix:
k8s-version: ${{ fromJson(needs.set-k8s-matrix.outputs.matrix) }}
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}-${{ matrix.k8s-version }}
cancel-in-progress: true
steps:
- name: Set up Go
@@ -128,35 +130,50 @@ jobs:
run: |
go get -v -t -d ./...
- name: Setup Kind
uses: engineerd/setup-kind@v0.5.0
- name: Tear down K3d if it exists
run: |
k3d cluster delete || true
k3d cluster delete worker || true
- name: Calculate K3d args
run: |
EGRESS_ARG=""
if [[ "${{ matrix.k8s-version }}" == v1.24 ]]; then
EGRESS_ARG="--k3s-arg --egress-selector-mode=disabled@server:0"
fi
echo "EGRESS_ARG=${EGRESS_ARG}" >> $GITHUB_ENV
- name: Setup K3d (Hub)
uses: nolar/setup-k3d-k3s@v1.0.8
with:
version: ${{ env.KIND_VERSION }}
skipClusterCreation: true
version: ${{ matrix.k8s-version }}
github-token: ${{ secrets.GITHUB_TOKEN }}
k3d-args: ${{ env.EGRESS_ARG }}
- name: Setup Kind Cluster (Worker)
- name: Setup K3d (Worker)
uses: nolar/setup-k3d-k3s@v1.0.8
with:
version: ${{ matrix.k8s-version }}
github-token: ${{ secrets.GITHUB_TOKEN }}
k3d-name: worker
k3d-args: --kubeconfig-update-default=false --network=k3d-k3s-default ${{ env.EGRESS_ARG }}
- name: Generate internal worker kubeconfig
run: |
kind delete cluster --name worker
kind create cluster --image kindest/node:${{ matrix.k8s-version }} --name worker
kubectl version
kubectl cluster-info
kind get kubeconfig --name worker --internal > /tmp/worker.kubeconfig
kind get kubeconfig --name worker > /tmp/worker.client.kubeconfig
internal_ip=$(docker network inspect k3d-k3s-default|jq ".[0].Containers"| jq -r '.[]| select(.Name=="k3d-worker-server-0")|.IPv4Address' | cut -d/ -f1)
k3d kubeconfig get worker > /tmp/worker.client.kubeconfig
cp /tmp/worker.client.kubeconfig /tmp/worker.kubeconfig
sed -i "s/0.0.0.0:[0-9]\+/$internal_ip:6443/" /tmp/worker.kubeconfig
- name: Setup Kind Cluster (Hub)
run: |
kind delete cluster
kind create cluster --image kindest/node:${{ matrix.k8s-version }}
kubectl version
kubectl cluster-info
- name: Load Image to kind cluster
run: make kind-load
- name: Load image to k3d cluster
run: make image-load
- name: Cleanup for e2e tests
run: |
make e2e-cleanup
make vela-cli
make e2e-cleanup
make e2e-setup-core
bin/vela addon enable fluxcd
timeout 600s bash -c -- 'while true; do kubectl get ns flux-system; if [ $? -eq 0 ] ; then break; else sleep 5; fi;done'


@@ -17,9 +17,8 @@ env:
# Common versions
GO_VERSION: '1.17'
GOLANGCI_VERSION: 'v1.38'
KIND_VERSION: 'v0.7.0'
KIND_IMAGE_VERSION: '[\"v1.20.7\"]'
KIND_IMAGE_VERSIONS: '[\"v1.18.20\",\"v1.20.7\",\"v1.22.7\"]'
K3D_IMAGE_VERSION: '[\"v1.20\",\"v1.24\"]'
K3D_IMAGE_VERSIONS: '[\"v1.20\",\"v1.24\"]'
jobs:
@@ -46,9 +45,9 @@ jobs:
run: |
if [[ "${{ github.ref }}" == refs/tags/v* ]]; then
echo "pushing tag: ${{ github.ref_name }}"
echo "::set-output name=matrix::${{ env.KIND_IMAGE_VERSIONS }}"
echo "::set-output name=matrix::${{ env.K3D_IMAGE_VERSIONS }}"
else
echo "::set-output name=matrix::${{ env.KIND_IMAGE_VERSION }}"
echo "::set-output name=matrix::${{ env.K3D_IMAGE_VERSION }}"
fi
@@ -59,6 +58,9 @@ jobs:
strategy:
matrix:
k8s-version: ${{ fromJson(needs.set-k8s-matrix.outputs.matrix) }}
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}-${{ matrix.k8s-version }}
cancel-in-progress: true
steps:
@@ -74,37 +76,49 @@ jobs:
run: |
go get -v -t -d ./...
- name: Setup Kind
uses: engineerd/setup-kind@v0.5.0
- name: Tear down K3d if it exists
run: |
k3d cluster delete || true
k3d cluster delete worker || true
- name: Calculate K3d args
run: |
EGRESS_ARG=""
if [[ "${{ matrix.k8s-version }}" == v1.24 ]]; then
EGRESS_ARG="--k3s-arg --egress-selector-mode=disabled@server:0"
fi
echo "EGRESS_ARG=${EGRESS_ARG}" >> $GITHUB_ENV
- name: Setup K3d (Hub)
uses: nolar/setup-k3d-k3s@v1.0.8
with:
version: ${{ env.KIND_VERSION }}
skipClusterCreation: true
version: ${{ matrix.k8s-version }}
github-token: ${{ secrets.GITHUB_TOKEN }}
k3d-args: ${{ env.EGRESS_ARG }}
- name: Setup Kind Cluster (Worker)
- name: Setup K3d (Worker)
uses: nolar/setup-k3d-k3s@v1.0.8
with:
version: ${{ matrix.k8s-version }}
github-token: ${{ secrets.GITHUB_TOKEN }}
k3d-name: worker
k3d-args: --kubeconfig-update-default=false --network=k3d-k3s-default ${{ env.EGRESS_ARG }}
- name: Generating internal worker kubeconfig
run: |
kind delete cluster --name worker
kind create cluster --image kindest/node:${{ matrix.k8s-version }} --name worker
kubectl version
kubectl cluster-info
kind get kubeconfig --name worker --internal > /tmp/worker.kubeconfig
kind get kubeconfig --name worker > /tmp/worker.client.kubeconfig
internal_ip=$(docker network inspect k3d-k3s-default|jq ".[0].Containers"| jq -r '.[]| select(.Name=="k3d-worker-server-0")|.IPv4Address' | cut -d/ -f1)
k3d kubeconfig get worker > /tmp/worker.client.kubeconfig
cp /tmp/worker.client.kubeconfig /tmp/worker.kubeconfig
sed -i "s/0.0.0.0:[0-9]\+/$internal_ip:6443/" /tmp/worker.kubeconfig
- name: Setup Kind Cluster (Hub)
run: |
kind delete cluster
kind create cluster --image kindest/node:${{ matrix.k8s-version }}
kubectl version
kubectl cluster-info
- name: Load Image to kind cluster (Hub)
run: make kind-load
- name: Load image to k3d cluster (hub and worker)
run: make image-load image-load-runtime-cluster
- name: Cleanup for e2e tests
run: |
make e2e-cleanup
make vela-cli
make e2e-cleanup
make e2e-setup-core-auth
make
make setup-runtime-e2e-cluster
- name: Run e2e multicluster tests


@@ -17,9 +17,8 @@ env:
# Common versions
GO_VERSION: '1.17'
GOLANGCI_VERSION: 'v1.38'
KIND_VERSION: 'v0.7.0'
KIND_IMAGE_VERSION: '[\"v1.20.7\"]'
KIND_IMAGE_VERSIONS: '[\"v1.18.20\",\"v1.20.7\",\"v1.22.7\"]'
K3D_IMAGE_VERSION: '[\"v1.20\",\"v1.24\"]'
K3D_IMAGE_VERSIONS: '[\"v1.20\",\"v1.24\"]'
jobs:
@@ -46,9 +45,9 @@ jobs:
run: |
if [[ "${{ github.ref }}" == refs/tags/v* ]]; then
echo "pushing tag: ${{ github.ref_name }}"
echo "::set-output name=matrix::${{ env.KIND_IMAGE_VERSIONS }}"
echo "::set-output name=matrix::${{ env.K3D_IMAGE_VERSIONS }}"
else
echo "::set-output name=matrix::${{ env.KIND_IMAGE_VERSION }}"
echo "::set-output name=matrix::${{ env.K3D_IMAGE_VERSION }}"
fi
e2e-rollout-tests:
@@ -58,6 +57,10 @@ jobs:
strategy:
matrix:
k8s-version: ${{ fromJson(needs.set-k8s-matrix.outputs.matrix) }}
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}-${{ matrix.k8s-version }}
cancel-in-progress: true
steps:
- name: Check out code into the Go module directory
@@ -72,33 +75,35 @@ jobs:
run: |
go get -v -t -d ./...
- name: Setup Kind
uses: engineerd/setup-kind@v0.5.0
with:
version: ${{ env.KIND_VERSION }}
skipClusterCreation: true
- name: Setup Kind Cluster
- name: Tear down K3d if it exists
run: |
kind delete cluster
kind create cluster --image kindest/node:${{ matrix.k8s-version }}
kubectl version
kubectl cluster-info
k3d cluster delete || true
k3d cluster delete worker || true
- name: Load Image to kind cluster
run: make kind-load
- name: Calculate K3d args
run: |
EGRESS_ARG=""
if [[ "${{ matrix.k8s-version }}" == v1.24 ]]; then
EGRESS_ARG="--k3s-arg --egress-selector-mode=disabled@server:0"
fi
echo "EGRESS_ARG=${EGRESS_ARG}" >> $GITHUB_ENV
- name: Run Make
run: make
- name: Setup K3d
uses: nolar/setup-k3d-k3s@v1.0.8
with:
version: ${{ matrix.k8s-version }}
github-token: ${{ secrets.GITHUB_TOKEN }}
k3d-args: ${{ env.EGRESS_ARG }}
- name: Run Make Manager
run: make manager
- name: Load image to k3d cluster
run: make image-load image-load-runtime-cluster
- name: Prepare for e2e tests
run: |
make vela-cli
make e2e-cleanup
make e2e-setup
helm lint ./charts/vela-core
make e2e-setup-core
make setup-runtime-e2e-cluster
helm test -n vela-system kubevela --timeout 5m
- name: Run e2e tests


@@ -17,9 +17,8 @@ env:
# Common versions
GO_VERSION: '1.17'
GOLANGCI_VERSION: 'v1.38'
KIND_VERSION: 'v0.7.0'
KIND_IMAGE_VERSION: '[\"v1.20.7\"]'
KIND_IMAGE_VERSIONS: '[\"v1.18.20\",\"v1.20.7\",\"v1.22.7\"]'
K3D_IMAGE_VERSION: '[\"v1.20\",\"v1.24\"]'
K3D_IMAGE_VERSIONS: '[\"v1.20\",\"v1.24\"]'
jobs:
@@ -46,9 +45,9 @@ jobs:
run: |
if [[ "${{ github.ref }}" == refs/tags/v* ]]; then
echo "pushing tag: ${{ github.ref_name }}"
echo "::set-output name=matrix::${{ env.KIND_IMAGE_VERSIONS }}"
echo "::set-output name=matrix::${{ env.K3D_IMAGE_VERSIONS }}"
else
echo "::set-output name=matrix::${{ env.KIND_IMAGE_VERSION }}"
echo "::set-output name=matrix::${{ env.K3D_IMAGE_VERSION }}"
fi
e2e-tests:
@@ -58,6 +57,10 @@ jobs:
strategy:
matrix:
k8s-version: ${{ fromJson(needs.set-k8s-matrix.outputs.matrix) }}
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}-${{ matrix.k8s-version }}
cancel-in-progress: true
steps:
- name: Check out code into the Go module directory
@@ -72,33 +75,36 @@ jobs:
run: |
go get -v -t -d ./...
- name: Setup Kind
uses: engineerd/setup-kind@v0.5.0
with:
version: ${{ env.KIND_VERSION }}
skipClusterCreation: true
- name: Setup Kind Cluster
- name: Tear down K3d if it exists
run: |
kind delete cluster
kind create cluster --image kindest/node:${{ matrix.k8s-version }}
kubectl version
kubectl cluster-info
k3d cluster delete || true
k3d cluster delete worker || true
- name: Load Image to kind cluster
run: make kind-load
- name: Calculate K3d args
run: |
EGRESS_ARG=""
if [[ "${{ matrix.k8s-version }}" == v1.24 ]]; then
EGRESS_ARG="--k3s-arg --egress-selector-mode=disabled@server:0"
fi
echo "EGRESS_ARG=${EGRESS_ARG}" >> $GITHUB_ENV
- name: Setup K3d
uses: nolar/setup-k3d-k3s@v1.0.8
with:
version: ${{ matrix.k8s-version }}
github-token: ${{ secrets.GITHUB_TOKEN }}
k3d-args: ${{ env.EGRESS_ARG }}
- name: Load image to k3d cluster
run: make image-load
- name: Run Make
run: make
- name: Run Make Manager
run: make manager
- name: Prepare for e2e tests
run: |
make e2e-cleanup
make e2e-setup
helm lint ./charts/vela-core
make e2e-setup-core
helm test -n vela-system kubevela --timeout 5m
- name: Run api e2e tests


@@ -15,7 +15,6 @@ env:
# Common versions
GO_VERSION: '1.17'
GOLANGCI_VERSION: 'v1.38'
KIND_VERSION: 'v0.7.0'
jobs:


@@ -15,7 +15,6 @@ env:
# Common versions
GO_VERSION: '1.17'
GOLANGCI_VERSION: 'v1.38'
KIND_VERSION: 'v0.7.0'
jobs:
@@ -61,10 +60,11 @@ jobs:
run: |
sudo apt-get install -y golang-ginkgo-dev
- name: Setup Kind Cluster
uses: engineerd/setup-kind@v0.5.0
- name: Setup K3d
uses: nolar/setup-k3d-k3s@v1.0.8
with:
version: ${{ env.KIND_VERSION }}
version: v1.20
github-token: ${{ secrets.GITHUB_TOKEN }}
- name: install Kubebuilder
uses: RyanSiu1995/kubebuilder-action@v1.2


@@ -82,17 +82,17 @@ endif
# load docker image to the kind cluster
kind-load: kind-load-runtime-cluster
# load docker image to the k3d cluster
image-load:
docker build -t $(VELA_CORE_TEST_IMAGE) -f Dockerfile.e2e .
kind load docker-image $(VELA_CORE_TEST_IMAGE) || { echo >&2 "kind not installed or error loading image: $(VELA_CORE_TEST_IMAGE)"; exit 1; }
k3d image import $(VELA_CORE_TEST_IMAGE) || { echo >&2 "k3d not installed or error loading image: $(VELA_CORE_TEST_IMAGE)"; exit 1; }
kind-load-runtime-cluster:
image-load-runtime-cluster:
/bin/sh hack/e2e/build_runtime_rollout.sh
docker build -t $(VELA_RUNTIME_ROLLOUT_TEST_IMAGE) -f runtime/rollout/e2e/Dockerfile.e2e runtime/rollout/e2e/
rm -rf runtime/rollout/e2e/tmp
kind load docker-image $(VELA_RUNTIME_ROLLOUT_TEST_IMAGE) || { echo >&2 "kind not installed or error loading image: $(VELA_RUNTIME_ROLLOUT_TEST_IMAGE)"; exit 1; }
kind load docker-image $(VELA_RUNTIME_ROLLOUT_TEST_IMAGE) --name=$(RUNTIME_CLUSTER_NAME) || { echo >&2 "kind not installed or error loading image: $(VELA_RUNTIME_ROLLOUT_TEST_IMAGE)"; exit 1; }
k3d image import $(VELA_RUNTIME_ROLLOUT_TEST_IMAGE) || { echo >&2 "k3d not installed or error loading image: $(VELA_RUNTIME_ROLLOUT_TEST_IMAGE)"; exit 1; }
k3d cluster get $(RUNTIME_CLUSTER_NAME) && k3d image import $(VELA_RUNTIME_ROLLOUT_TEST_IMAGE) --cluster=$(RUNTIME_CLUSTER_NAME) || echo "no worker cluster"
# Run tests
core-test: fmt vet manifests


@@ -13,7 +13,7 @@ spec:
template: |
parameter: {
// +usage=Specify the names of the clusters to select.
cluster?: [...string]
clusters?: [...string]
// +usage=Specify the label selector for clusters
clusterLabelSelector?: [string]: string
// +usage=Deprecated: Use clusterLabelSelector instead.


@@ -306,6 +306,9 @@ spec:
if v.name == _|_ {
name: "port-" + strconv.FormatInt(v.port, 10)
}
if v.nodePort != _|_ && parameter.exposeType == "NodePort" {
nodePort: v.nodePort
}
},
]
outputs: {
@@ -354,6 +357,8 @@ spec:
protocol: *"TCP" | "UDP" | "SCTP"
// +usage=Specify if the port should be exposed
expose: *false | bool
// +usage=Exposed node port. Only valid when exposeType is NodePort.
nodePort?: int
}]
// +ignore


@@ -33,7 +33,7 @@ kind: ClusterRole
metadata:
name: {{ include "kubevela.fullname" . }}:manager
rules:
- apiGroups: ["core.oam.dev", "terraform.core.oam.dev", "prism.oam.dev"]
- apiGroups: ["core.oam.dev", "terraform.core.oam.dev", "prism.oam.dev", "standard.oam.dev"]
resources: ["*"]
verbs: ["*"]
- apiGroups: ["cluster.open-cluster-management.io"]


@@ -13,12 +13,6 @@ spec:
properties:
image: {{ .Values.imageRegistry }}{{ .Values.test.app.repository }}:{{ .Values.test.app.tag }}
port: 8000
traits:
- type: ingress
properties:
domain: testsvc.example.com
http:
"/": 8000
---
apiVersion: v1
kind: Pod
@@ -52,13 +46,5 @@ spec:
kubectl -n {{ include "systemDefinitionNamespace" . }} wait --for=condition=available deployments helm-test-express-server --timeout 3m
echo "deployment being available"
# wait for ingress being created
while ! [ `kubectl -n {{ include "systemDefinitionNamespace" . }} get ing helm-test-express-server | grep -v NAME | wc -l` = 1 ]; do
echo "waiting for ingress being created"
sleep 1
done
echo "Application and its components are created"
restartPolicy: Never


@@ -13,7 +13,7 @@ spec:
template: |
parameter: {
// +usage=Specify the names of the clusters to select.
cluster?: [...string]
clusters?: [...string]
// +usage=Specify the label selector for clusters
clusterLabelSelector?: [string]: string
// +usage=Deprecated: Use clusterLabelSelector instead.


@@ -306,6 +306,9 @@ spec:
if v.name == _|_ {
name: "port-" + strconv.FormatInt(v.port, 10)
}
if v.nodePort != _|_ && parameter.exposeType == "NodePort" {
nodePort: v.nodePort
}
},
]
outputs: {
@@ -354,6 +357,8 @@ spec:
protocol: *"TCP" | "UDP" | "SCTP"
// +usage=Specify if the port should be exposed
expose: *false | bool
// +usage=Exposed node port. Only valid when exposeType is NodePort.
nodePort?: int
}]
// +ignore


@@ -36,7 +36,7 @@ kind: ClusterRole
metadata:
name: {{ include "kubevela.fullname" . }}:manager
rules:
- apiGroups: ["core.oam.dev", "terraform.core.oam.dev", "prism.oam.dev"]
- apiGroups: ["core.oam.dev", "terraform.core.oam.dev", "prism.oam.dev", "standard.oam.dev"]
resources: ["*"]
verbs: ["*"]
- apiGroups: ["cluster.open-cluster-management.io"]


@@ -35,6 +35,7 @@ import (
"github.com/oam-dev/kubevela/pkg/apiserver/config"
"github.com/oam-dev/kubevela/pkg/apiserver/utils/log"
"github.com/oam-dev/kubevela/pkg/features"
"github.com/oam-dev/kubevela/pkg/utils"
"github.com/oam-dev/kubevela/version"
)
@@ -50,6 +51,7 @@ func main() {
flag.DurationVar(&s.serverConfig.LeaderConfig.Duration, "duration", time.Second*5, "the lease lock resource name")
flag.DurationVar(&s.serverConfig.AddonCacheTime, "addon-cache-duration", time.Minute*10, "how long between two addon cache operation")
flag.BoolVar(&s.serverConfig.DisableStatisticCronJob, "disable-statistic-cronJob", false, "close the system statistic info calculating cronJob")
flag.StringVar(&s.serverConfig.PprofAddr, "pprof-addr", "", "The address for pprof to use while exporting profiling results. The default value is empty which means do not expose it. Set it to address like :6666 to expose it.")
flag.Float64Var(&s.serverConfig.KubeQPS, "kube-api-qps", 100, "the qps for kube clients. Low qps may lead to low throughput. High qps may give stress to api-server.")
flag.IntVar(&s.serverConfig.KubeBurst, "kube-api-burst", 300, "the burst for kube clients. Recommend setting it qps*3.")
features.APIServerMutableFeatureGate.AddFlag(flag.CommandLine)
@@ -90,6 +92,11 @@ func main() {
errChan := make(chan error)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
if s.serverConfig.PprofAddr != "" {
go utils.EnablePprof(s.serverConfig.PprofAddr, errChan)
}
go func() {
if err := s.run(ctx, errChan); err != nil {
errChan <- fmt.Errorf("failed to run apiserver: %w", err)


@@ -22,8 +22,6 @@ import (
goflag "flag"
"fmt"
"io"
"net/http"
"net/http/pprof"
"os"
"path/filepath"
"strconv"
@@ -53,6 +51,7 @@ import (
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/discoverymapper"
"github.com/oam-dev/kubevela/pkg/resourcekeeper"
pkgutils "github.com/oam-dev/kubevela/pkg/utils"
"github.com/oam-dev/kubevela/pkg/utils/common"
"github.com/oam-dev/kubevela/pkg/utils/system"
"github.com/oam-dev/kubevela/pkg/utils/util"
@@ -159,36 +158,7 @@ func main() {
if pprofAddr != "" {
// Start pprof server if enabled
mux := http.NewServeMux()
mux.HandleFunc("/debug/pprof/", pprof.Index)
mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
pprofServer := http.Server{
Addr: pprofAddr,
Handler: mux,
}
klog.InfoS("Starting debug HTTP server", "addr", pprofServer.Addr)
go func() {
go func() {
ctx := context.Background()
<-ctx.Done()
ctx, cancelFunc := context.WithTimeout(context.Background(), 60*time.Minute)
defer cancelFunc()
if err := pprofServer.Shutdown(ctx); err != nil {
klog.Error(err, "Failed to shutdown debug HTTP server")
}
}()
if err := pprofServer.ListenAndServe(); !errors.Is(http.ErrServerClosed, err) {
klog.Error(err, "Failed to start debug HTTP server")
panic(err)
}
}()
go pkgutils.EnablePprof(pprofAddr, nil)
}
if logFilePath != "" {


@@ -294,7 +294,7 @@ var VelaQLPodListContext = func(context string, velaQL string) bool {
gomega.Expect(v.Status.Phase).To(gomega.ContainSubstring("Running"))
}
if v.Status.NodeName != "" {
gomega.Expect(v.Status.NodeName).To(gomega.ContainSubstring("kind-control-plane"))
gomega.Expect(v.Status.NodeName).To(gomega.ContainSubstring("k3d-k3s-default-server-0"))
}
if v.Metadata.Namespace != "" {
gomega.Expect(v.Metadata.Namespace).To(gomega.ContainSubstring("default"))


@@ -186,4 +186,14 @@ var (
})
})
}
ShowCapabilityReferenceMarkdown = func(context string, capabilityName string) bool {
return ginkgo.Context(context, func() {
ginkgo.It("should show capability reference in markdown", func() {
cli := fmt.Sprintf("vela show %s --format=markdown", capabilityName)
_, err := Exec(cli)
gomega.Expect(err).Should(gomega.BeNil())
})
})
}
)


@@ -1,35 +0,0 @@
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"testing"
"github.com/oam-dev/kubevela/e2e"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
var _ = ginkgo.BeforeSuite(func() {
e2e.BeforeSuit()
}, 30)
func TestApplication(t *testing.T) {
gomega.RegisterFailHandler(ginkgo.Fail)
ginkgo.RunSpecs(t, "Setup Suite")
}


@@ -1,25 +0,0 @@
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"github.com/onsi/ginkgo"
)
var _ = ginkgo.Describe("Setup", func() {
})


@@ -28,6 +28,7 @@ var _ = ginkgo.Describe("Trait", func() {
var _ = ginkgo.Describe("Test vela show", func() {
e2e.ShowCapabilityReference("show ingress", "ingress")
e2e.ShowCapabilityReferenceMarkdown("show ingress markdown", "ingress")
env := "namespace-xxxfwrr23erfm"
e2e.EnvInitWithNamespaceOptionContext("env init", env, env)


@@ -37,6 +37,7 @@ var _ = Describe("Workload", func() {
var _ = Describe("Test vela show", func() {
e2e.ShowCapabilityReference("show webservice", "webservice")
e2e.ShowCapabilityReferenceMarkdown("show webservice markdown", "webservice")
env := "namespace-xxxfwrr23erfm"
e2e.EnvInitWithNamespaceOptionContext("env init", env, env)

go.mod

@@ -118,7 +118,7 @@ require (
github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 // indirect
github.com/openkruise/rollouts v0.1.1-0.20220622054609-149e5a48da5e
github.com/xanzy/ssh-agent v0.3.0 // indirect
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e // indirect
golang.org/x/net v0.0.0-20220906165146-f3363e06e74c // indirect
golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 // indirect
google.golang.org/protobuf v1.28.0 // indirect
sigs.k8s.io/gateway-api v0.4.3
@@ -296,7 +296,7 @@ require (
go.uber.org/multierr v1.6.0 // indirect
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f // indirect
golang.org/x/sys v0.0.0-20220624220833-87e55d714810 // indirect
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect
google.golang.org/appengine v1.6.7 // indirect

go.sum

@@ -2410,8 +2410,8 @@ golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220516155154-20f960328961/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e h1:TsQ7F31D3bUCLeqPT0u+yjp1guoArKaNKmCr22PYgTQ=
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220906165146-f3363e06e74c h1:yKufUcDwucU5urd+50/Opbt4AYpqthk7wHpHok8f1lo=
golang.org/x/net v0.0.0-20220906165146-f3363e06e74c/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -2608,8 +2608,8 @@ golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220513210249-45d2b4557a2a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220624220833-87e55d714810 h1:rHZQSjJdAI4Xf5Qzeh2bBc5YJIkPFVM6oDtMFYmgws0=
golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 h1:WIoqL4EROvwiPdUtaip4VcDdpZ4kha7wBWZrbVKCIZg=
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=


@@ -1,20 +1,20 @@
. ./hack/e2e/end_e2e_core.sh
OAM_CONTAINER_ID=$(docker exec kind-control-plane crictl ps | grep oam-runtime | grep --regexp '^.............' -o)
OAM_DOCKER_DIR=$(docker exec kind-control-plane crictl inspect --output go-template --template '{{range .info.runtimeSpec.mounts}}{{if (eq .destination "/workspace/data")}}{{.source}}{{end}}{{end}}' "${OAM_CONTAINER_ID}")
OAM_CONTAINER_ID=$(docker exec k3d-k3s-default-server-0 crictl ps | grep oam-runtime | grep --regexp '^.............' -o)
OAM_DOCKER_DIR=$(docker exec k3d-k3s-default-server-0 crictl inspect --output go-template --template '{{range .info.runtimeSpec.mounts}}{{if (eq .destination "/workspace/data")}}{{.source}}{{end}}{{end}}' "${OAM_CONTAINER_ID}")
echo "${OAM_CONTAINER_ID}"
echo "${OAM_DOCKER_DIR}"
docker exec kind-control-plane crictl exec "${OAM_CONTAINER_ID}" kill -2 1
docker exec k3d-k3s-default-server-0 crictl exec "${OAM_CONTAINER_ID}" kill -2 1
file=$OAM_DOCKER_DIR/e2e-profile.out
echo "$file"
n=1
while [ $n -le 60 ];do
if_exist=$(docker exec kind-control-plane sh -c "test -f $file && echo 'ok'")
if_exist=$(docker exec k3d-k3s-default-server-0 sh -c "test -f $file && echo 'ok'")
echo "$if_exist"
if [ -n "$if_exist" ];then
docker exec kind-control-plane cat "$file" > /tmp/oam-e2e-profile.out
docker exec k3d-k3s-default-server-0 cat "$file" > /tmp/oam-e2e-profile.out
break
fi
echo file not generated yet


@@ -1,18 +1,18 @@
CONTAINER_ID=$(docker exec kind-control-plane crictl ps | grep 'kubevela\s' | grep --regexp '^.............' -o)
DOCKER_DIR=$(docker exec kind-control-plane crictl inspect --output go-template --template '{{range .info.runtimeSpec.mounts}}{{if (eq .destination "/workspace/data")}}{{.source}}{{end}}{{end}}' "${CONTAINER_ID}")
CONTAINER_ID=$(docker exec k3d-k3s-default-server-0 crictl ps | grep 'kubevela\s' | grep --regexp '^.............' -o)
DOCKER_DIR=$(docker exec k3d-k3s-default-server-0 crictl inspect --output go-template --template '{{range .info.runtimeSpec.mounts}}{{if (eq .destination "/workspace/data")}}{{.source}}{{end}}{{end}}' "${CONTAINER_ID}")
echo "${CONTAINER_ID}"
echo "${DOCKER_DIR}"
docker exec kind-control-plane crictl exec "${CONTAINER_ID}" kill -2 1
docker exec k3d-k3s-default-server-0 crictl exec "${CONTAINER_ID}" kill -2 1
file=$DOCKER_DIR/e2e-profile.out
echo "$file"
n=1
while [ $n -le 60 ];do
if_exist=$(docker exec kind-control-plane sh -c "test -f $file && echo 'ok'")
if_exist=$(docker exec k3d-k3s-default-server-0 sh -c "test -f $file && echo 'ok'")
echo "$if_exist"
if [ -n "$if_exist" ];then
docker exec kind-control-plane cat "$file" > /tmp/e2e-profile.out
docker exec k3d-k3s-default-server-0 cat "$file" > /tmp/e2e-profile.out
break
fi
echo file not generated yet


@@ -9,9 +9,9 @@
template: {
patch: spec: template: spec: {
tolerations: [{
key: "node.kubernetes.io/network-unavailable"
key: "node.kubernetes.io/network-unavailable"
operator: "Exists"
effect: "NoSchedule"
effect: "NoSchedule"
}]
}
parameter: {}


@@ -5,11 +5,18 @@ e2e-setup-core-pre-hook:
.PHONY: e2e-setup-core-post-hook
e2e-setup-core-post-hook:
kubectl wait --for=condition=Available deployment/kubevela-vela-core -n vela-system --timeout=180s
helm upgrade --install --namespace vela-system --wait oam-rollout --set image.repository=vela-runtime-rollout-test --set image.tag=$(GIT_COMMIT) ./runtime/rollout/charts
helm install kruise https://github.com/openkruise/charts/releases/download/kruise-1.1.0/kruise-1.1.0.tgz --set featureGates="PreDownloadImageForInPlaceUpdate=true" --set daemon.socketLocation=/run/k3s/containerd/
go run ./e2e/addon/mock &
sleep 15
bin/vela addon enable ./e2e/addon/mock/testdata/fluxcd
bin/vela addon enable ./e2e/addon/mock/testdata/rollout
bin/vela addon enable ./e2e/addon/mock/testdata/terraform
bin/vela addon enable ./e2e/addon/mock/testdata/terraform-alibaba ALICLOUD_ACCESS_KEY=xxx ALICLOUD_SECRET_KEY=yyy ALICLOUD_REGION=cn-beijing
timeout 600s bash -c -- 'while true; do kubectl get ns flux-system; if [ $$? -eq 0 ] ; then break; else sleep 5; fi;done'
kubectl wait --for=condition=Ready pod -l app.kubernetes.io/name=vela-core,app.kubernetes.io/instance=kubevela -n vela-system --timeout=600s
kubectl wait --for=condition=Ready pod -l app=source-controller -n flux-system --timeout=600s
kubectl wait --for=condition=Ready pod -l app=helm-controller -n flux-system --timeout=600s
.PHONY: e2e-setup-core-wo-auth
e2e-setup-core-wo-auth:
@@ -27,28 +34,26 @@ e2e-setup-core-auth: e2e-setup-core-pre-hook e2e-setup-core-w-auth e2e-setup-cor
.PHONY: setup-runtime-e2e-cluster
setup-runtime-e2e-cluster:
helm upgrade --install --create-namespace --namespace vela-system --kubeconfig=$(RUNTIME_CLUSTER_CONFIG) --set image.pullPolicy=IfNotPresent --set image.repository=vela-runtime-rollout-test --set image.tag=$(GIT_COMMIT) --wait vela-rollout ./runtime/rollout/charts
helm upgrade --install \
--namespace vela-system \
--wait oam-rollout \
--set image.repository=vela-runtime-rollout-test \
--set image.tag=$(GIT_COMMIT) \
./runtime/rollout/charts
.PHONY: e2e-setup
e2e-setup:
helm install kruise https://github.com/openkruise/charts/releases/download/kruise-1.1.0/kruise-1.1.0.tgz --set featureGates="PreDownloadImageForInPlaceUpdate=true"
sh ./hack/e2e/modify_charts.sh
helm upgrade --install --create-namespace --namespace vela-system --set image.pullPolicy=IfNotPresent --set image.repository=vela-core-test --set applicationRevisionLimit=5 --set dependCheckWait=10s --set image.tag=$(GIT_COMMIT) --wait kubevela ./charts/vela-core
helm upgrade --install --namespace vela-system --wait oam-rollout --set image.repository=vela-runtime-rollout-test --set image.tag=$(GIT_COMMIT) ./runtime/rollout/charts
k3d cluster get $(RUNTIME_CLUSTER_NAME) && \
helm upgrade --install \
--create-namespace \
--namespace vela-system \
--kubeconfig=$(RUNTIME_CLUSTER_CONFIG) \
--set image.pullPolicy=IfNotPresent \
--set image.repository=vela-runtime-rollout-test \
--set image.tag=$(GIT_COMMIT) \
--wait vela-rollout \
./runtime/rollout/charts || \
echo "no worker cluster" \
go run ./e2e/addon/mock &
sleep 15
bin/vela addon enable ./e2e/addon/mock/testdata/fluxcd
bin/vela addon enable ./e2e/addon/mock/testdata/terraform
bin/vela addon enable ./e2e/addon/mock/testdata/terraform-alibaba ALICLOUD_ACCESS_KEY=xxx ALICLOUD_SECRET_KEY=yyy ALICLOUD_REGION=cn-beijing
bin/vela addon enable ./e2e/addon/mock/testdata/rollout
ginkgo version
ginkgo -v -r e2e/setup
timeout 600s bash -c -- 'while true; do kubectl get ns flux-system; if [ $$? -eq 0 ] ; then break; else sleep 5; fi;done'
kubectl wait --for=condition=Ready pod -l app.kubernetes.io/name=vela-core,app.kubernetes.io/instance=kubevela -n vela-system --timeout=600s
kubectl wait --for=condition=Ready pod -l app=source-controller -n flux-system --timeout=600s
kubectl wait --for=condition=Ready pod -l app=helm-controller -n flux-system --timeout=600s
.PHONY: e2e-api-test
e2e-api-test:


@@ -418,9 +418,12 @@ var _ = Describe("test override defs of addon", func() {
u := unstructured.Unstructured{Object: compUnstructured}
u.SetAPIVersion(v1beta1.SchemeGroupVersion.String())
u.SetKind(v1beta1.ComponentDefinitionKind)
u.SetLabels(map[string]string{"testUpdateLabel": "test"})
c, err := checkConflictDefs(ctx, k8sClient, []*unstructured.Unstructured{&u}, app.GetName())
Expect(err).Should(BeNil())
Expect(len(c)).Should(BeEquivalentTo(1))
// guarantee checkConflictDefs won't change source definition
Expect(u.GetLabels()["testUpdateLabel"]).Should(BeEquivalentTo("test"))
u.SetName("rollout")
c, err = checkConflictDefs(ctx, k8sClient, []*unstructured.Unstructured{&u}, app.GetName())


@@ -448,20 +448,21 @@ func isErrorCueRenderPathNotFound(err error, path string) bool {
func checkConflictDefs(ctx context.Context, k8sClient client.Client, defs []*unstructured.Unstructured, appName string) (map[string]string, error) {
res := map[string]string{}
for _, def := range defs {
err := k8sClient.Get(ctx, client.ObjectKeyFromObject(def), def)
checkDef := def.DeepCopy()
err := k8sClient.Get(ctx, client.ObjectKeyFromObject(checkDef), checkDef)
if err == nil {
owner := metav1.GetControllerOf(def)
owner := metav1.GetControllerOf(checkDef)
if owner == nil || owner.Kind != v1beta1.ApplicationKind {
res[def.GetName()] = fmt.Sprintf("definition: %s already exist and not belong to any addon \n", def.GetName())
res[checkDef.GetName()] = fmt.Sprintf("definition: %s already exist and not belong to any addon \n", checkDef.GetName())
continue
}
if owner.Name != appName {
// if the definition does not belong to any addon, or belongs to a different addon, record it in the result
res[def.GetName()] = fmt.Sprintf("definition: %s in this addon already exist in %s \n", def.GetName(), addon.AppName2Addon(appName))
res[checkDef.GetName()] = fmt.Sprintf("definition: %s in this addon already exist in %s \n", checkDef.GetName(), addon.AppName2Addon(appName))
}
}
if err != nil && !errors2.IsNotFound(err) {
return nil, errors.Wrapf(err, "check definition %s", def.GetName())
return nil, errors.Wrapf(err, "check definition %s", checkDef.GetName())
}
}
return res, nil
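
(Aside: the underlying pitfall fixed here is that controller-runtime's client.Get deserializes the live object into the pointer it receives, silently overwriting the caller's copy; hence the DeepCopy above. A minimal sketch of the copy-first pattern, with a hypothetical helper name:)

package addon

import (
	"context"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// getLiveCopy reads the cluster state of obj without mutating the caller's
// object: client.Get overwrites its argument, so we fetch into a deep copy.
func getLiveCopy(ctx context.Context, c client.Client, obj *unstructured.Unstructured) (*unstructured.Unstructured, error) {
	live := obj.DeepCopy()
	if err := c.Get(ctx, client.ObjectKeyFromObject(live), live); err != nil {
		return nil, err
	}
	return live, nil
}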


@@ -46,6 +46,9 @@ type Config struct {
// KubeQPS the QPS of kube client
KubeQPS float64
// PprofAddr the address for pprof to use while exporting profiling results.
PprofAddr string
}
type leaderConfig struct {


@@ -150,7 +150,7 @@ func (w *WorkflowRecord) Index() map[string]string {
index["namespace"] = w.Namespace
}
if w.WorkflowName != "" {
index["workflowPrimaryKey"] = w.WorkflowName
index["workflowName"] = w.WorkflowName
}
if w.AppPrimaryKey != "" {
index["appPrimaryKey"] = w.AppPrimaryKey


@@ -121,23 +121,22 @@ func (w *workflowServiceImpl) DeleteWorkflowByApp(ctx context.Context, app *mode
}
for i := range workflows {
workflow := workflows[i].(*model.Workflow)
var record = model.WorkflowRecord{
AppPrimaryKey: workflow.AppPrimaryKey,
WorkflowName: workflow.Name,
}
records, err := w.Store.List(ctx, &record, &datastore.ListOptions{})
if err != nil {
log.Logger.Errorf("list workflow %s record failure %s", workflow.PrimaryKey(), err.Error())
}
for _, record := range records {
if err := w.Store.Delete(ctx, record); err != nil {
log.Logger.Errorf("delete workflow record %s failure %s", record.PrimaryKey(), err.Error())
}
}
if err := w.Store.Delete(ctx, workflow); err != nil {
log.Logger.Errorf("delete workflow %s failure %s", workflow.PrimaryKey(), err.Error())
}
}
var record = model.WorkflowRecord{
AppPrimaryKey: workflow.AppPrimaryKey,
}
records, err := w.Store.List(ctx, &record, &datastore.ListOptions{})
if err != nil {
log.Logger.Errorf("list workflow %s record failure %s", workflow.PrimaryKey(), err.Error())
}
for _, record := range records {
if err := w.Store.Delete(ctx, record); err != nil {
log.Logger.Errorf("delete workflow record %s failure %s", record.PrimaryKey(), err.Error())
}
}
return nil
}
@@ -368,6 +367,10 @@ func (w *workflowServiceImpl) SyncWorkflowRecord(ctx context.Context) error {
continue
}
if app.Status.Workflow == nil {
continue
}
// there is a ":" in the default app revision
recordName := strings.Replace(app.Status.Workflow.AppRevision, ":", "-", 1)


@@ -34,6 +34,7 @@ import (
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/apiserver/domain/model"
"github.com/oam-dev/kubevela/pkg/apiserver/infrastructure/datastore"
"github.com/oam-dev/kubevela/pkg/apiserver/infrastructure/datastore/mongodb"
apisv1 "github.com/oam-dev/kubevela/pkg/apiserver/interfaces/api/dto/v1"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/utils/apply"
@@ -563,6 +564,55 @@ var _ = Describe("Test workflow service functions", func() {
Expect(record.Finished).Should(Equal("true"))
Expect(record.Steps[1].Phase).Should(Equal(common.WorkflowStepPhaseStopped))
})
It("Test deleting workflow", func() {
By("Test deleting the workflow from the mongo")
mongodbDriver, err := mongodb.New(context.TODO(), datastore.Config{
URL: "mongodb://localhost:27017",
Database: "kubevela",
})
Expect(err).ToNot(HaveOccurred())
Expect(mongodbDriver).ToNot(BeNil())
Expect(mongodbDriver.BatchAdd(context.Background(), []datastore.Entity{
&model.Workflow{
Name: "workflow-default",
AppPrimaryKey: "war-app",
},
&model.WorkflowRecord{
Name: "workflow-default-20220809081934217",
WorkflowName: "workflow-default",
AppPrimaryKey: "war-app",
RevisionPrimaryKey: "20220809081934216",
},
&model.WorkflowRecord{
WorkflowName: "workflow-default",
AppPrimaryKey: "war-app",
Name: "workflow-default-20220809082525833",
RevisionPrimaryKey: "20220809082525832",
},
})).ToNot(HaveOccurred())
var record = model.WorkflowRecord{
AppPrimaryKey: "war-app",
WorkflowName: "workflow-default",
}
records, err := mongodbDriver.List(context.TODO(), &record, &datastore.ListOptions{})
Expect(err).ToNot(HaveOccurred())
Expect(len(records)).Should(Equal(2))
srv := workflowServiceImpl{
Store: mongodbDriver,
}
Expect(srv.DeleteWorkflowByApp(context.TODO(), &model.Application{Name: "war-app"})).ToNot(HaveOccurred())
wc, err := mongodbDriver.Count(context.TODO(), &model.Workflow{AppPrimaryKey: "war-app"}, nil)
Expect(err).ToNot(HaveOccurred())
Expect(int(wc)).Should(Equal(0))
list, err := mongodbDriver.List(context.TODO(), &model.WorkflowRecord{AppPrimaryKey: "war-app"}, nil)
Expect(err).ToNot(HaveOccurred())
Expect(len(list)).Should(Equal(0))
})
})
var yamlStr = `apiVersion: core.oam.dev/v1beta1


@@ -18,11 +18,12 @@ package sync
import (
"context"
"encoding/json"
"sync"
"github.com/fatih/color"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/dynamic"
dynamicInformer "k8s.io/client-go/dynamic/dynamicinformer"
"k8s.io/client-go/rest"
@@ -58,14 +59,17 @@ func (a *ApplicationSync) Start(ctx context.Context, errorChan chan error) {
factory := dynamicInformer.NewFilteredDynamicSharedInformerFactory(dynamicClient, 0, v1.NamespaceAll, nil)
informer := factory.ForResource(v1beta1.SchemeGroupVersion.WithResource("applications")).Informer()
getApp := func(obj interface{}) *v1beta1.Application {
app := &v1beta1.Application{}
bs, err := json.Marshal(obj)
if err != nil {
log.Logger.Errorf("decode the application failure %s", err.Error())
if app, ok := obj.(*v1beta1.Application); ok {
return app
}
_ = json.Unmarshal(bs, app)
return app
var app v1beta1.Application
if object, ok := obj.(*unstructured.Unstructured); ok {
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(object.Object, &app); err != nil {
log.Logger.Errorf("decode the application failure %s", err.Error())
return &app
}
}
return &app
}
cu := &CR2UX{
ds: a.Store,
@@ -89,6 +93,7 @@ func (a *ApplicationSync) Start(ctx context.Context, errorChan chan error) {
if err := cu.AddOrUpdate(ctx, app.(*v1beta1.Application)); err != nil {
log.Logger.Errorf("fail to add or update application %s", err.Error())
}
a.Queue.Done(app)
}
}()
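
(Aside: the one-line a.Queue.Done(app) addition is the crux of the apiserver memory-leak fix. client-go's workqueue keeps every item returned by Get in an internal "processing" set until Done is called, so a consumer that never calls Done retains each dequeued Application forever. A minimal, self-contained sketch of the required pattern:)

package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	q := workqueue.New()
	q.Add("my-app")
	q.ShutDown() // drain mode: Get keeps returning queued items, then reports shutdown

	for {
		item, shutdown := q.Get()
		if shutdown {
			return
		}
		fmt.Println("processing", item)
		// Done releases the item from the queue's processing set; without it
		// the item (and everything it references) would be retained forever.
		q.Done(item)
	}
}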


@@ -29,6 +29,7 @@ import (
"time"
"github.com/pkg/errors"
authenticationv1 "k8s.io/api/authentication/v1"
certificatesv1 "k8s.io/api/certificates/v1"
certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
corev1 "k8s.io/api/core/v1"
@@ -46,6 +47,9 @@ import (
"github.com/oam-dev/kubevela/pkg/utils"
)
// DefaultExpireTime is default expire time for both X.509 and SA token apply
const DefaultExpireTime = time.Hour * 24 * 365
// KubeConfigGenerateOptions options for create KubeConfig
type KubeConfigGenerateOptions struct {
X509 *KubeConfigGenerateX509Options
@@ -64,6 +68,7 @@ type KubeConfigGenerateX509Options struct {
type KubeConfigGenerateServiceAccountOptions struct {
ServiceAccountName string
ServiceAccountNamespace string
ExpireTime time.Duration
}
// KubeConfigWithUserGenerateOption option for setting user in KubeConfig
@@ -96,6 +101,7 @@ func (opt KubeConfigWithServiceAccountGenerateOption) ApplyToOptions(options *Ku
options.ServiceAccount = &KubeConfigGenerateServiceAccountOptions{
ServiceAccountName: opt.Name,
ServiceAccountNamespace: opt.Namespace,
ExpireTime: DefaultExpireTime,
}
}
@@ -128,7 +134,7 @@ func newKubeConfigGenerateOptions(options ...KubeConfigGenerateOption) *KubeConf
X509: &KubeConfigGenerateX509Options{
User: user.Anonymous,
Groups: []string{KubeVelaClientGroup},
ExpireTime: time.Hour * 24 * 365,
ExpireTime: DefaultExpireTime,
PrivateKeyBits: 2048,
},
ServiceAccount: nil,
@@ -329,30 +335,53 @@ func generateX509KubeConfigV1Beta(ctx context.Context, cli kubernetes.Interface,
}
func generateServiceAccountKubeConfig(ctx context.Context, cli kubernetes.Interface, cfg *clientcmdapi.Config, writer io.Writer, opts *KubeConfigGenerateServiceAccountOptions) (*clientcmdapi.Config, error) {
var (
token string
CA []byte
)
sa, err := cli.CoreV1().ServiceAccounts(opts.ServiceAccountNamespace).Get(ctx, opts.ServiceAccountName, metav1.GetOptions{})
if err != nil {
return nil, err
}
_, _ = fmt.Fprintf(writer, "ServiceAccount %s/%s found.\n", opts.ServiceAccountNamespace, opts.ServiceAccountName)
if len(sa.Secrets) == 0 {
return nil, errors.Errorf("no secret found in serviceaccount %s/%s", opts.ServiceAccountNamespace, opts.ServiceAccountName)
_, _ = fmt.Fprintf(writer, "ServiceAccount %s/%s has no secret. Requesting token", opts.ServiceAccountNamespace, opts.ServiceAccountName)
request := authenticationv1.TokenRequest{
Spec: authenticationv1.TokenRequestSpec{
Audiences: []string{},
ExpirationSeconds: pointer.Int64(int64(opts.ExpireTime.Seconds())),
},
}
tokenRequest, err := cli.CoreV1().ServiceAccounts(opts.ServiceAccountNamespace).CreateToken(ctx, opts.ServiceAccountName, &request, metav1.CreateOptions{})
if err != nil {
return nil, errors.Wrap(err, "failed to request token")
}
token = tokenRequest.Status.Token
CAConfigMap, err := cli.CoreV1().ConfigMaps(sa.Namespace).Get(ctx, "kube-root-ca.crt", metav1.GetOptions{})
if err != nil {
return nil, errors.Wrap(err, "failed to get root CA secret")
}
CA = []byte(CAConfigMap.Data["ca.crt"])
} else {
secretKey := sa.Secrets[0]
if secretKey.Namespace == "" {
secretKey.Namespace = sa.Namespace
}
secret, err := cli.CoreV1().Secrets(secretKey.Namespace).Get(ctx, secretKey.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
_, _ = fmt.Fprintf(writer, "ServiceAccount secret %s/%s found.\n", secretKey.Namespace, secret.Name)
if len(secret.Data["token"]) == 0 {
return nil, errors.Errorf("no token found in secret %s/%s", secret.Namespace, secret.Name)
}
_, _ = fmt.Fprintf(writer, "ServiceAccount token found.\n")
token = string(secret.Data["token"])
CA = secret.Data["ca.crt"]
}
secretKey := sa.Secrets[0]
if secretKey.Namespace == "" {
secretKey.Namespace = sa.Namespace
}
secret, err := cli.CoreV1().Secrets(secretKey.Namespace).Get(ctx, secretKey.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
_, _ = fmt.Fprintf(writer, "ServiceAccount secret %s/%s found.\n", secretKey.Namespace, secret.Name)
if len(secret.Data["token"]) == 0 {
return nil, errors.Errorf("no token found in secret %s/%s", secret.Namespace, secret.Name)
}
_, _ = fmt.Fprintf(writer, "ServiceAccount token found.\n")
return genKubeConfig(cfg, &clientcmdapi.AuthInfo{
Token: string(secret.Data["token"]),
}, secret.Data["ca.crt"])
Token: token,
}, CA)
}
// ReadIdentityFromKubeConfig extract identity from kubeconfig


@@ -1612,6 +1612,7 @@ var _ = Describe("Test Application Controller", func() {
Expect(err).Should(BeNil())
rolloutTrait := &v1beta1.TraitDefinition{}
Expect(json.Unmarshal([]byte(rolloutTdDef), rolloutTrait)).Should(BeNil())
rolloutTrait.Spec.SkipRevisionAffect = false
ns := corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "app-with-rollout-trait",
@@ -1673,15 +1674,13 @@ var _ = Describe("Test Application Controller", func() {
deploy = &v1.Deployment{}
Expect(k8sClient.Get(ctx, types.NamespacedName{Name: "myweb1", Namespace: ns.Name}, deploy)).Should(util.NotFoundMatcher{})
By("check update rollout trait won't generate new appRevision")
appRevName := checkApp.Status.LatestRevision.Name
By("check update rollout trait generate new appRevision")
checkApp.Spec.Components[0].Traits[0].Properties = &runtime.RawExtension{Raw: []byte(`{"targetRevision":"myweb1-v3"}`)}
Expect(k8sClient.Update(ctx, checkApp)).Should(BeNil())
testutil.ReconcileOnce(reconciler, reconcile.Request{NamespacedName: appKey})
testutil.ReconcileOnce(reconciler, reconcile.Request{NamespacedName: appKey})
checkApp = &v1beta1.Application{}
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
Expect(checkApp.Status.LatestRevision.Name).Should(BeEquivalentTo(appRevName))
checkRollout = &stdv1alpha1.Rollout{}
Expect(k8sClient.Get(ctx, types.NamespacedName{Name: "myweb1", Namespace: ns.Name}, checkRollout)).Should(BeNil())
Expect(checkRollout.Spec.TargetRevisionName).Should(BeEquivalentTo("myweb1-v3"))


@@ -235,7 +235,7 @@ func (def *Definition) FromCUE(val *cue.Value, templateString string) error {
labels := map[string]string{}
for k, v := range def.GetLabels() {
if !strings.HasPrefix(k, UserPrefix) {
annotations[k] = v
labels[k] = v
}
}
spec, ok := def.Object["spec"].(map[string]interface{})


@@ -165,10 +165,25 @@ func (h *gcHandler) monitor(stage string) func() {
}
}
func (h *gcHandler) regularizeResourceTracker(rts ...*v1beta1.ResourceTracker) {
for _, rt := range rts {
if rt == nil {
continue
}
for i, mr := range rt.Spec.ManagedResources {
if ok, err := utils.IsClusterScope(mr.GroupVersionKind(), h.Client.RESTMapper()); err == nil && ok {
rt.Spec.ManagedResources[i].Namespace = ""
}
}
}
}
func (h *gcHandler) Init() {
cb := h.monitor("init")
defer cb()
h.cache.registerResourceTrackers(append(h._historyRTs, h._currentRT, h._rootRT)...)
rts := append(h._historyRTs, h._currentRT, h._rootRT)
h.regularizeResourceTracker(rts...)
h.cache.registerResourceTrackers(rts...)
}
func (h *gcHandler) scan(ctx context.Context) (inactiveRTs []*v1beta1.ResourceTracker) {


@@ -27,6 +27,7 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -221,4 +222,46 @@ var _ = Describe("Test ResourceKeeper garbage collection", func() {
}, 5*time.Second).Should(Succeed())
})
It("Test gc same cluster-scoped resource but legacy resource recorded with namespace", func() {
ctx := context.Background()
cr := &unstructured.Unstructured{Object: map[string]interface{}{
"apiVersion": "rbac.authorization.k8s.io/v1",
"kind": "ClusterRole",
"metadata": map[string]interface{}{
"name": "test-cluster-scoped-resource",
"labels": map[string]interface{}{
oam.LabelAppName: "app",
oam.LabelAppNamespace: namespace,
},
},
}}
Expect(testClient.Create(ctx, cr)).Should(Succeed())
app := &v1beta1.Application{ObjectMeta: metav1.ObjectMeta{Name: "app", Namespace: namespace}}
keeper := &resourceKeeper{
Client: testClient,
app: app,
applicator: apply.NewAPIApplicator(testClient),
cache: newResourceCache(testClient, app),
}
h := gcHandler{resourceKeeper: keeper, cfg: newGCConfig()}
h._currentRT = &v1beta1.ResourceTracker{ObjectMeta: metav1.ObjectMeta{Name: "test-cluster-scoped-resource-v2"}}
Expect(testClient.Create(ctx, h._currentRT)).Should(Succeed())
h._historyRTs = []*v1beta1.ResourceTracker{{ObjectMeta: metav1.ObjectMeta{Name: "test-cluster-scoped-resource-v1"}}}
t := metav1.Now()
h._historyRTs[0].SetDeletionTimestamp(&t)
h._historyRTs[0].SetFinalizers([]string{resourcetracker.Finalizer})
h._currentRT.AddManagedResource(cr, true, false, "")
_cr := cr.DeepCopy()
_cr.SetNamespace(namespace)
h._historyRTs[0].AddManagedResource(_cr, true, false, "")
h.Init()
Expect(h.Finalize(ctx)).Should(Succeed())
Expect(testClient.Get(ctx, client.ObjectKeyFromObject(cr), &rbacv1.ClusterRole{})).Should(Succeed())
h._currentRT.Spec.ManagedResources[0].Name = "not-equal"
keeper.cache = newResourceCache(testClient, app)
h.Init()
Expect(h.Finalize(ctx)).Should(Succeed())
Expect(testClient.Get(ctx, client.ObjectKeyFromObject(cr), &rbacv1.ClusterRole{})).Should(Satisfy(errors.IsNotFound))
})
})


@@ -17,18 +17,15 @@ limitations under the License.
package resourcekeeper
import (
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"github.com/oam-dev/kubevela/pkg/utils"
)
// ClearNamespaceForClusterScopedResources clears the namespace for cluster-scoped resources
func (h *resourceKeeper) ClearNamespaceForClusterScopedResources(manifests []*unstructured.Unstructured) {
for _, manifest := range manifests {
mappings, err := h.Client.RESTMapper().RESTMappings(manifest.GroupVersionKind().GroupKind(), manifest.GroupVersionKind().Version)
if err != nil {
continue
}
if len(mappings) > 0 && mappings[0].Scope.Name() == meta.RESTScopeNameRoot {
if ok, err := utils.IsClusterScope(manifest.GroupVersionKind(), h.Client.RESTMapper()); err == nil && ok {
manifest.SetNamespace("")
}
}


@@ -26,7 +26,9 @@ import (
authv1 "k8s.io/api/authentication/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
k8stypes "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -189,3 +191,10 @@ func CreateOrUpdate(ctx context.Context, cli client.Client, obj client.Object) (
func EscapeResourceNameToLabelValue(resourceName string) string {
return strings.ReplaceAll(resourceName, ":", "_")
}
// IsClusterScope checks whether the given GVK is cluster-scoped
func IsClusterScope(gvk schema.GroupVersionKind, mapper meta.RESTMapper) (bool, error) {
mappings, err := mapper.RESTMappings(gvk.GroupKind(), gvk.Version)
isClusterScope := len(mappings) > 0 && mappings[0].Scope.Name() == meta.RESTScopeNameRoot
return isClusterScope, err
}
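Note the return convention: IsClusterScope can return a best-effort boolean alongside a non-nil error, so call sites in this patch act only when err == nil && ok. A typical call site, mirroring the resourcekeeper usage above (fragment):

if ok, err := utils.IsClusterScope(manifest.GroupVersionKind(), cli.RESTMapper()); err == nil && ok {
	manifest.SetNamespace("") // cluster-scoped objects must not carry a namespace
}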


@@ -22,6 +22,7 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierror "k8s.io/apimachinery/pkg/api/errors"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -162,4 +163,13 @@ var _ = Describe("Test Create Or Update Namespace functions", func() {
Expect(gotNS.Labels).Should(HaveKeyWithValue(k, v))
}
})
It("Test IsClusterScope", func() {
ok, err := IsClusterScope(v1.SchemeGroupVersion.WithKind("ConfigMap"), k8sClient.RESTMapper())
Expect(err).Should(Succeed())
Expect(ok).Should(BeFalse())
ok, err = IsClusterScope(rbacv1.SchemeGroupVersion.WithKind("ClusterRole"), k8sClient.RESTMapper())
Expect(err).Should(Succeed())
Expect(ok).Should(BeTrue())
})
})

pkg/utils/pprof.go (new file, 51 lines)

@@ -0,0 +1,51 @@
/*
Copyright 2022 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"net/http"
"net/http/pprof"
"k8s.io/klog/v2"
)
// EnablePprof listens on pprofAddr and exposes the profiling endpoints.
// If errChan is nil, this function panics when a listening error occurs.
func EnablePprof(pprofAddr string, errChan chan error) {
// Start pprof server if enabled
mux := http.NewServeMux()
mux.HandleFunc("/debug/pprof/", pprof.Index)
mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
pprofServer := http.Server{
Addr: pprofAddr,
Handler: mux,
}
klog.InfoS("Starting debug HTTP server", "addr", pprofServer.Addr)
if err := pprofServer.ListenAndServe(); err != nil {
klog.Error(err, "Failed to start debug HTTP server")
if errChan != nil {
errChan <- err
} else {
panic(err)
}
}
}
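A hedged usage sketch: ListenAndServe blocks, so a caller would typically start EnablePprof in a goroutine and watch the error channel; the address and surrounding wiring below are illustrative, not prescribed by this patch:

errChan := make(chan error, 1)
go utils.EnablePprof("localhost:6060", errChan)
select {
case err := <-errChan:
	klog.ErrorS(err, "pprof server exited")
case <-ctx.Done():
}

Profiles can then be pulled with the standard tooling, e.g. go tool pprof http://localhost:6060/debug/pprof/heap.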


@@ -81,19 +81,26 @@ func (c *AppCollector) CollectResourceFromApp() ([]Resource, error) {
}
// ListApplicationResources lists the resources applied by an application, as recorded by its resource trackers
func (c *AppCollector) ListApplicationResources(app *v1beta1.Application, queryTree bool) ([]*types.AppliedResource, error) {
ctx := context.Background()
func (c *AppCollector) ListApplicationResources(ctx context.Context, app *v1beta1.Application) ([]*types.AppliedResource, error) {
rootRT, currentRT, historyRTs, _, err := resourcetracker.ListApplicationResourceTrackers(ctx, c.k8sClient, app)
if err != nil {
return nil, err
}
var managedResources []*types.AppliedResource
existResources := make(map[common.ClusterObjectReference]bool, len(app.Spec.Components))
for _, rt := range append(historyRTs, rootRT, currentRT) {
if rt != nil {
for _, managedResource := range rt.Spec.ManagedResources {
if isResourceInTargetCluster(c.opt.Filter, managedResource.ClusterObjectReference) &&
isResourceInTargetComponent(c.opt.Filter, managedResource.Component) &&
(queryTree || isResourceMatchKindAndVersion(c.opt.Filter, managedResource.Kind, managedResource.APIVersion)) {
(c.opt.WithTree || isResourceMatchKindAndVersion(c.opt.Filter, managedResource.Kind, managedResource.APIVersion)) {
if c.opt.WithTree {
// When querying the resource tree, the same resource only needs to be collected once.
if _, exist := existResources[managedResource.ClusterObjectReference]; exist {
continue
}
existResources[managedResource.ClusterObjectReference] = true
}
managedResources = append(managedResources, &types.AppliedResource{
Cluster: func() string {
if managedResource.Cluster != "" {
@@ -125,7 +132,7 @@ func (c *AppCollector) ListApplicationResources(app *v1beta1.Application, queryT
}
}
if !queryTree {
if !c.opt.WithTree {
return managedResources, nil
}
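The deduplication above is the usual seen-set idiom: the same ClusterObjectReference can appear in several trackers (history, root, current), and when building the resource tree only its first occurrence should be expanded. A generic sketch of the pattern (fragment):

seen := make(map[common.ClusterObjectReference]bool)
for _, mr := range rt.Spec.ManagedResources {
	if seen[mr.ClusterObjectReference] {
		continue // already collected from another tracker
	}
	seen[mr.ClusterObjectReference] = true
	// ... append the resource to the results
}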


@@ -65,7 +65,7 @@ func (h *provider) GeneratorServiceEndpoints(wfctx wfContext.Context, v *value.V
serviceEndpoints := make([]querytypes.ServiceEndpoint, 0)
var clusterGatewayNodeIP = make(map[string]string)
collector := NewAppCollector(h.cli, opt)
resources, err := collector.ListApplicationResources(app, opt.WithTree)
resources, err := collector.ListApplicationResources(ctx, app)
if err != nil {
return err
}
@@ -210,10 +210,10 @@ func generatorFromService(service corev1.Service, selectorNodeIP func() string,
appp := judgeAppProtocol(port.Port)
for _, ingress := range service.Status.LoadBalancer.Ingress {
if ingress.Hostname != "" {
serviceEndpoints = append(serviceEndpoints, formatEndpoint(ingress.Hostname, appp, port.Protocol, port.Port, false))
serviceEndpoints = append(serviceEndpoints, formatEndpoint(ingress.Hostname, appp, port.Protocol, port.NodePort, false))
}
if ingress.IP != "" {
serviceEndpoints = append(serviceEndpoints, formatEndpoint(ingress.IP, appp, port.Protocol, port.Port, false))
serviceEndpoints = append(serviceEndpoints, formatEndpoint(ingress.IP, appp, port.Protocol, port.NodePort, false))
}
}
}


@@ -146,10 +146,26 @@ var _ = Describe("Test Query Provider", func() {
},
"type": corev1.ServiceTypeClusterIP,
},
{
"name": "load-balancer",
"ports": []corev1.ServicePort{
{Port: 8080, TargetPort: intstr.FromInt(8080), Name: "8080port", NodePort: 30020},
},
"type": corev1.ServiceTypeLoadBalancer,
"status": corev1.ServiceStatus{
LoadBalancer: corev1.LoadBalancerStatus{
Ingress: []corev1.LoadBalancerIngress{
{
IP: "2.2.2.2",
},
},
},
},
},
{
"name": "seldon-ambassador-2",
"ports": []corev1.ServicePort{
{Port: 80, TargetPort: intstr.FromInt(80), Name: "80port"},
{Port: 80, TargetPort: intstr.FromInt(80), Name: "80port", NodePort: 30010},
},
"type": corev1.ServiceTypeLoadBalancer,
"status": corev1.ServiceStatus{
@@ -232,10 +248,11 @@ var _ = Describe("Test Query Provider", func() {
Expect(err).Should(BeNil())
urls := []string{
"http://1.1.1.1/seldon/default/sdep2",
"http://1.1.1.1:30010/seldon/default/sdep2",
"http://clusterip-2.default",
"clusterip-2.default:81",
"http://1.1.1.1",
"http://2.2.2.2:30020",
"http://1.1.1.1:30010",
}
endValue, err := v.Field("list")
Expect(err).Should(BeNil())


@@ -127,7 +127,7 @@ func (h *provider) ListAppliedResources(ctx wfContext.Context, v *value.Value, a
if err = h.cli.Get(context.Background(), appKey, app); err != nil {
return v.FillObject(err.Error(), "err")
}
appResList, err := collector.ListApplicationResources(app, opt.WithTree)
appResList, err := collector.ListApplicationResources(context.Background(), app)
if err != nil {
return v.FillObject(err.Error(), "err")
}
@@ -152,7 +152,7 @@ func (h *provider) CollectResources(ctx wfContext.Context, v *value.Value, act t
if err = h.cli.Get(context.Background(), appKey, app); err != nil {
return v.FillObject(err.Error(), "err")
}
appResList, err := collector.ListApplicationResources(app, opt.WithTree)
appResList, err := collector.ListApplicationResources(context.Background(), app)
if err != nil {
return v.FillObject(err.Error(), "err")
}


@@ -656,8 +656,8 @@ options: {
{
"name": "loadbalancer",
"ports": []corev1.ServicePort{
{Port: 80, TargetPort: intstr.FromInt(80), Name: "80port"},
{Port: 81, TargetPort: intstr.FromInt(81), Name: "81port"},
{Port: 80, TargetPort: intstr.FromInt(80), Name: "80port", NodePort: 30080},
{Port: 81, TargetPort: intstr.FromInt(81), Name: "81port", NodePort: 30081},
},
"type": corev1.ServiceTypeLoadBalancer,
"status": corev1.ServiceStatus{
@@ -687,7 +687,7 @@ options: {
{
"name": "seldon-ambassador",
"ports": []corev1.ServicePort{
{Port: 80, TargetPort: intstr.FromInt(80), Name: "80port"},
{Port: 80, TargetPort: intstr.FromInt(80), Name: "80port", NodePort: 30011},
},
"type": corev1.ServiceTypeLoadBalancer,
"status": corev1.ServiceStatus{
@@ -937,13 +937,13 @@ options: {
"https://ingress.domain.path/test",
"https://ingress.domain.path/test2",
fmt.Sprintf("http://%s:30229", gatewayIP),
"http://10.10.10.10",
"http://text.example.com",
"10.10.10.10:81",
"text.example.com:81",
"http://10.10.10.10:30080",
"http://text.example.com:30080",
"10.10.10.10:30081",
"text.example.com:30081",
fmt.Sprintf("http://%s:30002", gatewayIP),
"http://ingress.domain.helm",
"http://1.1.1.1/seldon/default/sdep",
"http://1.1.1.1:30011/seldon/default/sdep",
"http://gateway.domain",
"http://gateway.domain/api",
"https://demo.kubevela.net",


@@ -65,6 +65,9 @@ func (s *ServiceEndpoint) String() string {
if protocol == "tcp" {
return fmt.Sprintf("%s:%d%s", s.Endpoint.Host, s.Endpoint.Port, path)
}
if s.Endpoint.Port == 0 {
return fmt.Sprintf("%s://%s%s", protocol, s.Endpoint.Host, path)
}
return fmt.Sprintf("%s://%s:%d%s", protocol, s.Endpoint.Host, s.Endpoint.Port, path)
}
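The new branch avoids emitting addresses like http://example.com:0 for endpoints that carry no concrete port. A self-contained sketch of the rule, with made-up values:

package main

import "fmt"

// format mirrors the branch above: omit the port when it is unset (0).
func format(protocol, host, path string, port int32) string {
	if port == 0 {
		return fmt.Sprintf("%s://%s%s", protocol, host, path)
	}
	return fmt.Sprintf("%s://%s:%d%s", protocol, host, port, path)
}

func main() {
	fmt.Println(format("http", "ingress.example.com", "/api", 0))     // http://ingress.example.com/api
	fmt.Println(format("http", "ingress.example.com", "/api", 30080)) // http://ingress.example.com:30080/api
}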


@@ -23,6 +23,8 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"github.com/oam-dev/kubevela/pkg/cue/model/value"
"github.com/oam-dev/kubevela/pkg/oam"
querytypes "github.com/oam-dev/kubevela/pkg/velaql/providers/query/types"
)
@@ -61,11 +63,21 @@ func buildResourceArray(res querytypes.AppliedResource, parent, node *querytypes
func buildResourceItem(res querytypes.AppliedResource, workload querytypes.Workload, object unstructured.Unstructured) querytypes.ResourceItem {
return querytypes.ResourceItem{
Cluster: res.Cluster,
Workload: workload,
Component: res.Component,
Object: object,
PublishVersion: res.PublishVersion,
DeployVersion: res.DeployVersion,
Cluster: res.Cluster,
Workload: workload,
Component: res.Component,
Object: object,
PublishVersion: func() string {
if object.GetAnnotations()[oam.AnnotationPublishVersion] != "" {
return object.GetAnnotations()[oam.AnnotationPublishVersion]
}
return res.PublishVersion
}(),
DeployVersion: func() string {
if object.GetAnnotations()[oam.AnnotationDeployVersion] != "" {
return object.GetAnnotations()[oam.AnnotationDeployVersion]
}
return res.DeployVersion
}(),
}
}
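Both closures apply the same annotation-first fallback. If the pattern spreads, it could be factored into a small helper like the hypothetical one below (not part of this patch):

// annotationOr returns the object's annotation value for key, or fallback when the annotation is unset.
func annotationOr(object unstructured.Unstructured, key, fallback string) string {
	if v := object.GetAnnotations()[key]; v != "" {
		return v
	}
	return fallback
}

// e.g. PublishVersion: annotationOr(object, oam.AnnotationPublishVersion, res.PublishVersion)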


@@ -184,7 +184,9 @@ func (g *DeployPreApproveWorkflowStepGenerator) Generate(app *v1beta1.Applicatio
for _, step := range existingSteps {
if step.Type == "deploy" && !lastSuspend {
props := DeployWorkflowStepSpec{}
_ = utils.StrictUnmarshal(step.Properties.Raw, &props)
if step.Properties != nil {
_ = utils.StrictUnmarshal(step.Properties.Raw, &props)
}
if props.Auto != nil && !*props.Auto {
steps = append(steps, v1beta1.WorkflowStep{
Name: "manual-approve-" + step.Name,


@@ -20,6 +20,7 @@ import (
"encoding/json"
"fmt"
"math"
"strings"
"sync"
"time"
@@ -93,6 +94,28 @@ func NewWorkflow(app *oamcore.Application, cli client.Client, mode common.Workfl
}
}
// needRestart checks whether the application workflow needs to restart:
// 1. If the workflow status is empty, there is no previous run record and the
// workflow restarts (cold start).
// 2. If the workflow status is not empty and publishVersion is set, the
// desired rev is the publishVersion.
// 3. If the workflow status is not empty and publishVersion is not set, the
// legacy-style rev <rev>:<hash> is recognized and <rev> is compared to
// revName.
func needRestart(app *oamcore.Application, revName string) bool {
if app.Status.Workflow == nil {
return true
}
if metav1.HasAnnotation(app.ObjectMeta, oam.AnnotationPublishVersion) {
return app.Status.Workflow.AppRevision != app.GetAnnotations()[oam.AnnotationPublishVersion]
}
current := app.Status.Workflow.AppRevision
if idx := strings.LastIndexAny(current, ":"); idx >= 0 {
current = current[:idx]
}
return current != revName
}
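A worked example of rule 3, with a hypothetical revision name: the legacy <rev>:<hash> status value is trimmed back to <rev> before comparison, so a mere spec-hash change no longer forces a rerun (fragment):

current := "app-v1:3f9c2d" // legacy "<rev>:<hash>" style status value
if idx := strings.LastIndexAny(current, ":"); idx >= 0 {
	current = current[:idx] // "app-v1"
}
fmt.Println(current != "app-v1") // false: same revision, no restart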
// ExecuteSteps process workflow step in order.
func (w *workflow) ExecuteSteps(ctx monitorContext.Context, appRev *oamcore.ApplicationRevision, taskRunners []wfTypes.TaskRunner) (common.WorkflowState, error) {
revAndSpecHash, err := ComputeWorkflowRevisionHash(appRev.Name, w.app)
@@ -104,7 +127,7 @@ func (w *workflow) ExecuteSteps(ctx monitorContext.Context, appRev *oamcore.Appl
return common.WorkflowStateFinished, nil
}
if w.app.Status.Workflow == nil || w.app.Status.Workflow.AppRevision != revAndSpecHash {
if needRestart(w.app, appRev.Name) {
return w.restartWorkflow(ctx, revAndSpecHash)
}
@@ -195,7 +218,7 @@ func checkWorkflowSuspended(wfStatus *common.WorkflowStatus) bool {
return wfStatus.Suspend
}
func (w *workflow) restartWorkflow(ctx monitorContext.Context, revAndSpecHash string) (common.WorkflowState, error) {
func (w *workflow) restartWorkflow(ctx monitorContext.Context, rev string) (common.WorkflowState, error) {
ctx.Info("Restart Workflow")
status := w.app.Status.Workflow
if status != nil && !status.Finished {
@@ -207,7 +230,7 @@ func (w *workflow) restartWorkflow(ctx monitorContext.Context, revAndSpecHash st
mode = common.WorkflowModeDAG
}
w.app.Status.Workflow = &common.WorkflowStatus{
AppRevision: revAndSpecHash,
AppRevision: rev,
Mode: mode,
StartTime: metav1.Now(),
}
@@ -373,7 +396,7 @@ func (w *workflow) allDone(taskRunners []wfTypes.TaskRunner) (bool, bool) {
for _, ss := range status.Steps {
if ss.Name == t.Name() {
done = wfTypes.IsStepFinish(ss.Phase, ss.Reason)
success = done && (ss.Phase == common.WorkflowStepPhaseSucceeded || ss.Phase == common.WorkflowStepPhaseSkipped)
success = success && done && (ss.Phase == common.WorkflowStepPhaseSucceeded || ss.Phase == common.WorkflowStepPhaseSkipped)
break
}
}
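The one-word fix matters because success was previously recomputed on every iteration, so the last matching step alone decided the overall result. A minimal illustration of the accumulating form (values made up):

results := []bool{true, false, true} // per-step success
success := true
for _, ok := range results {
	success = success && ok // once false, stays false
}
fmt.Println(success) // false: a single failed step fails the whole run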


@@ -122,7 +122,8 @@ var _ = Describe("Test Workflow", func() {
Type: "success",
},
})
revision = revision.DeepCopy()
revision.Name = "app-v2"
app.Status.Workflow = workflowStatus
wf = NewWorkflow(app, k8sClient, common.WorkflowModeStep, false, nil)
state, err = wf.ExecuteSteps(ctx, revision, runners)


@@ -170,13 +170,6 @@ func startReferenceDocsSite(ctx context.Context, ns string, c common.Args, ioStr
if err != nil {
return err
}
ref := &docgen.MarkdownReference{
ParseReference: docgen.ParseReference{
Client: cli,
I18N: &docgen.En,
},
}
config, err := c.GetConfig()
if err != nil {
return err
@@ -185,6 +178,18 @@ func startReferenceDocsSite(ctx context.Context, ns string, c common.Args, ioStr
if err != nil {
return err
}
dm, err := c.GetDiscoveryMapper()
if err != nil {
return err
}
ref := &docgen.MarkdownReference{
ParseReference: docgen.ParseReference{
Client: cli,
I18N: &docgen.En,
},
DiscoveryMapper: dm,
}
if err := ref.CreateMarkdown(ctx, capabilities, docsPath, true, pd); err != nil {
return err
}
@@ -453,6 +458,10 @@ func ShowReferenceMarkdown(ctx context.Context, c common.Args, ioStreams cmdutil
return err
}
ref.ParseReference = paserRef
ref.DiscoveryMapper, err = c.GetDiscoveryMapper()
if err != nil {
return err
}
if err := ref.GenerateReferenceDocs(ctx, c, outputPath); err != nil {
return errors.Wrap(err, "failed to generate reference docs")
}


@@ -17,6 +17,7 @@ limitations under the License.
package cli
import (
"bufio"
"context"
"fmt"
"time"
@@ -47,11 +48,15 @@ type UnInstallArgs struct {
Namespace string
Detail bool
force bool
cancel bool
}
// NewUnInstallCommand creates the `uninstall` command to uninstall vela core
func NewUnInstallCommand(c common.Args, order string, ioStreams util.IOStreams) *cobra.Command {
unInstallArgs := &UnInstallArgs{Args: c, userInput: NewUserInput(), helmHelper: helm.NewHelper()}
unInstallArgs := &UnInstallArgs{Args: c, userInput: &UserInput{
Writer: ioStreams.Out,
Reader: bufio.NewReader(ioStreams.In),
}, helmHelper: helm.NewHelper()}
cmd := &cobra.Command{
Use: "uninstall",
Short: "Uninstalls KubeVela from a Kubernetes cluster",
@@ -59,8 +64,8 @@ func NewUnInstallCommand(c common.Args, order string, ioStreams util.IOStreams)
Long: "Uninstalls KubeVela from a Kubernetes cluster.",
Args: cobra.ExactArgs(0),
PreRunE: func(cmd *cobra.Command, args []string) error {
userConfirmation := unInstallArgs.userInput.AskBool("Would you like to uninstall KubeVela from this cluster?", &UserInputOptions{AssumeYes: assumeYes})
if !userConfirmation {
unInstallArgs.cancel = unInstallArgs.userInput.AskBool("Would you like to uninstall KubeVela from this cluster?", &UserInputOptions{AssumeYes: assumeYes})
if !unInstallArgs.cancel {
return nil
}
kubeClient, err := c.GetClient()
@@ -97,6 +102,9 @@ func NewUnInstallCommand(c common.Args, order string, ioStreams util.IOStreams)
return nil
},
RunE: func(cmd *cobra.Command, args []string) error {
if !unInstallArgs.cancel {
return nil
}
ioStreams.Info("Starting to uninstall KubeVela")
restConfig, err := c.GetConfig()
if err != nil {
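The shape of the fix is to record the confirmation in PreRunE and gate RunE on it, since returning nil from PreRunE still lets RunE execute. A stripped-down cobra sketch (the command body and prompt helper are hypothetical):

var confirmed bool
cmd := &cobra.Command{
	Use: "uninstall",
	PreRunE: func(cmd *cobra.Command, args []string) error {
		confirmed = askBool("Would you like to uninstall KubeVela?") // assumed yes/no prompt helper
		return nil                                                   // declining is not an error
	},
	RunE: func(cmd *cobra.Command, args []string) error {
		if !confirmed {
			return nil // user said no: exit quietly
		}
		// ... perform the actual uninstall
		return nil
	},
}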


@@ -19,14 +19,20 @@ package cli
import (
"context"
"fmt"
"os"
"strings"
"testing"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/stretchr/testify/assert"
"sigs.k8s.io/yaml"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/oam/util"
"github.com/oam-dev/kubevela/pkg/utils/common"
pkgutils "github.com/oam-dev/kubevela/pkg/utils/util"
)
var _ = Describe("Test Install Command", func() {
@@ -63,6 +69,17 @@ var _ = Describe("Test Install Command", func() {
})
})
func TestUninstall(t *testing.T) {
// Test answering NO when prompted. Should just exit.
cmd := NewUnInstallCommand(common.Args{}, "", pkgutils.IOStreams{
Out: os.Stdout,
In: strings.NewReader("n\n"),
})
cmd.SetArgs([]string{})
err := cmd.Execute()
assert.Nil(t, err, "should just exit if answer is no")
}
var fluxcdYaml = `
apiVersion: core.oam.dev/v1beta1
kind: Application


@@ -215,8 +215,8 @@ var _ = Describe("Test velaQL", func() {
{
"name": "loadbalancer",
"ports": []corev1.ServicePort{
{Port: 80, TargetPort: intstr.FromInt(80), Name: "80port"},
{Port: 81, TargetPort: intstr.FromInt(81), Name: "81port"},
{Port: 80, TargetPort: intstr.FromInt(80), Name: "80port", NodePort: 30180},
{Port: 81, TargetPort: intstr.FromInt(81), Name: "81port", NodePort: 30181},
},
"type": corev1.ServiceTypeLoadBalancer,
"status": corev1.ServiceStatus{
@@ -436,10 +436,10 @@ var _ = Describe("Test velaQL", func() {
"https://ingress.domain.path/test",
"https://ingress.domain.path/test2",
fmt.Sprintf("http://%s:30229", gatewayIP),
"http://10.10.10.10",
"http://text.example.com",
"10.10.10.10:81",
"text.example.com:81",
"http://10.10.10.10:30180",
"http://text.example.com:30180",
"10.10.10.10:30181",
"text.example.com:30181",
// helmRelease
fmt.Sprintf("http://%s:30002", gatewayIP),
"http://ingress.domain.helm",


@@ -230,14 +230,17 @@ func prepareToForceDeleteTerraformComponents(ctx context.Context, k8sClient clie
for _, c := range app.Spec.Components {
var def corev1beta1.ComponentDefinition
if err := k8sClient.Get(ctx, client.ObjectKey{Name: c.Type, Namespace: types.DefaultKubeVelaNS}, &def); err != nil {
return err
if !apierrors.IsNotFound(err) {
return err
}
if err := k8sClient.Get(ctx, client.ObjectKey{Name: c.Type, Namespace: namespace}, &def); err != nil {
return err
}
}
if def.Spec.Schematic != nil && def.Spec.Schematic.Terraform != nil {
var conf terraformapi.Configuration
if err := k8sClient.Get(ctx, client.ObjectKey{Name: c.Name, Namespace: namespace}, &conf); err != nil {
if !apierrors.IsNotFound(err) {
return err
}
return err
}
conf.Spec.ForceDelete = &forceDelete
if err := k8sClient.Update(ctx, &conf); err != nil {
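The lookup now degrades gracefully: NotFound in the system namespace falls through to the application's own namespace, while any other error still aborts. The same logic restated as a fragment (namespace values illustrative):

var def corev1beta1.ComponentDefinition
err := k8sClient.Get(ctx, client.ObjectKey{Name: c.Type, Namespace: types.DefaultKubeVelaNS}, &def)
if apierrors.IsNotFound(err) {
	// the definition is not built-in; retry in the user's namespace
	err = k8sClient.Get(ctx, client.ObjectKey{Name: c.Type, Namespace: namespace}, &def)
}
if err != nil {
	return err
}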


@@ -19,7 +19,7 @@ import (
"context"
"testing"
terraformapi "github.com/oam-dev/terraform-controller/api/v1beta1"
terraformapi "github.com/oam-dev/terraform-controller/api/v1beta2"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -55,7 +55,7 @@ func TestPrepareToForceDeleteTerraformComponents(t *testing.T) {
def1 := &v1beta1.ComponentDefinition{
TypeMeta: metav1.TypeMeta{
Kind: "ComponentDefinition",
APIVersion: "core.oam.dev/v1beta1",
APIVersion: "core.oam.dev/v1beta2",
},
ObjectMeta: metav1.ObjectMeta{
Name: "d1",
@@ -75,6 +75,16 @@ func TestPrepareToForceDeleteTerraformComponents(t *testing.T) {
Namespace: "default",
},
}
userNamespace := "another-namespace"
def2 := def1.DeepCopy()
def2.SetNamespace(userNamespace)
app2 := app1.DeepCopy()
app2.SetNamespace(userNamespace)
app2.SetName("app2")
conf2 := conf1.DeepCopy()
conf2.SetNamespace(userNamespace)
k8sClient1 := fake.NewClientBuilder().WithScheme(s).WithObjects(app1, def1, conf1).Build()
k8sClient2 := fake.NewClientBuilder().Build()
@@ -83,6 +93,7 @@ func TestPrepareToForceDeleteTerraformComponents(t *testing.T) {
k8sClient4 := fake.NewClientBuilder().WithScheme(s).WithObjects(app1, def1).Build()
k8sClient5 := fake.NewClientBuilder().WithScheme(s).WithObjects(app2, def2, conf2).Build()
type args struct {
k8sClient client.Client
namespace string
@@ -141,16 +152,27 @@ func TestPrepareToForceDeleteTerraformComponents(t *testing.T) {
"app1",
},
want: want{
errMsg: "no kind is registered for the type",
errMsg: "configurations.terraform.core.oam.dev \"c1\" not found",
},
},
"can read definition from application namespace": {
args: args{
k8sClient5,
userNamespace,
"app2",
},
want: want{},
},
}
for name, tc := range testcases {
t.Run(name, func(t *testing.T) {
err := prepareToForceDeleteTerraformComponents(ctx, tc.args.k8sClient, tc.args.namespace, tc.args.name)
if err != nil || tc.want.errMsg != "" {
if err != nil {
assert.NotEmpty(t, tc.want.errMsg)
assert.Contains(t, err.Error(), tc.want.errMsg)
} else {
assert.Empty(t, tc.want.errMsg)
}
})
}


@@ -32,6 +32,7 @@ import (
"github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/cue"
"github.com/oam-dev/kubevela/pkg/cue/packages"
"github.com/oam-dev/kubevela/pkg/oam/discoverymapper"
"github.com/oam-dev/kubevela/pkg/utils/common"
)
@@ -43,6 +44,7 @@ type MarkdownReference struct {
Filter func(types.Capability) bool
AllInOne bool
CustomDocHeader string
DiscoveryMapper discoverymapper.DiscoveryMapper
ParseReference
}


@@ -164,6 +164,29 @@ var _ = Describe("Test velaQL rest api", func() {
}, time.Minute*1, 3*time.Second).Should(BeNil())
})
It("Test query application pod when upgrading the app", func() {
// Create a new RT to simulate upgrading the application
rt := &v1beta1.ResourceTracker{}
Expect(k8sClient.Get(context.TODO(), types.NamespacedName{
Name: fmt.Sprintf("%s-v1-%s", appName, namespace),
}, rt)).Should(BeNil())
newRT := rt.DeepCopy()
newRT.Name = fmt.Sprintf("%s-v2-%s", appName, namespace)
newRT.Spec.ApplicationGeneration = 0
newRT.UID = ""
newRT.ResourceVersion = ""
Expect(k8sClient.Create(context.TODO(), newRT)).Should(BeNil())
queryRes := get(fmt.Sprintf("/query?velaql=%s{appName=%s,appNs=%s,name=%s}.%s", "test-component-pod-view", appName, namespace, component1Name, "status"))
status := new(Status)
Expect(decodeResponseBody(queryRes, status)).Should(Succeed())
Expect(len(status.PodList)).Should(Equal(1))
Expect(status.PodList[0].Component).Should(Equal(component1Name))
// Clear the test data
Expect(k8sClient.Delete(context.TODO(), newRT)).Should(Succeed())
})
It("Test collect pod from cronJob", func() {
cronJob := new(v1beta1.ComponentDefinition)
Expect(yaml.Unmarshal([]byte(cronJobComponentDefinition), cronJob)).Should(BeNil())


@@ -43,7 +43,11 @@ import (
)
var _ = Describe("Test multicluster standalone scenario", func() {
waitObject := func(ctx context.Context, un unstructured.Unstructured) {
Eventually(func(g Gomega) error {
return k8sClient.Get(ctx, client.ObjectKeyFromObject(&un), &un)
}, 10*time.Second).Should(Succeed())
}
var namespace string
var hubCtx context.Context
var workerCtx context.Context
@@ -116,10 +120,13 @@ var _ = Describe("Test multicluster standalone scenario", func() {
deploy := readFile("deployment.yaml")
Expect(k8sClient.Create(hubCtx, deploy)).Should(Succeed())
waitObject(hubCtx, *deploy)
workflow := readFile("workflow-suspend.yaml")
Expect(k8sClient.Create(hubCtx, workflow)).Should(Succeed())
waitObject(hubCtx, *workflow)
policy := readFile("policy-zero-replica.yaml")
Expect(k8sClient.Create(hubCtx, policy)).Should(Succeed())
waitObject(hubCtx, *policy)
app := readFile("app-with-publish-version.yaml")
Expect(k8sClient.Create(hubCtx, app)).Should(Succeed())
appKey := client.ObjectKeyFromObject(app)


@@ -25,27 +25,26 @@ import (
"strings"
"time"
"k8s.io/apimachinery/pkg/runtime"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/authentication/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/multicluster"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/utils"
)
func initializeContext() (hubCtx context.Context, workerCtx context.Context) {
@@ -151,11 +150,12 @@ var _ = Describe("Test multicluster scenario", func() {
It("Test generate service account kubeconfig", func() {
_, workerCtx := initializeContext()
// create service account kubeconfig in worker cluster
By("create service account kubeconfig in worker cluster")
key := time.Now().UnixNano()
serviceAccountName := fmt.Sprintf("test-service-account-%d", key)
serviceAccountNamespace := "kube-system"
serviceAccount := &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{Namespace: "kube-system", Name: serviceAccountName},
ObjectMeta: metav1.ObjectMeta{Namespace: serviceAccountNamespace, Name: serviceAccountName},
}
Expect(k8sClient.Create(workerCtx, serviceAccount)).Should(Succeed())
defer func() {
@@ -165,30 +165,26 @@ var _ = Describe("Test multicluster scenario", func() {
clusterRoleBindingName := fmt.Sprintf("test-cluster-role-binding-%d", key)
clusterRoleBinding := &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{Name: clusterRoleBindingName},
Subjects: []rbacv1.Subject{{Kind: "ServiceAccount", Name: serviceAccountName, Namespace: "kube-system"}},
Subjects: []rbacv1.Subject{{Kind: "ServiceAccount", Name: serviceAccountName, Namespace: serviceAccountNamespace}},
RoleRef: rbacv1.RoleRef{Name: "cluster-admin", APIGroup: "rbac.authorization.k8s.io", Kind: "ClusterRole"},
}
Expect(k8sClient.Create(workerCtx, clusterRoleBinding)).Should(Succeed())
defer func() {
Expect(k8sClient.Get(workerCtx, types.NamespacedName{Namespace: "kube-system", Name: clusterRoleBindingName}, clusterRoleBinding)).Should(Succeed())
Expect(k8sClient.Get(workerCtx, types.NamespacedName{Namespace: serviceAccountNamespace, Name: clusterRoleBindingName}, clusterRoleBinding)).Should(Succeed())
Expect(k8sClient.Delete(workerCtx, clusterRoleBinding)).Should(Succeed())
}()
serviceAccount = &corev1.ServiceAccount{}
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(workerCtx, types.NamespacedName{Name: serviceAccountName, Namespace: "kube-system"}, serviceAccount)).Should(Succeed())
g.Expect(len(serviceAccount.Secrets)).Should(Equal(1))
}, time.Second*30).Should(Succeed())
secret := &corev1.Secret{}
Expect(k8sClient.Get(workerCtx, types.NamespacedName{Name: serviceAccount.Secrets[0].Name, Namespace: "kube-system"}, secret)).Should(Succeed())
token, ok := secret.Data["token"]
Expect(ok).Should(BeTrue())
By("Generating a token for SA")
tr := &v1.TokenRequest{}
token, err := k8sCli.CoreV1().ServiceAccounts(serviceAccountNamespace).CreateToken(workerCtx, serviceAccountName, tr, metav1.CreateOptions{})
Expect(err).Should(BeNil())
config, err := clientcmd.LoadFromFile(WorkerClusterKubeConfigPath)
Expect(err).Should(Succeed())
currentContext, ok := config.Contexts[config.CurrentContext]
Expect(ok).Should(BeTrue())
authInfo, ok := config.AuthInfos[currentContext.AuthInfo]
Expect(ok).Should(BeTrue())
authInfo.Token = string(token)
authInfo.Token = token.Status.Token
authInfo.ClientKeyData = nil
authInfo.ClientCertificateData = nil
kubeconfigFilePath := fmt.Sprintf("/tmp/worker.sa-%d.kubeconfig", key)


@@ -23,11 +23,16 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/utils/strings/slices"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/config"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/multicluster"
oamutil "github.com/oam-dev/kubevela/pkg/oam/util"
"github.com/oam-dev/kubevela/pkg/utils/common"
"github.com/oam-dev/kubevela/pkg/utils/util"
"github.com/oam-dev/kubevela/references/cli"
@@ -40,6 +45,7 @@ const (
var (
k8sClient client.Client
k8sCli kubernetes.Interface
)
func execCommand(args ...string) (string, error) {
@@ -66,6 +72,8 @@ var _ = BeforeSuite(func() {
config.Wrap(multicluster.NewSecretModeMultiClusterRoundTripper)
k8sClient, err = client.New(config, options)
Expect(err).Should(Succeed())
k8sCli, err = kubernetes.NewForConfig(config)
Expect(err).Should(Succeed())
// join worker cluster
_, err = execCommand("cluster", "join", WorkerClusterKubeConfigPath, "--name", WorkerClusterName)
@@ -73,12 +81,25 @@ var _ = BeforeSuite(func() {
})
var _ = AfterSuite(func() {
holdAddons := []string{"addon-terraform", "addon-fluxcd"}
Eventually(func(g Gomega) {
apps := &v1beta1.ApplicationList{}
g.Expect(k8sClient.List(context.Background(), apps)).Should(Succeed())
for _, app := range apps.Items {
if slices.Contains(holdAddons, app.Name) {
continue
}
g.Expect(k8sClient.Delete(context.Background(), app.DeepCopy())).Should(Succeed())
}
}, 3*time.Minute).Should(Succeed())
Eventually(func(g Gomega) {
// Delete terraform and fluxcd in order
app := &v1beta1.Application{}
apps := &v1beta1.ApplicationList{}
for _, addon := range holdAddons {
g.Expect(k8sClient.Delete(context.Background(), &v1beta1.Application{ObjectMeta: v1.ObjectMeta{Name: addon, Namespace: types.DefaultKubeVelaNS}})).Should(SatisfyAny(Succeed(), oamutil.NotFoundMatcher{}))
g.Expect(k8sClient.Get(context.Background(), client.ObjectKey{Name: addon, Namespace: types.DefaultKubeVelaNS}, app)).Should(SatisfyAny(Succeed(), oamutil.NotFoundMatcher{}))
}
err := k8sClient.List(context.Background(), apps)
g.Expect(err).Should(Succeed())
g.Expect(len(apps.Items)).Should(Equal(0))


@@ -8,7 +8,7 @@ spec:
- name: bad-resource
properties:
objects:
- apiVersion: apiregistration.k8s.io/v1beta1
- apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
name: test-bad-resource


@@ -18,6 +18,7 @@ package controllers_test
import (
"context"
"encoding/json"
"fmt"
"strings"
"time"
@@ -27,6 +28,7 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
logf "sigs.k8s.io/controller-runtime/pkg/log"
@@ -80,6 +82,7 @@ var _ = Describe("HealthScope", func() {
var healthyAppName, unhealthyAppName string
Expect(utilcommon.ReadYamlToObject("testdata/app/app_healthscope.yaml", &newApp)).Should(BeNil())
newApp.Namespace = namespace
convertToLegacyIngressTrait(&newApp)
Eventually(func() error {
return k8sClient.Create(ctx, newApp.DeepCopy())
}, 10*time.Second, 500*time.Millisecond).Should(Succeed())
@@ -101,6 +104,7 @@ var _ = Describe("HealthScope", func() {
newApp = v1beta1.Application{}
Expect(utilcommon.ReadYamlToObject("testdata/app/app_healthscope_unhealthy.yaml", &newApp)).Should(BeNil())
newApp.Namespace = namespace
convertToLegacyIngressTrait(&newApp)
Eventually(func() error {
return k8sClient.Create(ctx, newApp.DeepCopy())
}, 10*time.Second, 500*time.Millisecond).Should(Succeed())
@@ -154,9 +158,11 @@ var _ = Describe("HealthScope", func() {
return fmt.Errorf("expect healthy comp, but %v is unhealthy, msg: %q", compSts1.Name, compSts1.Message)
}
if len(compSts1.Traits) != 1 {
return fmt.Errorf("expect 2 traits statuses, but got %d", len(compSts1.Traits))
return fmt.Errorf("expect 1 trait statuses, but got %d", len(compSts1.Traits))
}
if !strings.Contains(compSts1.Traits[0].Message, "visit the cluster or load balancer in front of the cluster") {
return fmt.Errorf("trait message isn't right, now is %s", compSts1.Traits[0].Message)
}
Expect(compSts1.Traits[0].Message).Should(ContainSubstring("No loadBalancer found"))
return nil
}, time.Second*30, time.Millisecond*500).Should(Succeed())
@@ -205,3 +211,34 @@ var _ = Describe("HealthScope", func() {
}, time.Second*30, time.Millisecond*500).Should(Succeed())
})
})
// convertToLegacyIngressTrait converts the app's gateway traits to the legacy ingress trait
func convertToLegacyIngressTrait(app *v1beta1.Application) {
if noNetworkingV1 {
for i := range app.Spec.Components {
for j := range app.Spec.Components[i].Traits {
if app.Spec.Components[i].Traits[j].Type == "gateway" {
app.Spec.Components[i].Traits[j].Type = "ingress"
}
props := app.Spec.Components[i].Traits[j].Properties
propMap, err := util.RawExtension2Map(props)
if err != nil {
return
}
newPropMap := map[string]interface{}{}
for k := range propMap {
if k != "class" {
newPropMap[k] = propMap[k]
}
}
ext, err := json.Marshal(newPropMap)
if err != nil {
return
}
app.Spec.Components[i].Traits[j].Properties = &runtime.RawExtension{
Raw: ext,
}
}
}
}
}


@@ -31,13 +31,17 @@ import (
. "github.com/onsi/gomega"
kruise "github.com/openkruise/kruise-api/apps/v1alpha1"
"github.com/pkg/errors"
networkv1 "k8s.io/api/networking/v1"
rbac "k8s.io/api/rbac/v1"
crdv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
k8sutils "k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
@@ -58,6 +62,10 @@ var manualscalertrait v1alpha2.TraitDefinition
var roleName = "oam-example-com"
var roleBindingName = "oam-role-binding"
var (
noNetworkingV1 bool
)
// A DefinitionExtension is an Object type for xxxDefinition.spec.extension
type DefinitionExtension struct {
Alias string `json:"alias,omitempty"`
@@ -104,6 +112,8 @@ var _ = BeforeSuite(func(done Done) {
}
By("Finished setting up test environment")
detectAPIVersion()
// Create manual scaler trait definition
manualscalertrait = v1alpha2.TraitDefinition{
ObjectMeta: metav1.ObjectMeta{
@@ -219,3 +229,20 @@ func RequestReconcileNow(ctx context.Context, o client.Object) {
func randomNamespaceName(basic string) string {
return fmt.Sprintf("%s-%s", basic, strconv.FormatInt(rand.Int63(), 16))
}
// detectAPIVersion detects whether the cluster only serves legacy (pre-networking.k8s.io/v1) API versions
func detectAPIVersion() {
err := k8sClient.Create(context.Background(), &networkv1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: "test-ingress",
},
Spec: networkv1.IngressSpec{
IngressClassName: k8sutils.StringPtr("nginx"),
Rules: []networkv1.IngressRule{},
},
})
var noKindMatchErr = &meta.NoKindMatchError{}
if err != nil && errors.As(err, &noKindMatchErr) {
noNetworkingV1 = true
}
}
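The probe works by attempting a create and classifying the failure as a NoKindMatchError. An equivalent, side-effect-free check could go through the RESTMapper instead (a sketch, not what the suite does):

_, err := k8sClient.RESTMapper().RESTMapping(
	schema.GroupKind{Group: "networking.k8s.io", Kind: "Ingress"}, "v1")
noNetworkingV1 = meta.IsNoMatchError(err)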


@@ -13,8 +13,9 @@ spec:
image: oamdev/testapp:v1
port: 8080
traits:
- type: ingress
- type: gateway
properties:
class: traefik
domain: test.my.domain
http:
"/": 8080
@@ -27,8 +28,9 @@ spec:
image: oamdev/testapp:v1
port: 8080
traits:
- type: ingress
- type: gateway
properties:
class: traefik
domain: test.my.domain
http:
"/": 8080


@@ -21,8 +21,9 @@ spec:
image: oamdev/testapp:v1
port: 8080
traits:
- type: ingress
- type: gateway
properties:
class: traefik
domain: test.my.domain
http:
"/": 8080


@@ -351,6 +351,9 @@ template: {
if v.name == _|_ {
name: "port-" + strconv.FormatInt(v.port, 10)
}
if v.nodePort != _|_ && parameter.exposeType == "NodePort" {
nodePort: v.nodePort
}
},
]
@@ -401,6 +404,8 @@ template: {
protocol: *"TCP" | "UDP" | "SCTP"
// +usage=Specify if the port should be exposed
expose: *false | bool
// +usage=The exposed node port. Only valid when exposeType is NodePort.
nodePort?: int
}]
// +ignore


@@ -9,7 +9,7 @@
template: {
parameter: {
// +usage=Specify the names of the clusters to select.
cluster?: [...string]
clusters?: [...string]
// +usage=Specify the label selector for clusters
clusterLabelSelector?: [string]: string
// +usage=Deprecated: Use clusterLabelSelector instead.