Compare commits

...

13 Commits

Author SHA1 Message Date
github-actions[bot]
360f69bea5 Fix: fix the typo error in vela top (#5983) 2023-05-12 22:42:41 +08:00
Somefive
974644ddc4 [Backport release-1.8] Chore: add e2e ci env bootstrap & remove multlcluster legacy rollout ci #5926 (#5939) 2023-05-04 16:48:14 +08:00
qiaozp
f3a00984da Fix: migrate CI to CNCF machines in release-x branch (#5952) 2023-05-04 16:32:37 +08:00
github-actions[bot]
92be5b3424 [Backport release-1.8] Fix: trait not added in Go SDK (#5951)
Signed-off-by: qiaozp <qiaozhongpei.qzp@alibaba-inc.com>
(cherry picked from commit e9b1b63dae)

Co-authored-by: qiaozp <qiaozhongpei.qzp@alibaba-inc.com>
2023-05-04 10:27:03 +08:00
github-actions[bot]
e528902bea [Backport release-1.8] Fix: install dependency is invalid for runtime addon when it's cluster arg is nil (#5935)
* Fix: install dependency is invalid for runtime addon when it's clusters arg is nil

Signed-off-by: zhaohuihui <zhaohuihui_yewu@cmss.chinamobile.com>
(cherry picked from commit 142242a02d)

* Fix: add unit test for getDependencyArgs and checkDependencyNeedInstall

Signed-off-by: zhaohuihui <zhaohuihui_yewu@cmss.chinamobile.com>
(cherry picked from commit c38f6e43b7)

* Fix: Simplified the checkDependencyNeedInstall func logic

Signed-off-by: zhaohuihui <zhaohuihui_yewu@cmss.chinamobile.com>
(cherry picked from commit f038023a98)

* Fix: add comments for checkDependencyNeedInstall

Signed-off-by: zhaohuihui <zhaohuihui_yewu@cmss.chinamobile.com>
(cherry picked from commit 1009de2fc5)

---------

Co-authored-by: zhaohuihui <zhaohuihui_yewu@cmss.chinamobile.com>
2023-04-28 10:46:32 +08:00
Somefive
c3b736f753 [Backport release-1.8] Fix: multi cluster inline policy load extra definitions (#5915)
* Fix: multi cluster inline policy load extra definitions

Signed-off-by: Somefive <yd219913@alibaba-inc.com>

* Fix: refactor readme

Signed-off-by: Somefive <yd219913@alibaba-inc.com>

---------

Signed-off-by: Somefive <yd219913@alibaba-inc.com>
2023-04-27 11:18:22 +08:00
github-actions[bot]
f343111f87 Fix: multicluster disable installation (#5925)
Signed-off-by: Somefive <yd219913@alibaba-inc.com>
(cherry picked from commit 33134ff0d0)

Co-authored-by: Somefive <yd219913@alibaba-inc.com>
2023-04-26 14:02:40 +08:00
github-actions[bot]
007901f9f1 Fix: fix terminate suspending steps (#5874)
Signed-off-by: FogDong <dongtianxin.tx@alibaba-inc.com>
(cherry picked from commit ecaa84ccfd)

Co-authored-by: FogDong <dongtianxin.tx@alibaba-inc.com>
2023-04-18 16:58:40 +08:00
github-actions[bot]
fcd721ffed fix bug if addon parameter is Empty (#5859)
Signed-off-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
(cherry picked from commit ebc631d7cf)

Co-authored-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
2023-04-14 17:45:57 +08:00
github-actions[bot]
37718a095d Feat: add new fields for addon metadata (#5857)
Signed-off-by: Jianbo Sun <jianbo.sjb@alibaba-inc.com>
(cherry picked from commit ed2cf3c742)

Co-authored-by: Jianbo Sun <jianbo.sjb@alibaba-inc.com>
2023-04-14 17:19:20 +08:00
Tianxin Dong
2ca81e037d Fix: fix the operate order in suspend & resume (#5842)
Signed-off-by: FogDong <dongtianxin.tx@alibaba-inc.com>
2023-04-13 14:15:29 +08:00
github-actions[bot]
33940c621a Fix: fix multi clusters bottom in vela adopt (#5837)
Signed-off-by: FogDong <dongtianxin.tx@alibaba-inc.com>
(cherry picked from commit 0f5a5de6e4)

Co-authored-by: FogDong <dongtianxin.tx@alibaba-inc.com>
2023-04-12 13:19:37 +08:00
github-actions[bot]
705ea38158 [Backport release-1.8] Refactor: the addon dependency installation logic is accurate to the cluster (#5817)
* Refactor: the addon dependency installation logic is accurate to the cluster

Signed-off-by: zhaohuihui <zhaohuihui_yewu@cmss.chinamobile.com>
(cherry picked from commit 5b10c3aae8)

* Fix: optimize the code based on the review

Signed-off-by: zhaohuihui <zhaohuihui_yewu@cmss.chinamobile.com>
(cherry picked from commit 2fb4c605fc)

* Feat: add unit test for func checkDependencyNeedInstall

Signed-off-by: zhaohuihui <zhaohuihui_yewu@cmss.chinamobile.com>
(cherry picked from commit 69d27f090c)

---------

Co-authored-by: zhaohuihui <zhaohuihui_yewu@cmss.chinamobile.com>
2023-04-07 17:16:57 +08:00
50 changed files with 810 additions and 89 deletions

View File

@@ -39,7 +39,7 @@ jobs:
continue-on-error: true
e2e-multi-cluster-tests:
runs-on: aliyun
runs-on: self-hosted
needs: [ detect-noop ]
if: needs.detect-noop.outputs.noop != 'true'
strategy:
@@ -49,11 +49,18 @@ jobs:
group: ${{ github.workflow }}-${{ github.ref }}-${{ matrix.k8s-version }}
cancel-in-progress: true
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f
- name: Install tools
run: |
sudo apt-get update
sudo apt-get install make gcc jq ca-certificates curl gnupg -y
snap install docker
snap install kubectl --classic
snap install helm --classic
- name: Setup Go
uses: actions/setup-go@4d34df0c2316fe8122ab82dc22947d607c0c91f9
with:

View File

@@ -39,7 +39,7 @@ jobs:
continue-on-error: true
e2e-rollout-tests:
runs-on: aliyun
runs-on: self-hosted
needs: [ detect-noop ]
if: needs.detect-noop.outputs.noop != 'true'
strategy:
@@ -62,6 +62,8 @@ jobs:
- name: Get dependencies
run: |
go get -v -t -d ./...
go install github.com/onsi/ginkgo/ginkgo
go get github.com/onsi/gomega/...
- name: Tear down K3d if exist
run: |

View File

@@ -39,7 +39,7 @@ jobs:
continue-on-error: true
e2e-tests:
runs-on: aliyun
runs-on: self-hosted
needs: [ detect-noop ]
if: needs.detect-noop.outputs.noop != 'true'
strategy:
@@ -54,6 +54,14 @@ jobs:
- name: Check out code into the Go module directory
uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f
- name: Install tools
run: |
sudo apt-get update
sudo apt-get install make gcc jq ca-certificates curl gnupg -y
snap install docker
snap install kubectl --classic
snap install helm --classic
- name: Setup Go
uses: actions/setup-go@4d34df0c2316fe8122ab82dc22947d607c0c91f9
with:
@@ -62,6 +70,8 @@ jobs:
- name: Get dependencies
run: |
go get -v -t -d ./...
go install github.com/onsi/ginkgo/ginkgo
go get github.com/onsi/gomega/...
- name: Tear down K3d if exist
run: |

View File

@@ -88,7 +88,7 @@ jobs:
version: ${{ env.GOLANGCI_VERSION }}
check-diff:
runs-on: aliyun
runs-on: self-hosted
needs: detect-noop
if: needs.detect-noop.outputs.noop != 'true'

View File

@@ -8,7 +8,7 @@ permissions:
jobs:
clean-image:
runs-on: aliyun
runs-on: self-hosted
steps:
- name: Cleanup image
run: docker image prune -f

View File

@@ -49,7 +49,6 @@ helm install --create-namespace -n vela-system kubevela kubevela/vela-core --wai
| `disableCaps` | Disable capability | `rollout` |
| `dependCheckWait` | dependCheckWait is the time to wait for ApplicationConfiguration's dependent-resource ready | `30s` |
### KubeVela workflow parameters
| Name | Description | Value |
@@ -59,7 +58,6 @@ helm install --create-namespace -n vela-system kubevela kubevela/vela-core --wai
| `workflow.backoff.maxTime.failedState` | The max backoff time of workflow in a failed condition | `300` |
| `workflow.step.errorRetryTimes` | The max retry times of a failed workflow step | `10` |
### KubeVela controller parameters
| Name | Description | Value |
@@ -77,7 +75,6 @@ helm install --create-namespace -n vela-system kubevela kubevela/vela-core --wai
| `webhookService.port` | KubeVela webhook service port | `9443` |
| `healthCheck.port` | KubeVela health check port | `9440` |
### KubeVela controller optimization parameters
| Name | Description | Value |
@@ -104,7 +101,6 @@ helm install --create-namespace -n vela-system kubevela kubevela/vela-core --wai
| `featureGates.sharedDefinitionStorageForApplicationRevision` | use definition cache to reduce duplicated definition storage for application revision, must be used with InformerCacheFilterUnnecessaryFields | `true` |
| `featureGates.disableWorkflowContextConfigMapCache` | disable the workflow context's configmap informer cache | `true` |
### MultiCluster parameters
| Name | Description | Value |
@@ -125,7 +121,6 @@ helm install --create-namespace -n vela-system kubevela kubevela/vela-core --wai
| `multicluster.clusterGateway.secureTLS.certPath` | Path to the certificate file | `/etc/k8s-cluster-gateway-certs` |
| `multicluster.clusterGateway.secureTLS.certManager.enabled` | Whether to enable cert-manager | `false` |
### Test parameters
| Name | Description | Value |
@@ -135,7 +130,6 @@ helm install --create-namespace -n vela-system kubevela kubevela/vela-core --wai
| `test.k8s.repository` | Test k8s repository | `oamdev/alpine-k8s` |
| `test.k8s.tag` | Test k8s tag | `1.18.2` |
### Common parameters
| Name | Description | Value |

View File

@@ -26,7 +26,6 @@ spec:
duration: parameter.duration
failDuration: parameter.failDuration
}
fail: op.#Steps & {
if check.failed != _|_ {
if check.failed == true {
@@ -36,14 +35,12 @@ spec:
}
}
}
wait: op.#ConditionalWait & {
continue: check.result
if check.message != _|_ {
message: check.message
}
}
parameter: {
// +usage=Query is a raw prometheus query to perform
query: string

View File

@@ -350,7 +350,7 @@ spec:
- mountPath: {{ .Values.admissionWebhooks.certificate.mountPath }}
name: tls-cert-vol
readOnly: true
{{ if and .Values.multicluster.clusterGateway.secureTLS.enabled .Values.multicluster.clusterGateway.direct }}
{{ if and .Values.multicluster.enabled .Values.multicluster.clusterGateway.secureTLS.enabled .Values.multicluster.clusterGateway.direct }}
- mountPath: /cluster-gateway-tls-cert
name: tls-cert-vol-cg
readOnly: true
@@ -362,7 +362,7 @@ spec:
secret:
defaultMode: 420
secretName: {{ template "kubevela.fullname" . }}-admission
{{ if and .Values.multicluster.clusterGateway.secureTLS.enabled .Values.multicluster.clusterGateway.direct }}
{{ if and .Values.multicluster.enabled .Values.multicluster.clusterGateway.secureTLS.enabled .Values.multicluster.clusterGateway.direct }}
- name: tls-cert-vol-cg
secret:
defaultMode: 420

View File

@@ -67,7 +67,6 @@ helm install --create-namespace -n vela-system kubevela kubevela/vela-minimal --
| `disableCaps` | Disable capability | `envbinding,rollout` |
| `dependCheckWait` | dependCheckWait is the time to wait for ApplicationConfiguration's dependent-resource ready | `30s` |
### KubeVela workflow parameters
| Name | Description | Value |
@@ -77,7 +76,6 @@ helm install --create-namespace -n vela-system kubevela kubevela/vela-minimal --
| `workflow.backoff.maxTime.failedState` | The max backoff time of workflow in a failed condition | `300` |
| `workflow.step.errorRetryTimes` | The max retry times of a failed workflow step | `10` |
### KubeVela controller parameters
| Name | Description | Value |
@@ -95,14 +93,12 @@ helm install --create-namespace -n vela-system kubevela kubevela/vela-minimal --
| `webhookService.port` | KubeVela webhook service port | `9443` |
| `healthCheck.port` | KubeVela health check port | `9440` |
### KubeVela controller optimization parameters
| Name | Description | Value |
| ------------------------ | ------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| `featureGates.applyOnce` | if enabled, the apply-once feature will be applied to all applications, no state-keep and no resource data storage in ResourceTracker | `false` |
### MultiCluster parameters
| Name | Description | Value |
@@ -120,7 +116,6 @@ helm install --create-namespace -n vela-system kubevela kubevela/vela-minimal --
| `multicluster.clusterGateway.secureTLS.enabled` | Whether to enable secure TLS | `true` |
| `multicluster.clusterGateway.secureTLS.certPath` | Path to the certificate file | `/etc/k8s-cluster-gateway-certs` |
### Test parameters
| Name | Description | Value |
@@ -130,7 +125,6 @@ helm install --create-namespace -n vela-system kubevela kubevela/vela-minimal --
| `test.k8s.repository` | Test k8s repository | `oamdev/alpine-k8s` |
| `test.k8s.tag` | Test k8s tag | `1.18.2` |
### Common parameters
| Name | Description | Value |

View File

@@ -26,7 +26,6 @@ spec:
duration: parameter.duration
failDuration: parameter.failDuration
}
fail: op.#Steps & {
if check.failed != _|_ {
if check.failed == true {
@@ -36,14 +35,12 @@ spec:
}
}
}
wait: op.#ConditionalWait & {
continue: check.result
if check.message != _|_ {
message: check.message
}
}
parameter: {
// +usage=Query is a raw prometheus query to perform
query: string

View File

@@ -18,6 +18,7 @@ package e2e
import (
"context"
"encoding/json"
"fmt"
"strings"
"time"
@@ -31,6 +32,7 @@ import (
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/e2e"
"github.com/oam-dev/kubevela/pkg/addon"
"github.com/oam-dev/kubevela/pkg/utils/common"
)
@@ -176,4 +178,160 @@ var _ = Describe("Addon Test", func() {
Expect(output).To(ContainSubstring("Successfully delete an addon registry my-repo"))
})
})
Context("Enable dependency addon test", func() {
It(" enable mock-dependence-rely without specified clusters when mock-dependence addon is not enabled", func() {
output, err := e2e.Exec("vela addon enable mock-dependence-rely")
Expect(err).NotTo(HaveOccurred())
Expect(output).To(ContainSubstring("enabled successfully."))
Eventually(func(g Gomega) {
app := &v1beta1.Application{}
Expect(k8sClient.Get(context.Background(), types.NamespacedName{Name: "addon-mock-dependence", Namespace: "vela-system"}, app)).Should(Succeed())
topologyPolicyValue := map[string]interface{}{}
for _, policy := range app.Spec.Policies {
if policy.Type == "topology" {
Expect(json.Unmarshal(policy.Properties.Raw, &topologyPolicyValue)).Should(Succeed())
break
}
}
Expect(topologyPolicyValue["clusterLabelSelector"]).Should(Equal(map[string]interface{}{}))
}, 30*time.Second).Should(Succeed())
})
It("enable mock-dependence-rely with specified clusters when mock-dependence addon is not enabled ", func() {
output, err := e2e.Exec("vela addon enable mock-dependence-rely2 --clusters local")
Expect(err).NotTo(HaveOccurred())
Expect(output).To(ContainSubstring("enabled successfully."))
Eventually(func(g Gomega) {
app := &v1beta1.Application{}
Expect(k8sClient.Get(context.Background(), types.NamespacedName{Name: "addon-mock-dependence2", Namespace: "vela-system"}, app)).Should(Succeed())
topologyPolicyValue := map[string]interface{}{}
for _, policy := range app.Spec.Policies {
if policy.Type == "topology" {
Expect(json.Unmarshal(policy.Properties.Raw, &topologyPolicyValue)).Should(Succeed())
break
}
}
Expect(topologyPolicyValue["clusters"]).Should(Equal([]interface{}{"local"}))
}, 30*time.Second).Should(Succeed())
})
It("enable mock-dependence-rely without specified clusters when mock-dependence addon was enabled with specified clusters", func() {
// 1. enable mock-dependence addon with local clusters
output, err := e2e.InteractiveExec("vela addon enable mock-dependence --clusters local myparam=test", func(c *expect.Console) {
_, err = c.SendLine("y")
Expect(err).NotTo(HaveOccurred())
})
Expect(err).NotTo(HaveOccurred())
Expect(output).To(ContainSubstring("enabled successfully."))
Eventually(func(g Gomega) {
// check application render cluster
app := &v1beta1.Application{}
Expect(k8sClient.Get(context.Background(), types.NamespacedName{Name: "addon-mock-dependence", Namespace: "vela-system"}, app)).Should(Succeed())
topologyPolicyValue := map[string]interface{}{}
for _, policy := range app.Spec.Policies {
if policy.Type == "topology" {
Expect(json.Unmarshal(policy.Properties.Raw, &topologyPolicyValue)).Should(Succeed())
break
}
}
Expect(topologyPolicyValue["clusters"]).Should(Equal([]interface{}{"local"}))
Expect(topologyPolicyValue["clusterLabelSelector"]).Should(BeNil())
}, 600*time.Second).Should(Succeed())
// 2. enable mock-dependence-rely addon without clusters
output1, err := e2e.InteractiveExec("vela addon enable mock-dependence-rely", func(c *expect.Console) {
_, err = c.SendLine("y")
Expect(err).NotTo(HaveOccurred())
})
Expect(err).NotTo(HaveOccurred())
Expect(output1).To(ContainSubstring("enabled successfully."))
// 3. enable mock-dependence-rely addon changes the mock-dependence topology policy
Eventually(func(g Gomega) {
app := &v1beta1.Application{}
Expect(k8sClient.Get(context.Background(), types.NamespacedName{Name: "addon-mock-dependence", Namespace: "vela-system"}, app)).Should(Succeed())
topologyPolicyValue := map[string]interface{}{}
for _, policy := range app.Spec.Policies {
if policy.Type == "topology" {
Expect(json.Unmarshal(policy.Properties.Raw, &topologyPolicyValue)).Should(Succeed())
break
}
}
Expect(topologyPolicyValue["clusterLabelSelector"]).Should(Equal(map[string]interface{}{}))
Expect(topologyPolicyValue["clusters"]).Should(BeNil())
}, 30*time.Second).Should(Succeed())
})
It("Test addon dependency with specified clusters", func() {
const clusterName = "k3s-default"
// enable addon
output, err := e2e.InteractiveExec("vela addon enable mock-dependence --clusters local myparam=test", func(c *expect.Console) {
_, err = c.SendLine("y")
Expect(err).NotTo(HaveOccurred())
})
Expect(err).NotTo(HaveOccurred())
Expect(output).To(ContainSubstring("enabled successfully."))
output1, err := e2e.Exec("vela ls -A")
Expect(err).NotTo(HaveOccurred())
Expect(output1).To(ContainSubstring("mock-dependence"))
output2, err := e2e.Exec("vela addon list")
Expect(err).NotTo(HaveOccurred())
Expect(output2).To(ContainSubstring("mock-dependence"))
// check dependence application parameter
Eventually(func(g Gomega) {
// check parameter
sec := &v1.Secret{}
g.Expect(k8sClient.Get(context.Background(), types.NamespacedName{Name: "addon-secret-mock-dependence", Namespace: "vela-system"}, sec)).Should(Succeed())
parameters := map[string]interface{}{}
json.Unmarshal(sec.Data[addon.AddonParameterDataKey], &parameters)
g.Expect(parameters).Should(BeEquivalentTo(map[string]interface{}{
"clusters": []interface{}{"local"},
"myparam": "test",
}))
// check application render cluster
app := &v1beta1.Application{}
Expect(k8sClient.Get(context.Background(), types.NamespacedName{Name: "addon-mock-dependence", Namespace: "vela-system"}, app)).Should(Succeed())
topologyPolicyValue := map[string]interface{}{}
for _, policy := range app.Spec.Policies {
if policy.Type == "topology" {
Expect(json.Unmarshal(policy.Properties.Raw, &topologyPolicyValue)).Should(Succeed())
break
}
}
fluxcdYaml, err1 := e2e.Exec("vela status addon-mock-dependence -n vela-system -oyaml")
Expect(err1).NotTo(HaveOccurred())
Expect(fluxcdYaml).To(ContainSubstring("mock-dependence"))
fluxcdStatus, err2 := e2e.Exec("vela addon status mock-dependence -v")
Expect(err2).NotTo(HaveOccurred())
Expect(fluxcdStatus).To(ContainSubstring("mock-dependence"))
Expect(topologyPolicyValue["clusters"]).Should(Equal([]interface{}{"local"}))
}, 600*time.Second).Should(Succeed())
// enable addon which rely on mock-dependence addon
e2e.InteractiveExec("vela addon enable mock-dependence-rely --clusters local,"+clusterName, func(c *expect.Console) {
_, err = c.SendLine("y")
Expect(err).NotTo(HaveOccurred())
})
// check mock-dependence application parameter
Eventually(func(g Gomega) {
sec := &v1.Secret{}
g.Expect(k8sClient.Get(context.Background(), types.NamespacedName{Name: "addon-secret-mock-dependence", Namespace: "vela-system"}, sec)).Should(Succeed())
parameters := map[string]interface{}{}
json.Unmarshal(sec.Data[addon.AddonParameterDataKey], &parameters)
g.Expect(parameters).Should(BeEquivalentTo(map[string]interface{}{
"clusters": []interface{}{"local", clusterName},
"myparam": "test",
}))
app := &v1beta1.Application{}
Expect(k8sClient.Get(context.Background(), types.NamespacedName{Name: "addon-mock-dependence", Namespace: "vela-system"}, app)).Should(Succeed())
topologyPolicyValue := map[string]interface{}{}
for _, policy := range app.Spec.Policies {
if policy.Type == "topology" {
Expect(json.Unmarshal(policy.Properties.Raw, &topologyPolicyValue)).Should(Succeed())
break
}
}
Expect(topologyPolicyValue["clusters"]).Should(Equal([]interface{}{"local", clusterName}))
}, 30*time.Second).Should(Succeed())
})
})
})

View File

@@ -0,0 +1,3 @@
# mock-dependence-rely
This is an addon template. Check how to build your own addon: https://kubevela.net/docs/platform-engineers/addon/intro

View File

@@ -0,0 +1,25 @@
// We put Definitions in definitions directory.
// References:
// - https://kubevela.net/docs/platform-engineers/cue/definition-edit
// - https://kubevela.net/docs/platform-engineers/addon/intro#definitions-directoryoptional
"mytraitb": {
alias: "mtb"
annotations: {}
attributes: {
appliesToWorkloads: [
"deployments.apps",
"replicasets.apps",
"statefulsets.apps",
]
conflictsWith: []
podDisruptive: false
workloadRefPath: ""
}
description: "My trait description."
labels: {}
type: "trait"
}
template: {
parameter: {param: ""}
outputs: {sample: {}}
}

View File

@@ -0,0 +1,10 @@
description: An addon for testing addon dependency with specified clusters.
icon: ""
invisible: false
name: mock-dependence-rely
tags:
- my-tag
version: 1.0.0
dependencies:
# install controller by helm.
- name: mock-dependence

View File

@@ -0,0 +1,10 @@
// parameter.cue is used to store addon parameters.
//
// You can use these parameters in template.cue or in resources/ by 'parameter.myparam'
//
// For example, you can use parameters to allow the user to customize
// container images, ports, and etc.
parameter: {
// +usage=Custom parameter description
myparam: *"mynsrely" | string
}

View File

@@ -0,0 +1,18 @@
// We put Components in resources directory.
// References:
// - https://kubevela.net/docs/end-user/components/references
// - https://kubevela.net/docs/platform-engineers/addon/intro#resources-directoryoptional
output: {
type: "k8s-objects"
properties: {
objects: [
{
// This creates a plain old Kubernetes namespace
apiVersion: "v1"
kind: "Namespace"
// We can use the parameter defined in parameter.cue like this.
metadata: name: parameter.myparam
},
]
}
}

View File

@@ -0,0 +1,9 @@
package main

// template.cue renders the addon as a KubeVela Application.
// This mock addon deploys nothing itself: it exists only to declare a
// dependency on another addon (see metadata.yaml), so components and
// policies are intentionally empty.
output: {
	apiVersion: "core.oam.dev/v1beta1"
	kind: "Application"
	spec: {
		components: []
		policies: []
	}
}

View File

@@ -0,0 +1,3 @@
# mock-dependence-rely2
This is an addon template. Check how to build your own addon: https://kubevela.net/docs/platform-engineers/addon/intro

View File

@@ -0,0 +1,10 @@
description: An addon for testing addon dependency with specified clusters.
icon: ""
invisible: false
name: mock-dependence-rely2
tags:
- my-tag
version: 1.0.0
dependencies:
# install controller by helm.
- name: mock-dependence2

View File

@@ -0,0 +1,4 @@
parameter: {
// +usage=Custom parameter description
myparam: *"mynsrely" | string
}

View File

@@ -0,0 +1,9 @@
package main
output: {
apiVersion: "core.oam.dev/v1beta1"
kind: "Application"
spec: {
components: []
policies: []
}
}

View File

@@ -0,0 +1,3 @@
# mock-dependence
This is an addon template. Check how to build your own addon: https://kubevela.net/docs/platform-engineers/addon/intro

View File

@@ -0,0 +1,25 @@
// We put Definitions in definitions directory.
// References:
// - https://kubevela.net/docs/platform-engineers/cue/definition-edit
// - https://kubevela.net/docs/platform-engineers/addon/intro#definitions-directoryoptional
"mytrait-depend": {
alias: "mt-depend"
annotations: {}
attributes: {
appliesToWorkloads: [
"deployments.apps",
"replicasets.apps",
"statefulsets.apps",
]
conflictsWith: []
podDisruptive: false
workloadRefPath: ""
}
description: "My trait description."
labels: {}
type: "trait"
}
template: {
parameter: {param: ""}
outputs: {sample: {}}
}

View File

@@ -0,0 +1,7 @@
description: An addon for testing addon dependency with specified clusters.
icon: ""
invisible: false
name: mock-dependence
tags:
- my-tag
version: 1.0.0

View File

@@ -0,0 +1,12 @@
// parameter.cue is used to store addon parameters.
//
// You can use these parameters in template.cue or in resources/ by 'parameter.myparam'
//
// For example, you can use parameters to allow the user to customize
// container images, ports, and etc.
parameter: {
// +usage=Custom parameter description
myparam: *"myns" | string
//+usage=Deploy to specified clusters. Leave empty to deploy to all clusters.
clusters?: [...string]
}

View File

@@ -0,0 +1,18 @@
// We put Components in resources directory.
// References:
// - https://kubevela.net/docs/end-user/components/references
// - https://kubevela.net/docs/platform-engineers/addon/intro#resources-directoryoptional
output: {
type: "k8s-objects"
properties: {
objects: [
{
// This creates a plain old Kubernetes namespace
apiVersion: "v1"
kind: "Namespace"
// We can use the parameter defined in parameter.cue like this.
metadata: name: parameter.myparam
},
]
}
}

View File

@@ -0,0 +1,24 @@
package main

// The namespace the mock addon deploys into, taken from the user-supplied
// "myparam" parameter (see parameter.cue).
_targetNamespace: parameter.myparam

// template.cue renders the addon as a KubeVela Application whose topology
// policy decides which clusters the addon's resources are delivered to.
output: {
	apiVersion: "core.oam.dev/v1beta1"
	kind: "Application"
	spec: {
		components: [],
		policies: [
			{
				type: "topology"
				name: "deploy-mock-dependency-ns"
				properties: {
					namespace: _targetNamespace
					// When the optional "clusters" parameter is set, deploy
					// only to those clusters ...
					if parameter.clusters != _|_ {
						clusters: parameter.clusters
					}
					// ... otherwise an empty clusterLabelSelector matches,
					// and deploys to, every managed cluster.
					if parameter.clusters == _|_ {
						clusterLabelSelector: {}
					}
				}
			},
		]
	}
}

View File

@@ -0,0 +1,3 @@
# mock-dependence2
This is an addon template. Check how to build your own addon: https://kubevela.net/docs/platform-engineers/addon/intro

View File

@@ -0,0 +1,7 @@
description: An addon for testing addon dependency with specified clusters.
icon: ""
invisible: false
name: mock-dependence2
tags:
- my-tag
version: 1.0.0

View File

@@ -0,0 +1,6 @@
parameter: {
// +usage=Custom parameter description
myparam: *"myns" | string
//+usage=Deploy to specified clusters. Leave empty to deploy to all clusters.
clusters?: [...string]
}

View File

@@ -0,0 +1,24 @@
package main
_targetNamespace: parameter.myparam
output: {
apiVersion: "core.oam.dev/v1beta1"
kind: "Application"
spec: {
components: [],
policies: [
{
type: "topology"
name: "deploy-mock-dependency-ns"
properties: {
namespace: _targetNamespace
if parameter.clusters != _|_ {
clusters: parameter.clusters
}
if parameter.clusters == _|_ {
clusterLabelSelector: {}
}
}
},
]
}
}

2
go.mod
View File

@@ -52,7 +52,7 @@ require (
github.com/imdario/mergo v0.3.13
github.com/kubevela/pkg v0.0.0-20230316114047-e2b41b377bac
github.com/kubevela/prism v1.7.0-alpha.1
github.com/kubevela/workflow v0.5.1-0.20230404061444-a4f3ec81fca7
github.com/kubevela/workflow v0.5.1-0.20230412142923-1f15ba091699
github.com/kyokomi/emoji v2.2.4+incompatible
github.com/mitchellh/hashstructure/v2 v2.0.2
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect

4
go.sum
View File

@@ -954,8 +954,8 @@ github.com/kubevela/pkg v0.0.0-20230316114047-e2b41b377bac h1:TLQchMx+BRTnHyebDp
github.com/kubevela/pkg v0.0.0-20230316114047-e2b41b377bac/go.mod h1:GilLxt+9L4sU2tLeZAGHga8wiYmjjfPX/Q6JkyuuXSM=
github.com/kubevela/prism v1.7.0-alpha.1 h1:oeZFn1Oy6gxSSFzMTfsWjLOCKaaooMVm1JGNK4j4Mlo=
github.com/kubevela/prism v1.7.0-alpha.1/go.mod h1:AJSDfdA+RkRSnWx3xEcogbmOTpX+l7RSIwqVHxwUtaI=
github.com/kubevela/workflow v0.5.1-0.20230404061444-a4f3ec81fca7 h1:iYsJLZRD/eLZkVxUgM3AdbHpsMHKxhf5YhsQDolEZ/U=
github.com/kubevela/workflow v0.5.1-0.20230404061444-a4f3ec81fca7/go.mod h1:+Ah40fwzX9fi/xeWdphew9J4kqfJiGwXw5MDeGPq3IU=
github.com/kubevela/workflow v0.5.1-0.20230412142923-1f15ba091699 h1:XvHs/8a10AvHnetlGSpylFnx8PFvONTzmR9CAzFAHbY=
github.com/kubevela/workflow v0.5.1-0.20230412142923-1f15ba091699/go.mod h1:+Ah40fwzX9fi/xeWdphew9J4kqfJiGwXw5MDeGPq3IU=
github.com/kylelemons/godebug v0.0.0-20160406211939-eadb3ce320cb/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=

View File

@@ -54,6 +54,7 @@ import (
"k8s.io/client-go/rest"
"k8s.io/client-go/util/retry"
"k8s.io/klog/v2"
stringslices "k8s.io/utils/strings/slices"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"
@@ -999,20 +1000,29 @@ func (h *Installer) getAddonMeta() (map[string]SourceMeta, error) {
// installDependency checks if addon's dependency and install it
func (h *Installer) installDependency(addon *InstallPackage) error {
var dependencies []string
var addonClusters = getClusters(h.args)
for _, dep := range addon.Dependencies {
_, err := FetchAddonRelatedApp(h.ctx, h.cli, dep.Name)
if err == nil {
continue
}
if !apierrors.IsNotFound(err) {
needInstallAddonDep, depClusters, err := checkDependencyNeedInstall(h.ctx, h.cli, dep.Name, addonClusters)
if err != nil {
return err
}
if !needInstallAddonDep {
continue
}
dependencies = append(dependencies, dep.Name)
if h.dryRun {
continue
}
depHandler := *h
depHandler.args = nil
// reset dependency addon clusters parameter
depArgs, depArgsErr := getDependencyArgs(h.ctx, h.cli, dep.Name, depClusters)
if depArgsErr != nil {
return depArgsErr
}
depHandler.args = depArgs
var depAddon *InstallPackage
// try to install the dependent addon from the same registry with the current addon
depAddon, err = h.loadInstallPackage(dep.Name, dep.Version)
@@ -1062,6 +1072,72 @@ func (h *Installer) installDependency(addon *InstallPackage) error {
return nil
}
// checkDependencyNeedInstall checks whether the dependency addon depName needs to be
// installed (or re-installed on more clusters) so that it covers the clusters the
// relying addon targets.
//
// Returns:
//   - needInstall: true when the dependency must be (re-)installed;
//   - depClusters: the cluster list the dependency should be installed on
//     (nil means "all clusters", rendered via clusterLabelSelector);
//   - an error when the dependency app cannot be fetched or its topology
//     policy cannot be decoded.
func checkDependencyNeedInstall(ctx context.Context, k8sClient client.Client, depName string, addonClusters []string) (bool, []string, error) {
	depApp, err := FetchAddonRelatedApp(ctx, k8sClient, depName)
	if err != nil {
		if !apierrors.IsNotFound(err) {
			return false, nil, err
		}
		// The dependency addon does not exist yet: install it on the same
		// clusters as the addon that relies on it.
		return true, addonClusters, nil
	}
	topologyPolicyValue := map[string]interface{}{}
	for _, policy := range depApp.Spec.Policies {
		if policy.Type == "topology" {
			if unmarshalErr := json.Unmarshal(policy.Properties.Raw, &topologyPolicyValue); unmarshalErr != nil {
				return false, nil, unmarshalErr
			}
			break
		}
	}
	// nil clusters indicates that the dependent addon is installed on all clusters
	if topologyPolicyValue["clusters"] == nil {
		return false, nil, nil
	}
	// nil addonClusters indicates the addon will be installed on all clusters,
	// thus we should set the dependent addon's clusters arg to be nil so that
	// it is installed on all clusters as well
	if addonClusters == nil {
		return true, nil, nil
	}
	// Determine whether the dependent addon's existing clusters can cover the
	// new addon's clusters, accumulating the union as we go.
	// Fix: use checked type assertions — the original code panicked on a
	// malformed topology policy (non-list "clusters" or non-string elements).
	originClusters, ok := topologyPolicyValue["clusters"].([]interface{})
	if !ok {
		// Malformed policy: be conservative and (re-)install the dependency
		// on the clusters the relying addon targets.
		return true, addonClusters, nil
	}
	var needInstallAddonDep = false
	var depClusters []string
	for _, r := range originClusters {
		if name, isString := r.(string); isString {
			depClusters = append(depClusters, name)
		}
	}
	for _, addonCluster := range addonClusters {
		if !stringslices.Contains(depClusters, addonCluster) {
			depClusters = append(depClusters, addonCluster)
			needInstallAddonDep = true
		}
	}
	return needInstallAddonDep, depClusters, nil
}
// getDependencyArgs loads the legacy parameters stored for the dependency addon
// depName and resets its clusters argument according to the depClusters that
// need to be installed.
//
// A nil depClusters removes the clusters arg entirely, so that rendering falls
// back to clusterLabelSelector and targets every cluster; otherwise the arg is
// overwritten with depClusters.
func getDependencyArgs(ctx context.Context, k8sClient client.Client, depName string, depClusters []string) (map[string]interface{}, error) {
	args, loadErr := GetAddonLegacyParameters(ctx, k8sClient, depName)
	if loadErr != nil && !apierrors.IsNotFound(loadErr) {
		return nil, loadErr
	}
	if depClusters == nil {
		// Delete clusters args; when rendering the addon it will use
		// clusterLabelSelector and render the addon to all clusters.
		// (delete on a missing key is a no-op, so no presence check is needed.)
		if args != nil {
			delete(args, types.ClustersArg)
		}
		return args, nil
	}
	if args == nil {
		args = map[string]interface{}{}
	}
	args[types.ClustersArg] = depClusters
	return args, nil
}
// checkDependency checks if addon's dependency
func (h *Installer) checkDependency(addon *InstallPackage) ([]string, error) {
var app v1beta1.Application

View File

@@ -174,6 +174,107 @@ var _ = Describe("Addon test", func() {
}, 30*time.Second, 300*time.Millisecond).Should(BeNil())
})
It("checkDependencyNeedInstall func test", func() {
// case1: dependency addon does not exist, addonClusters is not nil
depAddonName := "legacy-addon"
addonClusters := []string{"cluster1", "cluster2"}
needInstallAddonDep, depClusters, err := checkDependencyNeedInstall(ctx, k8sClient, depAddonName, addonClusters)
Expect(needInstallAddonDep).Should(BeTrue())
Expect(depClusters).Should(Equal(addonClusters))
Expect(err).Should(BeNil())
// case1.1: dependency addon not exist, adonClusters is nil
needInstallAddonDep1, depClusters1, err := checkDependencyNeedInstall(ctx, k8sClient, depAddonName, nil)
Expect(needInstallAddonDep1).Should(BeTrue())
Expect(depClusters1).Should(BeNil())
Expect(err).Should(BeNil())
// case2: dependency addon exist, no topology policy, addonClusters is not nil
app = v1beta1.Application{}
Expect(yaml.Unmarshal([]byte(legacyAppYaml), &app)).Should(BeNil())
app.SetNamespace(testns)
Expect(k8sClient.Create(ctx, &app)).Should(BeNil())
Eventually(func(g Gomega) {
needInstallAddonDep, depClusters, err := checkDependencyNeedInstall(ctx, k8sClient, depAddonName, addonClusters)
Expect(err).Should(BeNil())
Expect(needInstallAddonDep).Should(BeFalse())
Expect(depClusters).Should(BeNil())
}, 30*time.Second).Should(Succeed())
// case3: clusters is nil (no topology policy), addonClusters is nil
needInstallAddonDep2, depClusters2, err := checkDependencyNeedInstall(ctx, k8sClient, depAddonName, nil)
Expect(needInstallAddonDep2).Should(BeFalse())
Expect(depClusters2).Should(BeNil())
Expect(err).Should(BeNil())
// case4: clusters is nil, addonClusters is nil
app = v1beta1.Application{}
Expect(yaml.Unmarshal([]byte(legacy3AppYaml), &app)).Should(BeNil())
app.SetNamespace(testns)
Expect(k8sClient.Create(ctx, &app)).Should(BeNil())
Eventually(func(g Gomega) {
needInstallAddonDep, depClusters, err := checkDependencyNeedInstall(ctx, k8sClient, "legacy-addon3", nil)
Expect(err).Should(BeNil())
Expect(needInstallAddonDep).Should(BeFalse())
Expect(depClusters).Should(BeNil())
}, 60*time.Second).Should(Succeed())
// case5: clusters is not nil, addonClusters is nil,
app = v1beta1.Application{}
Expect(yaml.Unmarshal([]byte(legacy2AppYaml), &app)).Should(BeNil())
app.SetNamespace(testns)
Expect(k8sClient.Create(ctx, &app)).Should(BeNil())
Eventually(func(g Gomega) {
needInstallAddonDep, depClusters, err := checkDependencyNeedInstall(ctx, k8sClient, "legacy-addon2", nil)
Expect(err).Should(BeNil())
Expect(needInstallAddonDep).Should(BeTrue())
Expect(depClusters).Should(BeNil())
}, 60*time.Second).Should(Succeed())
// case6: clusters is [local], addonClusters is ["cluster1", "cluster2"]
Eventually(func(g Gomega) {
needInstallAddonDep, depClusters, err := checkDependencyNeedInstall(ctx, k8sClient, "legacy-addon2", addonClusters)
Expect(err).Should(BeNil())
Expect(needInstallAddonDep).Should(BeTrue())
Expect(depClusters).Should(Equal(append([]string{"local"}, addonClusters...)))
}, 60*time.Second).Should(Succeed())
})
// Exercises getDependencyArgs against the presence/absence of the addon
// parameter secret and a nil/non-nil depClusters argument.
It("getDependencyArgs func test", func() {
// case1: depClusters is nil and no parameter secret exists -> args stay nil
depAddonName := "legacy-addon"
depArgs, err := getDependencyArgs(ctx, k8sClient, depAddonName, nil)
Expect(depArgs).Should(BeNil())
Expect(err).Should(BeNil())
// case2: depClusters is not nil -> the clusters arg is set to depClusters
app = v1beta1.Application{}
Expect(yaml.Unmarshal([]byte(legacyAppYaml), &app)).Should(BeNil())
app.SetNamespace(testns)
// NOTE(review): the Create below is commented out — presumably because
// getDependencyArgs only reads the parameter secret and does not need the
// Application object; confirm, otherwise this setup is dead code.
//Expect(k8sClient.Create(ctx, &app)).Should(BeNil())
depClusters := []string{"cluster1", "cluster2"}
depArgs2, err := getDependencyArgs(ctx, k8sClient, depAddonName, depClusters)
Expect(depArgs2["clusters"]).Should(Equal(depClusters))
Expect(err).Should(BeNil())
// case3: a clusters arg exists in the secret but depClusters is nil
// -> the clusters arg must be removed while other args are kept
sec := v1.Secret{}
Expect(yaml.Unmarshal([]byte(secretYaml), &sec)).Should(BeNil())
Expect(k8sClient.Create(ctx, &sec)).Should(BeNil())
depArgs3, err := getDependencyArgs(ctx, k8sClient, "fluxcd", nil)
Expect(depArgs3).ToNot(BeNil())
Expect(depArgs3["clusters"]).Should(BeNil())
Expect(err).Should(BeNil())
// case4: the parameter secret holds malformed data (secretErrorYaml decodes
// to truncated JSON), so reading the legacy args must surface an error
sec1 := v1.Secret{}
Expect(yaml.Unmarshal([]byte(secretErrorYaml), &sec1)).Should(BeNil())
Expect(k8sClient.Create(ctx, &sec1)).Should(BeNil())
depArgs4, err := getDependencyArgs(ctx, k8sClient, "fluxcd1", nil)
Expect(depArgs4).Should(BeNil())
Expect(err).ToNot(BeNil())
})
It(" determineAddonAppName func test", func() {
app = v1beta1.Application{}
Expect(yaml.Unmarshal([]byte(legacyAppYaml), &app)).Should(BeNil())
@@ -577,6 +678,60 @@ spec:
properties:
image: crccheck/hello-world
port: 8000
`
legacy2AppYaml = `apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: legacy-addon2
spec:
components:
- name: express-server
type: webservice
properties:
image: crccheck/hello-world
port: 8000
policies:
- name: target-default
type: topology
properties:
clusters: ["local"]
namespace: "default"
`
legacy3AppYaml = `apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: legacy-addon3
spec:
components:
- name: express-server
type: webservice
properties:
image: crccheck/hello-world
port: 8000
policies:
- name: target-default
type: topology
properties:
clusterLabelSelector: {}
namespace: "default"
`
secretYaml = `apiVersion: v1
data:
addonParameterDataKey: eyJjbHVzdGVycyI6WyJsb2NhbCIsInZlbGEtbTEiXX0K
kind: Secret
metadata:
name: addon-secret-fluxcd
namespace: vela-system
type: Opaque
`
secretErrorYaml = `apiVersion: v1
data:
addonParameterDataKey: eyJjbHVzdGVycyI6WyJsb2NhbCIsInZlbGEtbTEiXQo=
kind: Secret
metadata:
name: addon-secret-fluxcd1
namespace: vela-system
type: Opaque
`
deployYaml = `apiVersion: apps/v1
kind: Deployment

View File

@@ -83,17 +83,21 @@ type WholeAddonPackage struct {
// Meta defines the format for a single addon
type Meta struct {
Name string `json:"name" validate:"required"`
Version string `json:"version"`
Description string `json:"description"`
Icon string `json:"icon"`
URL string `json:"url,omitempty"`
Tags []string `json:"tags,omitempty"`
Name string `json:"name" validate:"required"`
Version string `json:"version"`
Description string `json:"description"`
Icon string `json:"icon"`
URL string `json:"url,omitempty"`
Tags []string `json:"tags,omitempty"`
// UXPlugins used for velaux plugins download/install with the use of addon registry.
UXPlugins map[string]string `json:"uxPlugins,omitempty"`
DeployTo *DeployTo `json:"deployTo,omitempty"`
Dependencies []*Dependency `json:"dependencies,omitempty"`
NeedNamespace []string `json:"needNamespace,omitempty"`
Invisible bool `json:"invisible"`
SystemRequirements *SystemRequirements `json:"system,omitempty"`
// Annotations used for addon maintainers to add their own description or extensions to metadata.
Annotations map[string]string `json:"annotations,omitempty"`
}
// DeployTo defines where the addon to deploy to

View File

@@ -22,6 +22,7 @@ import (
"fmt"
"strings"
"github.com/kubevela/pkg/multicluster"
"github.com/pkg/errors"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -70,6 +71,7 @@ type Template struct {
// It returns a helper struct, Template, which will be used for further
// processing.
func LoadTemplate(ctx context.Context, dm discoverymapper.DiscoveryMapper, cli client.Reader, capName string, capType types.CapType) (*Template, error) {
ctx = multicluster.WithCluster(ctx, multicluster.Local)
// Application Controller only load template from ComponentDefinition and TraitDefinition
switch capType {
case types.TypeComponentDefinition, types.TypeWorkload:

View File

@@ -600,9 +600,9 @@ func (m *GoDefModifier) genDedicatedFunc() []*j.Statement {
j.Id("found").Op("=").True(),
j.Break(),
),
j.If(j.Op("!").Id("found")).Block(
j.Id(m.defFuncReceiver).Dot("Base").Dot("Traits").Op("=").Append(j.Id(m.defFuncReceiver).Dot("Base").Dot("Traits"), j.Id("addTrait")),
),
),
j.If(j.Op("!").Id("found")).Block(
j.Id(m.defFuncReceiver).Dot("Base").Dot("Traits").Op("=").Append(j.Id(m.defFuncReceiver).Dot("Base").Dot("Traits"), j.Id("addTrait")),
),
),
j.Return(j.Id(m.defFuncReceiver)),

View File

@@ -126,12 +126,12 @@ func init() {
// HTTPOption define the https options
type HTTPOption struct {
Username string
Password string
CaFile string
CertFile string
KeyFile string
InsecureSkipTLS bool
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
CaFile string `json:"caFile,omitempty"`
CertFile string `json:"certFile,omitempty"`
KeyFile string `json:"keyFile,omitempty"`
InsecureSkipTLS bool `json:"insecureSkipTLS,omitempty"`
}
// InitBaseRestConfig will return reset config for create controller runtime client

View File

@@ -115,16 +115,6 @@ func SuspendWorkflow(ctx context.Context, kubecli client.Client, app *v1beta1.Ap
found := stepName == ""
for i, step := range steps {
if step.Phase != workflowv1alpha1.WorkflowStepPhaseRunning {
continue
}
if stepName == "" {
wfUtils.OperateSteps(steps, i, -1, workflowv1alpha1.WorkflowStepPhaseSuspending)
} else if stepName == step.Name {
wfUtils.OperateSteps(steps, i, -1, workflowv1alpha1.WorkflowStepPhaseSuspending)
found = true
break
}
for j, sub := range step.SubStepsStatus {
if sub.Phase != workflowv1alpha1.WorkflowStepPhaseRunning {
continue
@@ -137,6 +127,16 @@ func SuspendWorkflow(ctx context.Context, kubecli client.Client, app *v1beta1.Ap
break
}
}
if step.Phase != workflowv1alpha1.WorkflowStepPhaseRunning {
continue
}
if stepName == "" {
wfUtils.OperateSteps(steps, i, -1, workflowv1alpha1.WorkflowStepPhaseSuspending)
} else if stepName == step.Name {
wfUtils.OperateSteps(steps, i, -1, workflowv1alpha1.WorkflowStepPhaseSuspending)
found = true
break
}
}
if !found {
return fmt.Errorf("can not find step %s", stepName)
@@ -209,16 +209,6 @@ func ResumeWorkflow(ctx context.Context, kubecli client.Client, app *v1beta1.App
found := stepName == ""
for i, step := range steps {
if step.Phase != workflowv1alpha1.WorkflowStepPhaseSuspending {
continue
}
if stepName == "" {
wfUtils.OperateSteps(steps, i, -1, workflowv1alpha1.WorkflowStepPhaseRunning)
} else if stepName == step.Name {
wfUtils.OperateSteps(steps, i, -1, workflowv1alpha1.WorkflowStepPhaseRunning)
found = true
break
}
for j, sub := range step.SubStepsStatus {
if sub.Phase != workflowv1alpha1.WorkflowStepPhaseSuspending {
continue
@@ -231,6 +221,16 @@ func ResumeWorkflow(ctx context.Context, kubecli client.Client, app *v1beta1.App
break
}
}
if step.Phase != workflowv1alpha1.WorkflowStepPhaseSuspending {
continue
}
if stepName == "" {
wfUtils.OperateSteps(steps, i, -1, workflowv1alpha1.WorkflowStepPhaseRunning)
} else if stepName == step.Name {
wfUtils.OperateSteps(steps, i, -1, workflowv1alpha1.WorkflowStepPhaseRunning)
found = true
break
}
}
if !found {
@@ -489,7 +489,7 @@ func TerminateWorkflow(ctx context.Context, kubecli client.Client, app *v1beta1.
if step.Reason != wfTypes.StatusReasonFailedAfterRetries && step.Reason != wfTypes.StatusReasonTimeout {
steps[i].Reason = wfTypes.StatusReasonTerminate
}
case workflowv1alpha1.WorkflowStepPhaseRunning:
case workflowv1alpha1.WorkflowStepPhaseRunning, workflowv1alpha1.WorkflowStepPhaseSuspending:
steps[i].Phase = workflowv1alpha1.WorkflowStepPhaseFailed
steps[i].Reason = wfTypes.StatusReasonTerminate
default:
@@ -500,7 +500,7 @@ func TerminateWorkflow(ctx context.Context, kubecli client.Client, app *v1beta1.
if sub.Reason != wfTypes.StatusReasonFailedAfterRetries && sub.Reason != wfTypes.StatusReasonTimeout {
steps[i].SubStepsStatus[j].Reason = wfTypes.StatusReasonTerminate
}
case workflowv1alpha1.WorkflowStepPhaseRunning:
case workflowv1alpha1.WorkflowStepPhaseRunning, workflowv1alpha1.WorkflowStepPhaseSuspending:
steps[i].SubStepsStatus[j].Phase = workflowv1alpha1.WorkflowStepPhaseFailed
steps[i].SubStepsStatus[j].Reason = wfTypes.StatusReasonTerminate
default:

View File

@@ -72,6 +72,17 @@ var _ = Describe("Kruise rollout test", func() {
It("Terminate workflow", func() {
checkApp := v1beta1.Application{}
Expect(k8sClient.Get(ctx, types.NamespacedName{Namespace: "default", Name: "opt-app"}, &checkApp)).Should(BeNil())
checkApp.Status.Workflow = &common.WorkflowStatus{
Steps: []workflowv1alpha1.WorkflowStepStatus{
{
StepStatus: workflowv1alpha1.StepStatus{
Name: "step1",
Type: "suspend",
Phase: workflowv1alpha1.WorkflowStepPhaseSuspending,
},
},
},
}
operator := NewApplicationWorkflowOperator(k8sClient, nil, checkApp.DeepCopy())
Expect(operator.Terminate(ctx)).Should(BeNil())
checkApp = v1beta1.Application{}
@@ -98,7 +109,7 @@ var _ = Describe("Kruise rollout test", func() {
StepStatus: workflowv1alpha1.StepStatus{
Name: "step1",
Type: "suspend",
Phase: workflowv1alpha1.WorkflowStepPhaseRunning,
Phase: workflowv1alpha1.WorkflowStepPhaseSuspending,
},
},
},

View File

@@ -87,9 +87,11 @@ import "list"
for key, kinds in resourceCategoryMap if list.Contains(kinds, r.kind) {
_category: key
},
_cluster: r.metadata.annotations["app.oam.dev/cluster"]
if r.metadata.annotations != _|_ if r.metadata.annotations["app.oam.dev/cluster"] != _|_ {
_cluster: r.metadata.annotations["app.oam.dev/cluster"]
}
}]
_clusters: [ for r in _resources {r._cluster} ]
_clusters: [ for r in _resources if r._cluster != _|_ {r._cluster} ]
resourceMap: {
for key, val in resourceCategoryMap {
"\(key)": [ for r in _resources if r._category == key {r}]
@@ -111,7 +113,9 @@ import "list"
apiVersion: r.apiVersion
kind: r.kind
metadata: name: r.metadata.name
metadata: annotations: "app.oam.dev/cluster": "\(r._cluster)"
if r._cluster != _|_ {
metadata: annotations: "app.oam.dev/cluster": (r._cluster)
}
}]
},
if len(resourceMap.ns) > 0 {
@@ -121,7 +125,9 @@ import "list"
apiVersion: r.apiVersion
kind: r.kind
metadata: name: r.metadata.name
metadata: annotations: "app.oam.dev/cluster": "\(r._cluster)"
if r._cluster != _|_ {
metadata: annotations: "app.oam.dev/cluster": (r._cluster)
}
}]
},
for r in resourceMap.workload + resourceMap.service {
@@ -147,7 +153,9 @@ import "list"
if r.metadata.namespace != _|_ {
metadata: namespace: r.metadata.namespace
}
metadata: annotations: "app.oam.dev/cluster": "\(r._cluster)"
if r._cluster != _|_ {
metadata: annotations: "app.oam.dev/cluster": (r._cluster)
}
}]
},
for kind, rs in unknownByKinds {
@@ -157,19 +165,21 @@ import "list"
apiVersion: r.apiVersion
kind: r.kind
metadata: name: r.metadata.name
metadata: annotations: "app.oam.dev/cluster": "\(r._cluster)"
if r._cluster != _|_ {
metadata: annotations: "app.oam.dev/cluster": (r._cluster)
}
}]
},
]
clusterCompMap: {
for cluster in _clusters {
"\(cluster)": [ for comp in comps if comp.properties.objects[0].metadata.annotations["app.oam.dev/cluster"] == cluster {comp.name} ]
"\(cluster)": [ for comp in comps if comp.properties.objects[0].metadata.annotations != _|_ if comp.properties.objects[0].metadata.annotations["app.oam.dev/cluster"] == cluster {comp.name} ]
}
}
compClusterMap: {
for comp in comps {
for comp in comps if comp.properties.objects[0].metadata.annotations != _|_ {
"\(comp.name)": comp.properties.objects[0].metadata.annotations["app.oam.dev/cluster"]
}
}

View File

@@ -288,9 +288,9 @@ func (opt *AdoptOptions) MultipleRun(f velacmd.Factory, cmd *cobra.Command) erro
_, _ = fmt.Fprintf(opt.Out, "Warning: failed to list resources from %s/%s: %s", apiVersion, kind, err.Error())
continue
}
engine := resourcetopology.New(opt.ResourceTopologyRule)
dedup := make([]k8s.ResourceIdentifier, 0)
for _, item := range list.Items {
engine := resourcetopology.New(opt.ResourceTopologyRule)
itemIdentifier := k8s.ResourceIdentifier{
Name: item.GetName(),
Namespace: item.GetNamespace(),

View File

@@ -58,8 +58,10 @@ commonPeerResources: [{
resource: "configMap"
selectors: {
name: [
for v in context.data.spec.template.spec.volumes if v.configMap != _|_ if v.configMap.name != _|_ {
v.configMap.name
if context.data.spec.template.spec.volumes != _|_ {
for v in context.data.spec.template.spec.volumes if v.configMap != _|_ if v.configMap.name != _|_ {
v.configMap.name
},
},
]
}
@@ -68,8 +70,10 @@ commonPeerResources: [{
resource: "secret"
selectors: {
name: [
for v in context.data.spec.template.spec.volumes if v.secret != _|_ if v.secret.name != _|_ {
v.secret.name
if context.data.spec.template.spec.volumes != _|_ {
for v in context.data.spec.template.spec.volumes if v.secret != _|_ if v.secret.name != _|_ {
v.secret.name
},
},
]
}

View File

@@ -48,7 +48,6 @@ func (m *Menu) StackPop(_, new model.View) {
} else {
m.UpdateMenu(new.Hint())
}
}
// StackPush change itself when accept "push" notify from app's main view

View File

@@ -134,7 +134,7 @@ func (v *ApplicationView) Title() string {
func (v *ApplicationView) bindKeys() {
v.Actions().Delete([]tcell.Key{tcell.KeyEnter})
v.Actions().Add(model.KeyActions{
tcell.KeyESC: model.KeyAction{Description: "Exist", Action: v.app.Exist, Visible: true, Shared: true},
tcell.KeyESC: model.KeyAction{Description: "Exit", Action: v.app.Exist, Visible: true, Shared: true},
tcell.KeyEnter: model.KeyAction{Description: "Managed Resource", Action: v.managedResourceView, Visible: true, Shared: true},
component.KeyN: model.KeyAction{Description: "Select Namespace", Action: v.namespaceView, Visible: true, Shared: true},
component.KeyY: model.KeyAction{Description: "Yaml", Action: v.yamlView, Visible: true, Shared: true},

View File

@@ -991,5 +991,32 @@ var _ = Describe("Test multicluster scenario", func() {
g.Expect(app.Status.Services[0].Traits[0].Healthy).Should(BeTrue())
}).WithTimeout(20 * time.Second).Should(Succeed())
})
// Verifies that a deploy workflow step carrying an inline (unnamed-in-spec)
// policy is applied: the inline override policy in the step definition scales
// the workload, which must be reflected in the resulting Deployment.
It("Test application carrying deploy step with inline policy", func() {
ctx := context.Background()
// Install the custom workflow-step definition that deploys with inline policies.
wsDef := &v1beta1.WorkflowStepDefinition{}
bs, err := os.ReadFile("./testdata/def/inline-deploy.yaml")
Expect(err).Should(Succeed())
Expect(yaml.Unmarshal(bs, wsDef)).Should(Succeed())
wsDef.SetNamespace(namespace)
Expect(k8sClient.Create(ctx, wsDef)).Should(Succeed())
// Create the application that uses the inline-deploy step.
app := &v1beta1.Application{}
bs, err = os.ReadFile("./testdata/app/app-carrying-deploy-step-with-inline-policy.yaml")
Expect(err).Should(Succeed())
Expect(yaml.Unmarshal(bs, app)).Should(Succeed())
app.SetNamespace(namespace)
// Retry creation: the webhook/definition may need a moment to become ready.
Eventually(func(g Gomega) {
g.Expect(k8sClient.Create(ctx, app)).Should(Succeed())
}).WithPolling(2 * time.Second).WithTimeout(5 * time.Second).Should(Succeed())
appKey := client.ObjectKeyFromObject(app)
// Wait for the application to reconcile to the running phase.
Eventually(func(g Gomega) {
_app := &v1beta1.Application{}
g.Expect(k8sClient.Get(ctx, appKey, _app)).Should(Succeed())
g.Expect(_app.Status.Phase).Should(Equal(common.ApplicationRunning))
}).WithPolling(2 * time.Second).WithTimeout(20 * time.Second).Should(Succeed())
// The inline override policy attaches a scaler trait with replicas: 0,
// so the dispatched Deployment must end up with zero replicas.
_deploy := &appsv1.Deployment{}
Expect(k8sClient.Get(ctx, appKey, _deploy)).Should(Succeed())
Expect(int(*_deploy.Spec.Replicas)).Should(Equal(0))
})
})
})

View File

@@ -0,0 +1,19 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: test
spec:
components:
- name: test
type: webservice
properties:
image: nginx:1.20
policies:
- type: topology
name: topo
properties:
clusters: ["cluster-worker"]
workflow:
steps:
- type: inline-deploy
name: deploy

View File

@@ -0,0 +1,25 @@
apiVersion: core.oam.dev/v1beta1
kind: WorkflowStepDefinition
metadata:
name: inline-deploy
spec:
schematic:
cue:
template: |
import "vela/op"
deploy: op.#Deploy & {
policies: []
parallelism: 5
ignoreTerraformComponent: true
inlinePolicies: [{
type: "override"
name: "set-replica"
properties: components: [{
traits: [{
type: "scaler"
properties: replicas: 0
}]
}]
}]
}
parameter: {}

View File

@@ -76,7 +76,7 @@ var _ = Describe("HealthScope", func() {
Expect(k8sClient.Delete(ctx, &ns, client.PropagationPolicy(metav1.DeletePropagationForeground))).Should(BeNil())
})
It("Test an application with health policy", func() {
PIt("Test an application with health policy", func() {
By("Apply a healthy application")
var newApp v1beta1.Application
var healthyAppName, unhealthyAppName string