Compare commits

..

43 Commits

Author SHA1 Message Date
github-actions[bot]
b1e7408ffb Fix: fix logs to record the right publish version (#4474)
Signed-off-by: yangsoon <songyang.song@alibaba-inc.com>
(cherry picked from commit 4846104c8f)

Co-authored-by: yangsoon <songyang.song@alibaba-inc.com>
2022-07-27 01:12:17 +08:00
github-actions[bot]
7b4e771ff8 Fix: The apply failure error is ignored when the workflow is executed (#4459)
Signed-off-by: yangsoon <songyang.song@alibaba-inc.com>
(cherry picked from commit b1d8e6c88b)

Co-authored-by: yangsoon <songyang.song@alibaba-inc.com>
2022-07-25 22:17:28 +08:00
github-actions[bot]
21534a5909 Fix: fix the goroutine leak in http request (#4302)
Signed-off-by: FogDong <dongtianxin.tx@alibaba-inc.com>
(cherry picked from commit 559ef83abd)

Co-authored-by: FogDong <dongtianxin.tx@alibaba-inc.com>
2022-07-01 17:53:59 +08:00
github-actions[bot]
e6bcc7de1f Fix: add parse comments in lookupScript to make patch work (#3843)
Signed-off-by: FogDong <dongtianxin.tx@alibaba-inc.com>
(cherry picked from commit 1758dc319d)

Co-authored-by: FogDong <dongtianxin.tx@alibaba-inc.com>
2022-05-10 13:38:07 +08:00
github-actions[bot]
a67270fb00 Fix: pend registry test (#3422)
Signed-off-by: Somefive <yd219913@alibaba-inc.com>
(cherry picked from commit c8d9035109)

Co-authored-by: Somefive <yd219913@alibaba-inc.com>
2022-03-11 11:38:25 +08:00
yangs
ef80b6617e Fix: Helm Chart values.yaml typo (#3405)
Signed-off-by: yangsoon <songyang.song@alibaba-inc.com>

Co-authored-by: yangsoon <songyang.song@alibaba-inc.com>
2022-03-10 10:03:52 +08:00
github-actions[bot]
54469a970a Fix: upgrade dependency to fix alerts (#3383)
Signed-off-by: Somefive <yd219913@alibaba-inc.com>
(cherry picked from commit 910460b0a7)

Co-authored-by: Somefive <yd219913@alibaba-inc.com>
2022-03-07 14:28:25 +08:00
github-actions[bot]
7af36b0971 Fix: remove unused variable (#3380)
Signed-off-by: Somefive <yd219913@alibaba-inc.com>
(cherry picked from commit dc7b799d97)

Co-authored-by: Somefive <yd219913@alibaba-inc.com>
2022-03-07 11:59:57 +08:00
github-actions[bot]
316e21791f fix windows bug (#3357)
Signed-off-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
(cherry picked from commit 7b87a23f5a)

Co-authored-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
2022-03-03 18:13:13 +08:00
github-actions[bot]
b7935e88d0 [Backport release-1.2] Fix: Fix the inaccurate judgment of ready status (#3349)
* fix: Fix the inaccurate judgment of ready status

Signed-off-by: kingram <kingram@163.com>
(cherry picked from commit 8ef9115766)

* fix: solve inaccurate isHealth

Signed-off-by: kingram <kingram@163.com>
(cherry picked from commit f5b3ba9483)

* fix: update readyReplicas type

Signed-off-by: kingram <kingram@163.com>
(cherry picked from commit 6b8a8a191c)

Co-authored-by: kingram <kingram@163.com>
2022-03-02 13:19:00 +08:00
github-actions[bot]
07c5b26eaa fix hard code service account name in test (#3348)
Signed-off-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
(cherry picked from commit 988b877997)

Co-authored-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
2022-03-02 13:17:27 +08:00
github-actions[bot]
5b59db5f0b [Backport release-1.2] Fix: add process context in workflow (#3335)
* Fix: add process context in workflow

Signed-off-by: FogDong <dongtianxin.tx@alibaba-inc.com>
(cherry picked from commit 9af66e2c0b)

* add context data in process context

Signed-off-by: FogDong <dongtianxin.tx@alibaba-inc.com>
(cherry picked from commit 3b17fdf525)

* delete usesless func

Signed-off-by: FogDong <dongtianxin.tx@alibaba-inc.com>
(cherry picked from commit 81f0c56f76)

* fix ut

Signed-off-by: FogDong <dongtianxin.tx@alibaba-inc.com>
(cherry picked from commit d7d2670260)

* use multi cluster ctx in process ctx

Signed-off-by: FogDong <dongtianxin.tx@alibaba-inc.com>
(cherry picked from commit 019e32c321)

* remove debug log

Signed-off-by: FogDong <dongtianxin.tx@alibaba-inc.com>
(cherry picked from commit 92af8a013a)

Co-authored-by: FogDong <dongtianxin.tx@alibaba-inc.com>
2022-02-28 11:03:26 +08:00
Jianbo Sun
f6032f3de9 Revert "[Backport release-1.2] Feat: rollout controller is disabled by default (#3322)" (#3325)
This reverts commit 2f825f3e0c.
2022-02-23 20:22:46 +08:00
github-actions[bot]
2f825f3e0c [Backport release-1.2] Feat: rollout controller is disabled by default (#3322)
* Feat: rollout controller is disabled by default

Signed-off-by: barnettZQG <barnett.zqg@gmail.com>
(cherry picked from commit 8c22096226)

* Feat: change rollout image pull policy

Signed-off-by: barnettZQG <barnett.zqg@gmail.com>
(cherry picked from commit ddbcdfb2a6)

* Fix: remove controller from the rollout addon in testdata

Signed-off-by: barnettZQG <barnett.zqg@gmail.com>
(cherry picked from commit 3517569a1a)

* Feat: rollout controller is disabled by default

Signed-off-by: barnettZQG <barnett.zqg@gmail.com>
(cherry picked from commit f5c48fca8a)

* Fix: extended waiting time for the addon mock server

Signed-off-by: barnettZQG <barnett.zqg@gmail.com>
(cherry picked from commit 718b58ffe9)

Co-authored-by: barnettZQG <barnett.zqg@gmail.com>
2022-02-23 19:45:38 +08:00
github-actions[bot]
4c6292b1c2 Fix: The order of status displayed by vela ls is not correct (#3316)
Signed-off-by: StevenLeiZhang <zhangleiic@163.com>
(cherry picked from commit f1e0e45324)

Co-authored-by: StevenLeiZhang <zhangleiic@163.com>
2022-02-23 14:53:07 +08:00
github-actions[bot]
ec7aa50584 [Backport release-1.2] Fix: addon store&show complicated parameter (#3315)
* fix

fix complicate args storage

Signed-off-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
(cherry picked from commit c6ff4f1241)

* wrap logic in func and add mock test

Signed-off-by: 楚岳 <wangyike.wyk@alibaba-inc.com>

solve confict

Signed-off-by: 楚岳 <wangyike.wyk@alibaba-inc.com>

fix

(cherry picked from commit 1e000ea33f)

Co-authored-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
2022-02-23 14:36:27 +08:00
github-actions[bot]
739bed82c2 Feat: add style and immutable parameters for uischema (#3314)
Signed-off-by: barnettZQG <barnett.zqg@gmail.com>
(cherry picked from commit 01093c7170)

Co-authored-by: barnettZQG <barnett.zqg@gmail.com>
2022-02-23 14:33:19 +08:00
wyike
bf03898851 cherry-pick 3340 to release 1.2 (#3313)
Signed-off-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
2022-02-23 13:31:09 +08:00
github-actions[bot]
1ae4216a7a Feat: rework cluster to align velaux and cli (#3307)
Signed-off-by: Somefive <yd219913@alibaba-inc.com>
(cherry picked from commit 800d46f038)

Co-authored-by: Somefive <yd219913@alibaba-inc.com>
2022-02-22 16:24:01 +08:00
github-actions[bot]
2b97960608 [Backport release-1.2] Fix: Let versioned controller uses a separate election locker in one cluster. (#3306)
* versioned leader election id

Signed-off-by: Jian.Li <lj176172@alibaba-inc.com>
(cherry picked from commit f55b839b0f)

* modify GenerateLeaderElectionID

Signed-off-by: Jian.Li <lj176172@alibaba-inc.com>
(cherry picked from commit 23230f711c)

* add license header

Signed-off-by: Jian.Li <lj176172@alibaba-inc.com>
(cherry picked from commit ec987e5fde)

Co-authored-by: Jian.Li <lj176172@alibaba-inc.com>
2022-02-21 22:00:31 +08:00
github-actions[bot]
af29eb020f [Backport release-1.2] Fix: change seldon service from istio to ambassador (#3303)
* Fix: change seldon service from istio to ambassador

Signed-off-by: FogDong <dongtianxin.tx@alibaba-inc.com>
(cherry picked from commit 29328cc86d)

* fix lint

Signed-off-by: FogDong <dongtianxin.tx@alibaba-inc.com>
(cherry picked from commit ade7a7de48)

* get service name form sdep

Signed-off-by: FogDong <dongtianxin.tx@alibaba-inc.com>
(cherry picked from commit 553cb68fb3)

Co-authored-by: FogDong <dongtianxin.tx@alibaba-inc.com>
2022-02-20 13:07:03 +08:00
wyike
f9ee044d45 cherry pick 3280 (#3301)
Signed-off-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
2022-02-20 13:05:37 +08:00
github-actions[bot]
91d37e7773 Fix(charts): the qps and brust of reconcile client configurable (#3297)
Signed-off-by: yangsoon <songyang.song@alibaba-inc.com>
(cherry picked from commit 976bd39016)

Co-authored-by: yangsoon <songyang.song@alibaba-inc.com>
2022-02-18 16:28:38 +08:00
github-actions[bot]
eb5c730e36 Fix: not steady unit test (#3295)
The UT `TestGetOpenAPISchemaFromTerraformComponentDefinition` won't
succeed 100%. The required variables in the generated isn't in the expected
order all the time.

Signed-off-by: Zheng Xi Zhou <zzxwill@gmail.com>
(cherry picked from commit 0632044bca)

Co-authored-by: Zheng Xi Zhou <zzxwill@gmail.com>
2022-02-18 15:18:34 +08:00
github-actions[bot]
b1f76f6087 Fix: make e2e CI more stable in vela show case (#3294)
Signed-off-by: Jianbo Sun <jianbo.sjb@alibaba-inc.com>
(cherry picked from commit 386ae82d0f)

Co-authored-by: Jianbo Sun <jianbo.sjb@alibaba-inc.com>
2022-02-18 15:07:42 +08:00
Jian.Li
4d50017622 Feat: application support controller requirement (#3192) (#3290)
* application controller version control

Signed-off-by: Jian.Li <lj176172@alibaba-inc.com>

* modify command arg name

Signed-off-by: Jian.Li <lj176172@alibaba-inc.com>
2022-02-18 13:05:33 +08:00
github-actions[bot]
f4d4416789 concurrent reconciles configurable (#3288)
Signed-off-by: jrkeen <jrkeen@hotmail.com>
(cherry picked from commit bc05d68292)

Co-authored-by: jrkeen <jrkeen@hotmail.com>
2022-02-18 11:05:48 +08:00
github-actions[bot]
b108801b60 Chore: remove useless controller args to avoid confusion (#3286)
Signed-off-by: Jianbo Sun <jianbo.sjb@alibaba-inc.com>
(cherry picked from commit 12832fed5c)

Co-authored-by: Jianbo Sun <jianbo.sjb@alibaba-inc.com>
2022-02-17 19:14:38 +08:00
github-actions[bot]
6d7180af2d Fix: vela status api can not return customized arguments of addon (#3277)
Signed-off-by: StevenLeiZhang <zhangleiic@163.com>
(cherry picked from commit 1580b1030a)

Co-authored-by: StevenLeiZhang <zhangleiic@163.com>
2022-02-17 09:44:12 +08:00
github-actions[bot]
3e47887b72 Fix: add DisableUpdateAnnotation for addon (#3272)
Signed-off-by: FogDong <dongtianxin.tx@alibaba-inc.com>
(cherry picked from commit 936a2fe1db)

Co-authored-by: FogDong <dongtianxin.tx@alibaba-inc.com>
2022-02-16 15:30:20 +08:00
github-actions[bot]
4934447e75 [Backport release-1.2] Feat: add seldon virtual service support in endpoints (#3269)
* Feat: add seldon virtual service support in endpoints

Signed-off-by: FogDong <dongtianxin.tx@alibaba-inc.com>
(cherry picked from commit 6c4015792e)

* fix ut

Signed-off-by: FogDong <dongtianxin.tx@alibaba-inc.com>
(cherry picked from commit 4bd9716030)

Co-authored-by: FogDong <dongtianxin.tx@alibaba-inc.com>
2022-02-16 14:29:07 +08:00
github-actions[bot]
d36718969f Fix: properties table of cloud resource doc is broken (#3265)
If one column of a table contianers multiple line of a json
struct, it will break the table.

Signed-off-by: Zheng Xi Zhou <zzxwill@gmail.com>
(cherry picked from commit 093a35768e)

Co-authored-by: Zheng Xi Zhou <zzxwill@gmail.com>
2022-02-16 11:30:36 +08:00
github-actions[bot]
859ca7567f Fix: fixed required items for a Terraform ComponentDefinition (#3258)
If a Terraform variable is required, the item in OpenAPI schema
is required.

Signed-off-by: Zheng Xi Zhou <zzxwill@gmail.com>
(cherry picked from commit 87a6e44e97)

Co-authored-by: Zheng Xi Zhou <zzxwill@gmail.com>
2022-02-15 19:54:00 +08:00
github-actions[bot]
10dce9debc Feat: update logo to .svg (#3255)
(cherry picked from commit c104d92425)

Co-authored-by: BinaryHB0916 <davidduan0916@gmail.com>
2022-02-15 15:29:59 +08:00
github-actions[bot]
2e67238b61 Fix: registry don't have enough info to build a reader (#3249)
Signed-off-by: barnettZQG <barnett.zqg@gmail.com>
(cherry picked from commit 8db1d2b616)

Co-authored-by: barnettZQG <barnett.zqg@gmail.com>
2022-02-14 18:40:24 +08:00
github-actions[bot]
341e07b636 Fix: fix panic when user disable create apprevision (#3247)
Signed-off-by: yangsoon <songyang.song@alibaba-inc.com>
(cherry picked from commit fb0983041d)

Co-authored-by: yangsoon <songyang.song@alibaba-inc.com>
2022-02-14 17:55:50 +08:00
github-actions[bot]
b5e04f2060 Add cli support for provider gcp and baidu. (#3243)
Signed-off-by: Nicola115 <2225992901@qq.com>
(cherry picked from commit 9d93b99084)

Co-authored-by: Avery <2225992901@qq.com>
2022-02-14 17:10:08 +08:00
github-actions[bot]
99c4a130d3 [Backport release-1.2] Feat: support complicated addon parameter (#3241)
* support complicated addon parameter

Signed-off-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
(cherry picked from commit c287698791)

* fix: go mod tidy

Signed-off-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
(cherry picked from commit 353bca84c8)

Co-authored-by: 楚岳 <wangyike.wyk@alibaba-inc.com>
2022-02-14 15:25:24 +08:00
github-actions[bot]
f8ba3d5d00 [Backport release-1.2] Fix: can not collector pod list with rollout trait (#3240)
* Fix: can not collector pod list with rollout trait

Signed-off-by: barnettZQG <barnett.zqg@gmail.com>
(cherry picked from commit 6365df4737)

* Fix: cue format error

Signed-off-by: barnettZQG <barnett.zqg@gmail.com>
(cherry picked from commit 0a59d0c051)

* Fix: default values and optional parameters cannot coexist

Signed-off-by: barnettZQG <barnett.zqg@gmail.com>
(cherry picked from commit d8e08b09d8)

Co-authored-by: barnettZQG <barnett.zqg@gmail.com>
2022-02-14 13:05:44 +08:00
github-actions[bot]
d540491f46 Fix: remove duplicative route in application webservice. (#3239)
Signed-off-by: wangcanfeng <wangcanfeng@corp.netease.com>
(cherry picked from commit dab3d2d2c6)

Co-authored-by: wangcanfeng <wangcanfeng@corp.netease.com>
2022-02-14 13:05:24 +08:00
github-actions[bot]
30c492a50a Fix: apply crd error that the annotations too lang (#3234)
Signed-off-by: barnettZQG <barnett.zqg@gmail.com>
(cherry picked from commit 8067f3fe5f)

Co-authored-by: barnettZQG <barnett.zqg@gmail.com>
2022-02-14 13:03:06 +08:00
github-actions[bot]
84422e581c Feat: remove the duplicate command (#3235)
Signed-off-by: barnettZQG <barnett.zqg@gmail.com>
(cherry picked from commit 5db766885d)

Co-authored-by: barnettZQG <barnett.zqg@gmail.com>
2022-02-14 13:02:29 +08:00
github-actions[bot]
38d2bf6839 Fix: the definition namespace is empty (#3230)
Signed-off-by: barnettZQG <barnett.zqg@gmail.com>
(cherry picked from commit c0c9b415a6)

Co-authored-by: barnettZQG <barnett.zqg@gmail.com>
2022-02-12 11:36:39 +08:00
114 changed files with 19029 additions and 1238 deletions

View File

@@ -19,8 +19,6 @@ package types
import "github.com/oam-dev/kubevela/pkg/oam"
const (
// DefaultKubeVelaNS defines the default KubeVela namespace in Kubernetes
DefaultKubeVelaNS = "vela-system"
// DefaultKubeVelaReleaseName defines the default name of KubeVela Release
DefaultKubeVelaReleaseName = "kubevela"
// DefaultKubeVelaChartName defines the default chart name of KubeVela, this variable MUST align to the chart name of this repo
@@ -33,8 +31,13 @@ const (
DefaultAppNamespace = "default"
// AutoDetectWorkloadDefinition defines the default workload type for ComponentDefinition which doesn't specify a workload
AutoDetectWorkloadDefinition = "autodetects.core.oam.dev"
// KubeVelaControllerDeployment defines the KubeVela controller's deployment name
KubeVelaControllerDeployment = "kubevela-vela-core"
)
// DefaultKubeVelaNS defines the default KubeVela namespace in Kubernetes
var DefaultKubeVelaNS = "vela-system"
const (
// AnnoDefinitionDescription is the annotation which describe what is the capability used for in a WorkloadDefinition/TraitDefinition Object
AnnoDefinitionDescription = "definition.oam.dev/description"
@@ -113,11 +116,3 @@ var DefaultFilterAnnots = []string{
oam.AnnotationFilterAnnotationKeys,
oam.AnnotationLastAppliedConfiguration,
}
// Cluster contains base info of cluster
type Cluster struct {
Name string
Type string
EndPoint string
Accepted bool
}

View File

@@ -126,6 +126,7 @@ spec:
{{ end }}
- "--system-definition-namespace={{ .Values.systemDefinitionNamespace }}"
- "--oam-spec-ver={{ .Values.OAMSpecVer }}"
- "--concurrent-reconciles={{ .Values.concurrentReconciles }}"
image: {{ .Values.imageRegistry }}{{ .Values.image.repository }}:{{ .Values.image.tag }}
imagePullPolicy: {{ quote .Values.image.pullPolicy }}
resources:

View File

@@ -21,4 +21,4 @@ version: 0.1.0
appVersion: 0.1.0
home: https://kubevela.io
icon: https://kubevela.io/img/logo.jpg
icon: https://kubevela.io/img/logo.svg

View File

@@ -0,0 +1,268 @@
{{- if .Values.enableFluxcdAddon -}}
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
labels:
addons.oam.dev/name: fluxcd-def
name: addon-fluxcd-def
namespace: {{ .Release.Namespace }}
spec:
components:
- name: fluxc-def-resources
properties:
objects:
- apiVersion: core.oam.dev/v1beta1
kind: ComponentDefinition
metadata:
annotations:
definition.oam.dev/description: helm release is a group of K8s resources
from either git repository or helm repo
name: helm
namespace: {{.Values.systemDefinitionNamespace}}
spec:
schematic:
cue:
template: "output: {\n\tapiVersion: \"source.toolkit.fluxcd.io/v1beta1\"\n\tmetadata:
{\n\t\tname: context.name\n\t}\n\tif parameter.repoType == \"git\"
{\n\t\tkind: \"GitRepository\"\n\t\tspec: {\n\t\t\turl: parameter.url\n\t\t\tif
parameter.git.branch != _|_ {\n\t\t\t\tref: branch: parameter.git.branch\n\t\t\t}\n\t\t\t_secret\n\t\t\t_sourceCommonArgs\n\t\t}\n\t}\n\tif
parameter.repoType == \"oss\" {\n\t\tkind: \"Bucket\"\n\t\tspec: {\n\t\t\tendpoint:
\ parameter.url\n\t\t\tbucketName: parameter.oss.bucketName\n\t\t\tprovider:
\ parameter.oss.provider\n\t\t\tif parameter.oss.region != _|_ {\n\t\t\t\tregion:
parameter.oss.region\n\t\t\t}\n\t\t\t_secret\n\t\t\t_sourceCommonArgs\n\t\t}\n\t}\n\tif
parameter.repoType == \"helm\" {\n\t\tkind: \"HelmRepository\"\n\t\tspec:
{\n\t\t\turl: parameter.url\n\t\t\t_secret\n\t\t\t_sourceCommonArgs\n\t\t}\n\t}\n}\n\noutputs:
release: {\n\tapiVersion: \"helm.toolkit.fluxcd.io/v2beta1\"\n\tkind:
\ \"HelmRelease\"\n\tmetadata: {\n\t\tname: context.name\n\t}\n\tspec:
{\n\t\ttimeout: parameter.installTimeout\n\t\tinterval: parameter.interval\n\t\tchart:
{\n\t\t\tspec: {\n\t\t\t\tchart: parameter.chart\n\t\t\t\tversion:
parameter.version\n\t\t\t\tsourceRef: {\n\t\t\t\t\tif parameter.repoType
== \"git\" {\n\t\t\t\t\t\tkind: \"GitRepository\"\n\t\t\t\t\t}\n\t\t\t\t\tif
parameter.repoType == \"helm\" {\n\t\t\t\t\t\tkind: \"HelmRepository\"\n\t\t\t\t\t}\n\t\t\t\t\tif
parameter.repoType == \"oss\" {\n\t\t\t\t\t\tkind: \"Bucket\"\n\t\t\t\t\t}\n\t\t\t\t\tname:
\ context.name\n\t\t\t\t}\n\t\t\t\tinterval: parameter.interval\n\t\t\t}\n\t\t}\n\t\tif
parameter.targetNamespace != _|_ {\n\t\t\ttargetNamespace: parameter.targetNamespace\n\t\t}\n\t\tif
parameter.releaseName != _|_ {\n\t\t\treleaseName: parameter.releaseName\n\t\t}\n\t\tif
parameter.values != _|_ {\n\t\t\tvalues: parameter.values\n\t\t}\n\t}\n}\n\n_secret:
{\n\tif parameter.secretRef != _|_ {\n\t\tsecretRef: {\n\t\t\tname:
parameter.secretRef\n\t\t}\n\t}\n}\n\n_sourceCommonArgs: {\n\tinterval:
parameter.pullInterval\n\tif parameter.timeout != _|_ {\n\t\ttimeout:
parameter.timeout\n\t}\n}\n\nparameter: {\n\trepoType: *\"helm\" |
\"git\" | \"oss\"\n\t// +usage=The interval at which to check for
repository/bucket and relese updates, default to 5m\n\tpullInterval:
*\"5m\" | string\n // +usage=The Interval at which to reconcile
the Helm release, default to 30s\n interval: *\"30s\" | string\n\t//
+usage=The Git or Helm repository URL, OSS endpoint, accept HTTP/S
or SSH address as git url,\n\turl: string\n\t// +usage=The name of
the secret containing authentication credentials\n\tsecretRef?: string\n\t//
+usage=The timeout for operations like download index/clone repository,
optional\n\ttimeout?: string\n\t// +usage=The timeout for operation
`helm install`, optional\n\tinstallTimeout: *\"10m\" | string\n\n\tgit?:
{\n\t\t// +usage=The Git reference to checkout and monitor for changes,
defaults to master branch\n\t\tbranch: string\n\t}\n\toss?: {\n\t\t//
+usage=The bucket's name, required if repoType is oss\n\t\tbucketName:
string\n\t\t// +usage=\"generic\" for Minio, Amazon S3, Google Cloud
Storage, Alibaba Cloud OSS, \"aws\" for retrieve credentials from
the EC2 service when credentials not specified, default \"generic\"\n\t\tprovider:
*\"generic\" | \"aws\"\n\t\t// +usage=The bucket region, optional\n\t\tregion?:
string\n\t}\n\n\t// +usage=1.The relative path to helm chart for git/oss
source. 2. chart name for helm resource 3. relative path for chart
package(e.g. ./charts/podinfo-1.2.3.tgz)\n\tchart: string\n\t// +usage=Chart
version\n\tversion: *\"*\" | string\n\t// +usage=The namespace for
helm chart, optional\n\ttargetNamespace?: string\n\t// +usage=The
release name\n\treleaseName?: string\n\t// +usage=Chart values\n\tvalues?:
#nestedmap\n}\n\n#nestedmap: {\n\t...\n}\n"
status:
customStatus: "repoMessage: string\nreleaseMessage: string\nif context.output.status
== _|_ {\n\trepoMessage: \"Fetching repository\"\n\treleaseMessage:
\"Wating repository ready\"\n}\nif context.output.status != _|_ {\n\trepoStatus:
context.output.status\n\tif repoStatus.conditions[0][\"type\"] != \"Ready\"
{\n\t\trepoMessage: \"Fetch repository fail\"\n\t}\n\tif repoStatus.conditions[0][\"type\"]
== \"Ready\" {\n\t\trepoMessage: \"Fetch repository successfully\"\n\t}\n\n\tif
context.outputs.release.status == _|_ {\n\t\treleaseMessage: \"Creating
helm release\"\n\t}\n\tif context.outputs.release.status != _|_ {\n\t\tif
context.outputs.release.status.conditions[0][\"message\"] == \"Release
reconciliation succeeded\" {\n\t\t\treleaseMessage: \"Create helm release
successfully\"\n\t\t}\n\t\tif context.outputs.release.status.conditions[0][\"message\"]
!= \"Release reconciliation succeeded\" {\n\t\t\treleaseBasicMessage:
\"Delivery helm release in progress, message: \" + context.outputs.release.status.conditions[0][\"message\"]\n\t\t\tif
len(context.outputs.release.status.conditions) == 1 {\n\t\t\t\treleaseMessage:
releaseBasicMessage\n\t\t\t}\n\t\t\tif len(context.outputs.release.status.conditions)
> 1 {\n\t\t\t\treleaseMessage: releaseBasicMessage + \", \" + context.outputs.release.status.conditions[1][\"message\"]\n\t\t\t}\n\t\t}\n\t}\n\n}\nmessage:
repoMessage + \", \" + releaseMessage"
healthPolicy: 'isHealth: len(context.outputs.release.status.conditions)
!= 0 && context.outputs.release.status.conditions[0]["status"]=="True"'
workload:
type: autodetects.core.oam.dev
- apiVersion: core.oam.dev/v1beta1
kind: TraitDefinition
metadata:
annotations:
definition.oam.dev/description: A list of JSON6902 patch to selected target
name: kustomize-json-patch
namespace: {{.Values.systemDefinitionNamespace}}
spec:
schematic:
cue:
template: "patch: {\n\tspec: {\n\t\tpatchesJson6902: parameter.patchesJson\n\t}\n}\n\nparameter:
{\n\t// +usage=A list of JSON6902 patch.\n\tpatchesJson: [...#jsonPatchItem]\n}\n\n//
+usage=Contains a JSON6902 patch\n#jsonPatchItem: {\n\ttarget: #selector\n\tpatch:
[...{\n\t\t// +usage=operation to perform\n\t\top: string | \"add\"
| \"remove\" | \"replace\" | \"move\" | \"copy\" | \"test\"\n\t\t//
+usage=operate path e.g. /foo/bar\n\t\tpath: string\n\t\t// +usage=specify
source path when op is copy/move\n\t\tfrom?: string\n\t\t// +usage=specify
opraation value when op is test/add/replace\n\t\tvalue?: string\n\t}]\n}\n\n//
+usage=Selector specifies a set of resources\n#selector: {\n\tgroup?:
\ string\n\tversion?: string\n\tkind?: string\n\tnamespace?:
\ string\n\tname?: string\n\tannotationSelector?:
string\n\tlabelSelector?: string\n}\n"
- apiVersion: core.oam.dev/v1beta1
kind: TraitDefinition
metadata:
annotations:
definition.oam.dev/description: A list of StrategicMerge or JSON6902 patch
to selected target
name: kustomize-patch
namespace: {{.Values.systemDefinitionNamespace}}
spec:
schematic:
cue:
template: "patch: {\n\tspec: {\n\t\tpatches: parameter.patches\n\t}\n}\nparameter:
{\n\t// +usage=a list of StrategicMerge or JSON6902 patch to selected
target\n\tpatches: [...#patchItem]\n}\n\n// +usage=Contains a strategicMerge
or JSON6902 patch\n#patchItem: {\n\t// +usage=Inline patch string,
in yaml style\n\tpatch: string\n\t// +usage=Specify the target the
patch should be applied to\n\ttarget: #selector\n}\n\n// +usage=Selector
specifies a set of resources\n#selector: {\n\tgroup?: string\n\tversion?:
\ string\n\tkind?: string\n\tnamespace?: string\n\tname?:
\ string\n\tannotationSelector?: string\n\tlabelSelector?:
\ string\n}\n"
- apiVersion: core.oam.dev/v1beta1
kind: ComponentDefinition
metadata:
annotations:
definition.oam.dev/description: kustomize can fetching, building, updating
and applying Kustomize manifests from git repo.
name: kustomize
namespace: {{.Values.systemDefinitionNamespace}}
spec:
schematic:
cue:
template: "output: {\n\tapiVersion: \"kustomize.toolkit.fluxcd.io/v1beta1\"\n\tkind:
\ \"Kustomization\"\n\tmetadata: {\n\t\tname: context.name\n
\ namespace: context.namespace\n\t}\n\tspec: {\n\t\tinterval: parameter.pullInterval\n\t\tsourceRef:
{\n\t\t\tif parameter.repoType == \"git\" {\n\t\t\t\tkind: \"GitRepository\"\n\t\t\t}\n\t\t\tif
parameter.repoType == \"oss\" {\n\t\t\t\tkind: \"Bucket\"\n\t\t\t}\n\t\t\tname:
\ context.name\n\t\t\tnamespace: context.namespace\n\t\t}\n\t\tpath:
\ parameter.path\n\t\tprune: true\n\t\tvalidation: \"client\"\n\t}\n}\n\noutputs:
{\n repo: {\n\t apiVersion: \"source.toolkit.fluxcd.io/v1beta1\"\n\t
\ metadata: {\n\t\t name: context.name\n namespace: context.namespace\n\t
\ }\n\t if parameter.repoType == \"git\" {\n\t\t kind: \"GitRepository\"\n\t\t
\ spec: {\n\t\t\t url: parameter.url\n\t\t\t if parameter.git.branch
!= _|_ {\n\t\t\t\t ref: branch: parameter.git.branch\n\t\t\t }\n
\ if parameter.git.provider != _|_ {\n if parameter.git.provider
== \"GitHub\" {\n gitImplementation: \"go-git\"\n }\n
\ if parameter.git.provider == \"AzureDevOps\" {\n gitImplementation:
\"libgit2\"\n }\n }\n\t\t\t _secret\n\t\t\t _sourceCommonArgs\n\t\t
\ }\n\t }\n\t if parameter.repoType == \"oss\" {\n\t\t kind: \"Bucket\"\n\t\t
\ spec: {\n\t\t\t endpoint: parameter.url\n\t\t\t bucketName:
parameter.oss.bucketName\n\t\t\t provider: parameter.oss.provider\n\t\t\t
\ if parameter.oss.region != _|_ {\n\t\t\t\t region: parameter.oss.region\n\t\t\t
\ }\n\t\t\t _secret\n\t\t\t _sourceCommonArgs\n\t\t }\n\t }\n
\ }\n\n if parameter.imageRepository != _|_ {\n imageRepo: {\n
\ apiVersion: \"image.toolkit.fluxcd.io/v1beta1\"\n kind:
\"ImageRepository\"\n\t metadata: {\n\t\t name: context.name\n
\ namespace: context.namespace\n\t }\n spec: {\n image:
parameter.imageRepository.image\n interval: parameter.pullInterval\n
\ if parameter.imageRepository.secretRef != _|_ {\n secretRef:
name: parameter.imageRepository.secretRef\n }\n }\n }\n\n
\ imagePolicy: {\n apiVersion: \"image.toolkit.fluxcd.io/v1beta1\"\n
\ kind: \"ImagePolicy\"\n\t metadata: {\n\t\t name: context.name\n
\ namespace: context.namespace\n\t }\n spec: {\n imageRepositoryRef:
name: context.name\n policy: parameter.imageRepository.policy\n
\ if parameter.imageRepository.filterTags != _|_ {\n filterTags:
parameter.imageRepository.filterTags\n }\n }\n }\n\n
\ imageUpdate: {\n apiVersion: \"image.toolkit.fluxcd.io/v1beta1\"\n
\ kind: \"ImageUpdateAutomation\"\n\t metadata: {\n\t\t name:
context.name\n namespace: context.namespace\n\t }\n spec:
{\n interval: parameter.pullInterval\n sourceRef: {\n
\ kind: \"GitRepository\"\n name: context.name\n
\ }\n git: {\n checkout: ref: branch: parameter.git.branch\n
\ commit: {\n author: {\n email: \"kubevelabot@users.noreply.github.com\"\n
\ name: \"kubevelabot\"\n }\n if
parameter.imageRepository.commitMessage != _|_ {\n messageTemplate:
\"Update image automatically.\\n\" + parameter.imageRepository.commitMessage\n
\ }\n if parameter.imageRepository.commitMessage
== _|_ {\n messageTemplate: \"Update image automatically.\"\n
\ }\n }\n push: branch: parameter.git.branch\n
\ }\n update: {\n path:\tparameter.path\n strategy:
\"Setters\"\n }\n }\n }\n }\n}\n\n_secret: {\n\tif
parameter.secretRef != _|_ {\n\t\tsecretRef: {\n\t\t\tname: parameter.secretRef\n\t\t}\n\t}\n}\n\n_sourceCommonArgs:
{\n\tinterval: parameter.pullInterval\n\tif parameter.timeout != _|_
{\n\t\ttimeout: parameter.timeout\n\t}\n}\n\nparameter: {\n\trepoType:
*\"git\" | \"oss\"\n // +usage=The image repository for automatically
update image to git\n imageRepository?: {\n // +usage=The image
url\n image: string\n // +usage=The name of the secret containing
authentication credentials\n secretRef?: string\n // +usage=Policy
gives the particulars of the policy to be followed in selecting the
most recent image.\n policy: {\n // +usage=Alphabetical set
of rules to use for alphabetical ordering of the tags.\n alphabetical?:
{\n // +usage=Order specifies the sorting order of the tags.\n
\ // +usage=Given the letters of the alphabet as tags, ascending
order would select Z, and descending order would select A.\n order?:
\"asc\" | \"desc\"\n }\n // +usage=Numerical set of rules
to use for numerical ordering of the tags.\n numerical?: {\n
\ // +usage=Order specifies the sorting order of the tags.\n
\ // +usage=Given the integer values from 0 to 9 as tags, ascending
order would select 9, and descending order would select 0.\n order:
\"asc\" | \"desc\"\n }\n // +usage=SemVer gives a semantic
version range to check against the tags available.\n semver?:
{\n // +usage=Range gives a semver range for the image tag;
the highest version within the range that's a tag yields the latest
image.\n range: string\n }\n }\n // +usage=FilterTags
enables filtering for only a subset of tags based on a set of rules.
If no rules are provided, all the tags from the repository will be
ordered and compared.\n filterTags?: {\n // +usage=Extract
allows a capture group to be extracted from the specified regular
expression pattern, useful before tag evaluation.\n extract?:
string\n // +usage=Pattern specifies a regular expression pattern
used to filter for image tags.\n pattern?: string\n }\n //
+usage=The image url\n commitMessage?: string\n }\n\t// +usage=The
interval at which to check for repository/bucket and release updates,
default to 5m\n\tpullInterval: *\"5m\" | string\n\t// +usage=The Git
or Helm repository URL, OSS endpoint, accept HTTP/S or SSH address
as git url,\n\turl: string\n\t// +usage=The name of the secret containing
authentication credentials\n\tsecretRef?: string\n\t// +usage=The
timeout for operations like download index/clone repository, optional\n\ttimeout?:
string\n\tgit?: {\n\t\t// +usage=The Git reference to checkout and
monitor for changes, defaults to master branch\n\t\tbranch: string\n
\ // +usage=Determines which git client library to use. Defaults
to GitHub, it will pick go-git. AzureDevOps will pick libgit2.\n provider?:
*\"GitHub\" | \"AzureDevOps\"\n\t}\n\toss?: {\n\t\t// +usage=The bucket's
name, required if repoType is oss\n\t\tbucketName: string\n\t\t//
+usage=\"generic\" for Minio, Amazon S3, Google Cloud Storage, Alibaba
Cloud OSS, \"aws\" for retrieve credentials from the EC2 service when
credentials not specified, default \"generic\"\n\t\tprovider: *\"generic\"
| \"aws\"\n\t\t// +usage=The bucket region, optional\n\t\tregion?:
string\n\t}\n\t//+usage=Path to the directory containing the kustomization.yaml
file, or the set of plain YAMLs a kustomization.yaml should be generated
for.\n\tpath: string\n}"
workload:
type: autodetects.core.oam.dev
- apiVersion: core.oam.dev/v1beta1
kind: TraitDefinition
metadata:
annotations:
definition.oam.dev/description: A list of strategic merge to kustomize
config
name: kustomize-strategy-merge
namespace: {{.Values.systemDefinitionNamespace}}
spec:
schematic:
cue:
template: "patch: {\n\tspec: {\n\t\tpatchesStrategicMerge: parameter.patchesStrategicMerge\n\t}\n}\n\nparameter:
{\n\t// +usage=a list of strategicmerge, defined as inline yaml objects.\n\tpatchesStrategicMerge:
[...#nestedmap]\n}\n\n#nestedmap: {\n\t...\n}\n"
type: k8s-objects
{{- end }}

File diff suppressed because it is too large Load Diff

View File

@@ -2,7 +2,7 @@ apiVersion: v1
kind: ConfigMap
metadata:
name: vela-addon-registry
namespace: {{.Values.systemDefinitionNamespace}}
namespace: {{ .Release.Namespace }}
data:
registries: '{
"KubeVela":{

View File

@@ -5,8 +5,6 @@ kind: TraitDefinition
metadata:
annotations:
definition.oam.dev/description: Rollout the component.
labels:
custom.definition.oam.dev/ui-hidden: "true"
name: rollout
namespace: {{.Values.systemDefinitionNamespace}}
spec:
@@ -22,8 +20,13 @@ spec:
namespace: context.namespace
}
spec: {
targetRevisionName: parameter.targetRevision
componentName: context.name
if parameter.targetRevision != _|_ {
targetRevisionName: parameter.targetRevision
}
if parameter.targetRevision == _|_ {
targetRevisionName: context.revision
}
componentName: context.name
rolloutPlan: {
rolloutStrategy: "IncreaseFirst"
if parameter.rolloutBatches != _|_ {
@@ -37,8 +40,8 @@ spec:
}
}
parameter: {
targetRevision: *context.revision | string
targetSize: int
targetRevision?: string
targetSize: int
rolloutBatches?: [...rolloutBatch]
batchPartition?: int
}

View File

@@ -154,7 +154,7 @@ spec:
}
if v.resources.requests.storage == _|_ {
resources: requests: storage: "1Gi"
resources: requests: storage: "8Gi"
}
if v.resources.requests.storage != _|_ {
resources: requests: storage: v.resources.requests.storage

View File

@@ -506,23 +506,49 @@ spec:
import "strconv"
ready: {
if context.output.status.readyReplicas == _|_ {
replica: "0"
readyReplicas: 0
}
if context.output.status.readyReplicas != _|_ {
replica: strconv.FormatInt(context.output.status.readyReplicas, 10)
readyReplicas: context.output.status.readyReplicas
}
}
message: "Ready:" + ready.replica + "/" + strconv.FormatInt(context.output.spec.replicas, 10)
message: "Ready:" + strconv.FormatInt(ready.readyReplicas, 10) + "/" + strconv.FormatInt(context.output.spec.replicas, 10)
healthPolicy: |-
ready: {
if context.output.status.readyReplicas == _|_ {
replica: 0
if context.output.status.updatedReplicas == _|_ {
updatedReplicas : 0
}
if context.output.status.updatedReplicas != _|_ {
updatedReplicas : context.output.status.updatedReplicas
}
if context.output.status.readyReplicas == _|_ {
readyReplicas: 0
}
if context.output.status.readyReplicas != _|_ {
replica: context.output.status.readyReplicas
readyReplicas: context.output.status.readyReplicas
}
if context.output.status.replicas == _|_ {
replicas: 0
}
if context.output.status.replicas != _|_ {
replicas: context.output.status.replicas
}
if context.output.status.observedGeneration != _|_ {
observedGeneration: context.output.status.observedGeneration
}
if context.output.status.observedGeneration == _|_ {
observedGeneration: 0
}
}
isHealth: context.output.spec.replicas == ready.replica
isHealth: (context.output.spec.replicas == ready.readyReplicas) && (context.output.spec.replicas == ready.updatedReplicas) && (context.output.spec.replicas == ready.replicas) && (ready.observedGeneration == context.output.metadata.generation || ready.observedGeneration > context.output.metadata.generation)
workload:
definition:
apiVersion: apps/v1

View File

@@ -399,23 +399,49 @@ spec:
import "strconv"
ready: {
if context.output.status.readyReplicas == _|_ {
replica: "0"
readyReplicas: 0
}
if context.output.status.readyReplicas != _|_ {
replica: strconv.FormatInt(context.output.status.readyReplicas, 10)
readyReplicas: context.output.status.readyReplicas
}
}
message: "Ready:" + ready.replica + "/" + strconv.FormatInt(context.output.spec.replicas, 10)
message: "Ready:" + strconv.FormatInt(ready.readyReplicas, 10) + "/" + strconv.FormatInt(context.output.spec.replicas, 10)
healthPolicy: |-
ready: {
if context.output.status.readyReplicas == _|_ {
replica: 0
if context.output.status.updatedReplicas == _|_ {
updatedReplicas : 0
}
if context.output.status.updatedReplicas != _|_ {
updatedReplicas : context.output.status.updatedReplicas
}
if context.output.status.readyReplicas == _|_ {
readyReplicas: 0
}
if context.output.status.readyReplicas != _|_ {
replica: context.output.status.readyReplicas
readyReplicas: context.output.status.readyReplicas
}
if context.output.status.replicas == _|_ {
replicas: 0
}
if context.output.status.replicas != _|_ {
replicas: context.output.status.replicas
}
if context.output.status.observedGeneration != _|_ {
observedGeneration: context.output.status.observedGeneration
}
if context.output.status.observedGeneration == _|_ {
observedGeneration: 0
}
}
isHealth: context.output.spec.replicas == ready.replica
isHealth: (context.output.spec.replicas == ready.readyReplicas) && (context.output.spec.replicas == ready.updatedReplicas) && (context.output.spec.replicas == ready.replicas) && (ready.observedGeneration == context.output.metadata.generation || ready.observedGeneration > context.output.metadata.generation)
workload:
definition:
apiVersion: apps/v1

View File

@@ -120,10 +120,8 @@ spec:
- "--use-webhook=true"
- "--webhook-port={{ .Values.webhookService.port }}"
- "--webhook-cert-dir={{ .Values.admissionWebhooks.certificate.mountPath }}"
- "--autogen-workload-definition={{ .Values.admissionWebhooks.autoGenWorkloadDefinition }}"
{{ end }}
- "--health-addr=:{{ .Values.healthCheck.port }}"
- "--apply-once-only={{ .Values.applyOnceOnly }}"
{{ if ne .Values.disableCaps "" }}
- "--disable-caps={{ .Values.disableCaps }}"
{{ end }}
@@ -135,6 +133,9 @@ spec:
- "--enable-cluster-gateway"
{{ end }}
- "--application-re-sync-period={{ .Values.controllerArgs.reSyncPeriod }}"
- "--concurrent-reconciles={{ .Values.concurrentReconciles }}"
- "--kube-api-qps={{ .Values.kubeClient.qps }}"
- "--kube-api-burst={{ .Values.kubeClient.burst }}"
image: {{ .Values.imageRegistry }}{{ .Values.image.repository }}:{{ .Values.image.tag }}
imagePullPolicy: {{ quote .Values.image.pullPolicy }}
resources:

View File

@@ -29,7 +29,7 @@ metadata:
"helm.sh/hook": test
helm.sh/hook-delete-policy: hook-succeeded
spec:
serviceAccountName: kubevela-vela-core
serviceAccountName: {{ include "kubevela.serviceAccountName" . }}
containers:
- name: {{ .Release.Name }}-application-test
image: {{ .Values.imageRegistry }}{{ .Values.test.k8s.repository }}:{{ .Values.test.k8s.tag }}

View File

@@ -3,8 +3,6 @@
# Declare variables to be passed into your templates.
replicaCount: 1
# Valid applyOnceOnly values: true/false/on/off/force
applyOnceOnly: "off"
disableCaps: ""
@@ -81,8 +79,6 @@ admissionWebhooks:
certManager:
enabled: false
revisionHistoryLimit: 3
# If autoGenWorkloadDefinition is true, webhook will auto generated workloadDefinition which componentDefinition refers to
autoGenWorkloadDefinition: true
#Enable debug logs for development purpose
logDebug: false
@@ -103,6 +99,12 @@ definitionRevisionLimit: 20
# concurrentReconciles is the concurrent reconcile number of the controller
concurrentReconciles: 4
kubeClient:
# the qps for reconcile clients, default is 50
qps: 50
# the burst for reconcile clients, default is 100
burst: 100
# dependCheckWait is the time to wait for ApplicationConfiguration's dependent-resource ready
dependCheckWait: 30s
@@ -132,4 +134,6 @@ test:
tag: v1
k8s:
repository: oamdev/alpine-k8s
tag: 1.18.2
tag: 1.18.2
enableFluxcdAddon: false

View File

@@ -5,8 +5,6 @@ kind: TraitDefinition
metadata:
annotations:
definition.oam.dev/description: Rollout the component.
labels:
custom.definition.oam.dev/ui-hidden: "true"
name: rollout
namespace: {{.Values.systemDefinitionNamespace}}
spec:
@@ -22,8 +20,13 @@ spec:
namespace: context.namespace
}
spec: {
targetRevisionName: parameter.targetRevision
componentName: context.name
if parameter.targetRevision != _|_ {
targetRevisionName: parameter.targetRevision
}
if parameter.targetRevision == _|_ {
targetRevisionName: context.revision
}
componentName: context.name
rolloutPlan: {
rolloutStrategy: "IncreaseFirst"
if parameter.rolloutBatches != _|_ {
@@ -37,8 +40,8 @@ spec:
}
}
parameter: {
targetRevision: *context.revision | string
targetSize: int
targetRevision?: string
targetSize: int
rolloutBatches?: [...rolloutBatch]
batchPartition?: int
}

View File

@@ -154,7 +154,7 @@ spec:
}
if v.resources.requests.storage == _|_ {
resources: requests: storage: "1Gi"
resources: requests: storage: "8Gi"
}
if v.resources.requests.storage != _|_ {
resources: requests: storage: v.resources.requests.storage

View File

@@ -506,23 +506,49 @@ spec:
import "strconv"
ready: {
if context.output.status.readyReplicas == _|_ {
replica: "0"
readyReplicas: 0
}
if context.output.status.readyReplicas != _|_ {
replica: strconv.FormatInt(context.output.status.readyReplicas, 10)
readyReplicas: context.output.status.readyReplicas
}
}
message: "Ready:" + ready.replica + "/" + strconv.FormatInt(context.output.spec.replicas, 10)
message: "Ready:" + strconv.FormatInt(ready.readyReplicas, 10) + "/" + strconv.FormatInt(context.output.spec.replicas, 10)
healthPolicy: |-
ready: {
if context.output.status.readyReplicas == _|_ {
replica: 0
if context.output.status.updatedReplicas == _|_ {
updatedReplicas : 0
}
if context.output.status.updatedReplicas != _|_ {
updatedReplicas : context.output.status.updatedReplicas
}
if context.output.status.readyReplicas == _|_ {
readyReplicas: 0
}
if context.output.status.readyReplicas != _|_ {
replica: context.output.status.readyReplicas
readyReplicas: context.output.status.readyReplicas
}
if context.output.status.replicas == _|_ {
replicas: 0
}
if context.output.status.replicas != _|_ {
replicas: context.output.status.replicas
}
if context.output.status.observedGeneration != _|_ {
observedGeneration: context.output.status.observedGeneration
}
if context.output.status.observedGeneration == _|_ {
observedGeneration: 0
}
}
isHealth: context.output.spec.replicas == ready.replica
isHealth: (context.output.spec.replicas == ready.readyReplicas) && (context.output.spec.replicas == ready.updatedReplicas) && (context.output.spec.replicas == ready.replicas) && (ready.observedGeneration == context.output.metadata.generation || ready.observedGeneration > context.output.metadata.generation)
workload:
definition:
apiVersion: apps/v1

View File

@@ -399,23 +399,49 @@ spec:
import "strconv"
ready: {
if context.output.status.readyReplicas == _|_ {
replica: "0"
readyReplicas: 0
}
if context.output.status.readyReplicas != _|_ {
replica: strconv.FormatInt(context.output.status.readyReplicas, 10)
readyReplicas: context.output.status.readyReplicas
}
}
message: "Ready:" + ready.replica + "/" + strconv.FormatInt(context.output.spec.replicas, 10)
message: "Ready:" + strconv.FormatInt(ready.readyReplicas, 10) + "/" + strconv.FormatInt(context.output.spec.replicas, 10)
healthPolicy: |-
ready: {
if context.output.status.readyReplicas == _|_ {
replica: 0
if context.output.status.updatedReplicas == _|_ {
updatedReplicas : 0
}
if context.output.status.updatedReplicas != _|_ {
updatedReplicas : context.output.status.updatedReplicas
}
if context.output.status.readyReplicas == _|_ {
readyReplicas: 0
}
if context.output.status.readyReplicas != _|_ {
replica: context.output.status.readyReplicas
readyReplicas: context.output.status.readyReplicas
}
if context.output.status.replicas == _|_ {
replicas: 0
}
if context.output.status.replicas != _|_ {
replicas: context.output.status.replicas
}
if context.output.status.observedGeneration != _|_ {
observedGeneration: context.output.status.observedGeneration
}
if context.output.status.observedGeneration == _|_ {
observedGeneration: 0
}
}
isHealth: context.output.spec.replicas == ready.replica
isHealth: (context.output.spec.replicas == ready.readyReplicas) && (context.output.spec.replicas == ready.updatedReplicas) && (context.output.spec.replicas == ready.replicas) && (ready.observedGeneration == context.output.metadata.generation || ready.observedGeneration > context.output.metadata.generation)
workload:
definition:
apiVersion: apps/v1

View File

@@ -136,6 +136,9 @@ spec:
{{ if .Values.multicluster.enabled }}
- "--enable-cluster-gateway"
{{ end }}
- "--concurrent-reconciles={{ .Values.concurrentReconciles }}"
- "--kube-api-qps={{ .Values.kubeClient.qps }}"
- "--kube-api-burst={{ .Values.kubeClient.burst }}"
image: {{ .Values.imageRegistry }}{{ .Values.image.repository }}:{{ .Values.image.tag }}
imagePullPolicy: {{ quote .Values.image.pullPolicy }}
resources:

View File

@@ -105,6 +105,12 @@ dependCheckWait: 30s
# OAMSpecVer is the oam spec version controller want to setup
OAMSpecVer: "minimal"
kubeClient:
# the qps for reconcile clients, default is 50
qps: 50
# the burst for reconcile clients, default is 100
burst: 100
multicluster:
enabled: false
clusterGateway:

View File

@@ -30,6 +30,8 @@ import (
"strings"
"time"
"github.com/oam-dev/kubevela/pkg/utils/util"
"k8s.io/klog/v2"
"k8s.io/klog/v2/klogr"
ctrl "sigs.k8s.io/controller-runtime"
@@ -131,6 +133,7 @@ func main() {
"The duration the LeaderElector clients should wait between tries of actions")
flag.BoolVar(&enableClusterGateway, "enable-cluster-gateway", false, "Enable cluster-gateway to use multicluster, disabled by default.")
flag.BoolVar(&controllerArgs.EnableCompatibility, "enable-asi-compatibility", false, "enable compatibility for asi")
flag.BoolVar(&controllerArgs.IgnoreAppWithoutControllerRequirement, "ignore-app-without-controller-version", false, "If true, application controller will not process the app without 'app.oam.dev/controller-version-require' annotation")
standardcontroller.AddOptimizeFlags()
flag.IntVar(&resourcekeeper.MaxDispatchConcurrent, "max-dispatch-concurrent", 10, "Set the max dispatch concurrent number, default is 10")
@@ -193,17 +196,19 @@ func main() {
// wrapper the round tripper by multi cluster rewriter
if enableClusterGateway {
if _, err := multicluster.Initialize(restConfig, true); err != nil {
klog.ErrorS(err, "failed to enable multicluster")
klog.ErrorS(err, "failed to enable multi-cluster capability")
os.Exit(1)
}
}
ctrl.SetLogger(klogr.New())
leaderElectionID := util.GenerateLeaderElectionID(kubevelaName, controllerArgs.IgnoreAppWithoutControllerRequirement)
mgr, err := ctrl.NewManager(restConfig, ctrl.Options{
Scheme: scheme,
MetricsBindAddress: metricsAddr,
LeaderElection: enableLeaderElection,
LeaderElectionNamespace: leaderElectionNamespace,
LeaderElectionID: kubevelaName,
LeaderElectionID: leaderElectionID,
Port: webhookPort,
CertDir: certDir,
HealthProbeBindAddress: healthAddr,

View File

@@ -80,6 +80,27 @@ var _ = Describe("Addon Test", func() {
}, 60*time.Second).Should(Succeed())
})
It("Test Change default namespace can work", func() {
output, err := e2e.LongTimeExecWithEnv("vela addon list", 600*time.Second, []string{"DEFAULT_VELA_NS=test-vela"})
Expect(err).NotTo(HaveOccurred())
Expect(output).To(ContainSubstring("test-addon"))
Expect(output).To(ContainSubstring("disabled"))
output, err = e2e.LongTimeExecWithEnv("vela addon enable test-addon", 600*time.Second, []string{"DEFAULT_VELA_NS=test-vela"})
Expect(err).NotTo(HaveOccurred())
Expect(output).To(ContainSubstring("enabled Successfully."))
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(context.Background(), types.NamespacedName{Name: "addon-test-addon", Namespace: "test-vela"}, &v1beta1.Application{})).Should(BeNil())
}, 60*time.Second).Should(Succeed())
output, err = e2e.LongTimeExecWithEnv("vela addon disable test-addon", 600*time.Second, []string{"DEFAULT_VELA_NS=test-vela"})
Expect(err).NotTo(HaveOccurred())
Expect(output).To(ContainSubstring("Successfully disable addon"))
Eventually(func(g Gomega) {
g.Expect(apierrors.IsNotFound(k8sClient.Get(context.Background(), types.NamespacedName{Name: "addon-test-addon", Namespace: "test-vela"}, &v1beta1.Application{}))).Should(BeTrue())
}, 60*time.Second).Should(Succeed())
})
})
Context("Addon registry test", func() {

View File

@@ -21,6 +21,8 @@ import (
"fmt"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/yaml"
@@ -63,12 +65,25 @@ func ApplyMockServerConfig() error {
return err
}
otherRegistry := cm.DeepCopy()
err = k8sClient.Get(ctx, types.NamespacedName{Name: cm.Name, Namespace: cm.Namespace}, &originCm)
if err != nil && apierrors.IsNotFound(err) {
err = k8sClient.Create(ctx, &cm)
if err = k8sClient.Create(ctx, &cm); err != nil {
return err
}
} else {
cm.ResourceVersion = originCm.ResourceVersion
err = k8sClient.Update(ctx, &cm)
if err = k8sClient.Update(ctx, &cm); err != nil {
return err
}
}
return err
if err := k8sClient.Create(ctx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-vela"}}); err != nil {
return err
}
otherRegistry.SetNamespace("test-vela")
if err := k8sClient.Create(ctx, otherRegistry); err != nil {
return err
}
return nil
}

View File

@@ -80,6 +80,22 @@ func asyncExec(cli string) (*gexec.Session, error) {
return session, err
}
func LongTimeExecWithEnv(cli string, timeout time.Duration, env []string) (string, error) {
var output []byte
c := strings.Fields(cli)
commandName := path.Join(rudrPath, c[0])
command := exec.Command(commandName, c[1:]...)
command.Env = os.Environ()
command.Env = append(command.Env, env...)
session, err := gexec.Start(command, ginkgo.GinkgoWriter, ginkgo.GinkgoWriter)
if err != nil {
return string(output), err
}
s := session.Wait(timeout)
return string(s.Out.Contents()) + string(s.Err.Contents()), nil
}
// InteractiveExec executes a command with interactive input
func InteractiveExec(cli string, consoleFn func(*expect.Console)) (string, error) {
var output []byte

View File

@@ -130,10 +130,11 @@ var _ = Describe("Test Kubectl Plugin", func() {
Expect(output).Should(ContainSubstring(showTdResult))
})
It("Test show componentDefinition use Helm Charts as Workload", func() {
cdName := "test-webapp-chart"
output, err := e2e.Exec(fmt.Sprintf("kubectl-vela show %s -n default", cdName))
Expect(err).NotTo(HaveOccurred())
Expect(output).Should(ContainSubstring("Properties"))
Eventually(func() string {
cdName := "test-webapp-chart"
output, _ := e2e.Exec(fmt.Sprintf("kubectl-vela show %s -n default", cdName))
return output
}, 20*time.Second).Should(ContainSubstring("Properties"))
})
It("Test show componentDefinition def with raw Kube mode", func() {
cdName := "kube-worker"

4
go.mod
View File

@@ -70,6 +70,7 @@ require (
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
gotest.tools v2.2.0+incompatible
helm.sh/helm/v3 v3.6.1
istio.io/api v0.0.0-20210128181506-0c4b8e54850f // indirect
istio.io/client-go v0.0.0-20210128182905-ee2edd059e02
k8s.io/api v0.22.1
k8s.io/apiextensions-apiserver v0.22.1
@@ -133,7 +134,7 @@ require (
github.com/cyphar/filepath-securejoin v0.2.2 // indirect
github.com/deislabs/oras v0.11.1 // indirect
github.com/docker/cli v20.10.5+incompatible // indirect
github.com/docker/distribution v2.7.1+incompatible // indirect
github.com/docker/distribution v2.8.0-beta.1+incompatible // indirect
github.com/docker/docker v17.12.0-ce-rc1.0.20200618181300-9dc6525e6118+incompatible // indirect
github.com/docker/docker-credential-helpers v0.6.3 // indirect
github.com/docker/go-connections v0.4.0 // indirect
@@ -261,7 +262,6 @@ require (
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
istio.io/api v0.0.0-20210128181506-0c4b8e54850f // indirect
istio.io/gogo-genproto v0.0.0-20190930162913-45029607206a // indirect
k8s.io/apiserver v0.22.1 // indirect
k8s.io/component-base v0.22.1 // indirect

3
go.sum
View File

@@ -409,8 +409,9 @@ github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyG
github.com/docker/cli v20.10.9+incompatible h1:OJ7YkwQA+k2Oi51lmCojpjiygKpi76P7bg91b2eJxYU=
github.com/docker/cli v20.10.9+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v0.0.0-20191216044856-a8371794149d/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.8.0-beta.1+incompatible h1:9MjVa+OTMHm4C0kKZB68jPlDM9Cg75ta4i46Gxxxn8o=
github.com/docker/distribution v2.8.0-beta.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker-credential-helpers v0.6.3 h1:zI2p9+1NQYdnG6sMU26EX4aVGlqbInSQxQXLvzJ4RPQ=
github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=

View File

@@ -18,7 +18,7 @@ FAIL=false
for file in $(git ls-files | grep "\.go$" | grep -v vendor/); do
echo -n "Header check: $file... "
if [[ -z $(cat ${file} | grep "Copyright [0-9]\{4\}.\? The KubeVela Authors") && -z $(cat ${file} | grep "Copyright [0-9]\{4\} The Crossplane Authors") ]]; then
if [[ -z $(cat ${file} | grep "Copyright [0-9]\{4\}\(-[0-9]\{4\}\)\?.\? The KubeVela Authors") && -z $(cat ${file} | grep "Copyright [0-9]\{4\} The Crossplane Authors") ]]; then
ERR=true
fi
if [ $ERR == true ]; then

View File

@@ -23,7 +23,6 @@ import (
"fmt"
"path"
"path/filepath"
"strconv"
"strings"
"sync"
"text/template"
@@ -32,8 +31,10 @@ import (
"cuelang.org/go/cue"
cueyaml "cuelang.org/go/encoding/yaml"
"github.com/google/go-github/v32/github"
"github.com/hashicorp/go-version"
"github.com/pkg/errors"
"golang.org/x/oauth2"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -41,6 +42,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
k8syaml "k8s.io/apimachinery/pkg/runtime/serializer/yaml"
types2 "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/discovery"
"k8s.io/client-go/rest"
"k8s.io/client-go/util/retry"
"k8s.io/klog/v2"
@@ -60,6 +62,7 @@ import (
"github.com/oam-dev/kubevela/pkg/utils"
"github.com/oam-dev/kubevela/pkg/utils/apply"
"github.com/oam-dev/kubevela/pkg/utils/common"
version2 "github.com/oam-dev/kubevela/version"
)
const (
@@ -80,10 +83,13 @@ const (
// DefSchemaName is the addon definition schemas dir name
DefSchemaName string = "schemas"
// AddonParameterDataKey is the key of parameter in addon args secrets
AddonParameterDataKey string = "addonParameterDataKey"
)
// ParameterFileName is the addon resources/parameter.cue file name
var ParameterFileName = filepath.Join("resources", "parameter.cue")
var ParameterFileName = strings.Join([]string{"resources", "parameter.cue"}, "/")
// ListOptions contains flags mark what files should be read in an addon directory
type ListOptions struct {
@@ -176,7 +182,7 @@ var Patterns = []Pattern{{Value: ReadmeFileName}, {Value: MetadataFileName}, {Va
func GetPatternFromItem(it Item, r AsyncReader, rootPath string) string {
relativePath := r.RelativePath(it)
for _, p := range Patterns {
if strings.HasPrefix(relativePath, filepath.Join(rootPath, p.Value)) {
if strings.HasPrefix(relativePath, strings.Join([]string{rootPath, p.Value}, "/")) {
return p.Value
}
}
@@ -466,6 +472,10 @@ func RenderApp(ctx context.Context, addon *InstallPackage, config *rest.Config,
}
}
app.Labels = util.MergeMapOverrideWithDst(app.Labels, map[string]string{oam.LabelAddonName: addon.Name})
// force override the namespace defined vela with DefaultVelaNS,this value can be modified by Env
app.SetNamespace(types.DefaultKubeVelaNS)
for _, namespace := range addon.NeedNamespace {
// vela-system must exist before rendering vela addon
if namespace == types.DefaultKubeVelaNS {
@@ -479,13 +489,14 @@ func RenderApp(ctx context.Context, addon *InstallPackage, config *rest.Config,
app.Spec.Components = append(app.Spec.Components, comp)
}
for _, tmpl := range addon.YAMLTemplates {
comp, err := renderRawComponent(tmpl)
if len(addon.YAMLTemplates) != 0 {
comp, err := renderK8sObjectsComponent(addon.YAMLTemplates, addon.Name)
if err != nil {
return nil, err
}
app.Spec.Components = append(app.Spec.Components, *comp)
}
for _, tmpl := range addon.CUETemplates {
comp, err := renderCUETemplate(tmpl, addon.Parameters, args)
if err != nil {
@@ -546,19 +557,15 @@ func RenderApp(ctx context.Context, addon *InstallPackage, config *rest.Config,
app.Spec.Workflow.Steps = append(app.Spec.Workflow.Steps, workflowSteps...)
default:
for _, def := range addon.Definitions {
comp, err := renderRawComponent(def)
if err != nil {
return nil, err
}
app.Spec.Components = append(app.Spec.Components, *comp)
}
for _, cueDef := range addon.CUEDefinitions {
def := definition.Definition{Unstructured: unstructured.Unstructured{}}
err := def.FromCUEString(cueDef.Data, config)
if err != nil {
return nil, errors.Wrapf(err, "fail to render definition: %s in cue's format", cueDef.Name)
}
if def.Unstructured.GetNamespace() == "" {
def.Unstructured.SetNamespace(types.DefaultKubeVelaNS)
}
app.Spec.Components = append(app.Spec.Components, common2.ApplicationComponent{
Name: cueDef.Name,
Type: "raw",
@@ -589,25 +596,28 @@ func RenderApp(ctx context.Context, addon *InstallPackage, config *rest.Config,
func RenderDefinitions(addon *InstallPackage, config *rest.Config) ([]*unstructured.Unstructured, error) {
defObjs := make([]*unstructured.Unstructured, 0)
if isDeployToRuntimeOnly(addon) {
// Runtime cluster mode needs to deploy definitions to control plane k8s.
for _, def := range addon.Definitions {
obj, err := renderObject(def)
if err != nil {
return nil, err
}
defObjs = append(defObjs, obj)
}
for _, cueDef := range addon.CUEDefinitions {
def := definition.Definition{Unstructured: unstructured.Unstructured{}}
err := def.FromCUEString(cueDef.Data, config)
if err != nil {
return nil, errors.Wrapf(err, "fail to render definition: %s in cue's format", cueDef.Name)
}
defObjs = append(defObjs, &def.Unstructured)
// No matter runtime mode or control mode , definition only needs to control plane k8s.
for _, def := range addon.Definitions {
obj, err := renderObject(def)
if err != nil {
return nil, err
}
// we should ignore the namespace defined in definition yaml, override the filed by DefaultKubeVelaNS
obj.SetNamespace(types.DefaultKubeVelaNS)
defObjs = append(defObjs, obj)
}
for _, cueDef := range addon.CUEDefinitions {
def := definition.Definition{Unstructured: unstructured.Unstructured{}}
err := def.FromCUEString(cueDef.Data, config)
if err != nil {
return nil, errors.Wrapf(err, "fail to render definition: %s in cue's format", cueDef.Name)
}
// we should ignore the namespace defined in definition yaml, override the filed by DefaultKubeVelaNS
def.SetNamespace(types.DefaultKubeVelaNS)
defObjs = append(defObjs, &def.Unstructured)
}
return defObjs, nil
}
@@ -735,17 +745,25 @@ func renderNamespace(namespace string) *unstructured.Unstructured {
return u
}
// renderRawComponent will return a component in raw type from string
func renderRawComponent(elem ElementFile) (*common2.ApplicationComponent, error) {
baseRawComponent := common2.ApplicationComponent{
Type: "raw",
Name: strings.ReplaceAll(elem.Name, ".", "-"),
func renderK8sObjectsComponent(elems []ElementFile, addonName string) (*common2.ApplicationComponent, error) {
var objects []*unstructured.Unstructured
for _, elem := range elems {
obj, err := renderObject(elem)
if err != nil {
return nil, err
}
objects = append(objects, obj)
}
obj, err := renderObject(elem)
properties := map[string]interface{}{"objects": objects}
propJSON, err := json.Marshal(properties)
if err != nil {
return nil, err
}
baseRawComponent.Properties = util.Object2RawExtension(obj)
baseRawComponent := common2.ApplicationComponent{
Type: "k8s-objects",
Name: addonName + "-resources",
Properties: &runtime.RawExtension{Raw: propJSON},
}
return &baseRawComponent, nil
}
@@ -812,14 +830,9 @@ func Convert2AppName(name string) string {
// RenderArgsSecret render addon enable argument to secret
func RenderArgsSecret(addon *InstallPackage, args map[string]interface{}) *unstructured.Unstructured {
data := make(map[string]string)
for k, v := range args {
switch v := v.(type) {
case bool:
data[k] = strconv.FormatBool(v)
default:
data[k] = fmt.Sprintf("%v", v)
}
argsByte, err := json.Marshal(args)
if err != nil {
return nil
}
sec := v1.Secret{
TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "Secret"},
@@ -827,8 +840,10 @@ func RenderArgsSecret(addon *InstallPackage, args map[string]interface{}) *unstr
Name: Convert2SecName(addon.Name),
Namespace: types.DefaultKubeVelaNS,
},
StringData: data,
Type: v1.SecretTypeOpaque,
Data: map[string][]byte{
AddonParameterDataKey: argsByte,
},
Type: v1.SecretTypeOpaque,
}
u, err := util.Object2Unstructured(sec)
if err != nil {
@@ -837,6 +852,25 @@ func RenderArgsSecret(addon *InstallPackage, args map[string]interface{}) *unstr
return u
}
// FetchArgsFromSecret fetch addon args from secrets
func FetchArgsFromSecret(sec *v1.Secret) (map[string]interface{}, error) {
res := map[string]interface{}{}
if args, ok := sec.Data[AddonParameterDataKey]; ok {
err := json.Unmarshal(args, &res)
if err != nil {
return nil, err
}
return res, nil
}
// this is backward compatibility code for old way to storage parameter
res = make(map[string]interface{}, len(sec.Data))
for k, v := range sec.Data {
res[k] = string(v)
}
return res, nil
}
// Convert2SecName generate addon argument secret name
func Convert2SecName(name string) string {
return addonSecPrefix + name
@@ -853,10 +887,11 @@ type Installer struct {
registryMeta map[string]SourceMeta
args map[string]interface{}
cache *Cache
dc *discovery.DiscoveryClient
}
// NewAddonInstaller will create an installer for addon
func NewAddonInstaller(ctx context.Context, cli client.Client, apply apply.Applicator, config *rest.Config, r *Registry, args map[string]interface{}, cache *Cache) Installer {
func NewAddonInstaller(ctx context.Context, cli client.Client, discoveryClient *discovery.DiscoveryClient, apply apply.Applicator, config *rest.Config, r *Registry, args map[string]interface{}, cache *Cache) Installer {
return Installer{
ctx: ctx,
config: config,
@@ -865,12 +900,18 @@ func NewAddonInstaller(ctx context.Context, cli client.Client, apply apply.Appli
r: r,
args: args,
cache: cache,
dc: discoveryClient,
}
}
func (h *Installer) enableAddon(addon *InstallPackage) error {
var err error
h.addon = addon
err = checkAddonVersionMeetRequired(h.ctx, addon.SystemRequirements, h.cli, h.dc)
if err != nil {
return ErrVersionMismatch
}
if err = h.installDependency(addon); err != nil {
return err
}
@@ -945,6 +986,26 @@ func (h *Installer) installDependency(addon *InstallPackage) error {
return nil
}
// checkDependency checks if addon's dependency
func (h *Installer) checkDependency(addon *InstallPackage) ([]string, error) {
var app v1beta1.Application
var needEnable []string
for _, dep := range addon.Dependencies {
err := h.cli.Get(h.ctx, client.ObjectKey{
Namespace: types.DefaultKubeVelaNS,
Name: Convert2AppName(dep.Name),
}, &app)
if err == nil {
continue
}
if !apierrors.IsNotFound(err) {
return nil, err
}
needEnable = append(needEnable, dep.Name)
}
return needEnable, nil
}
func (h *Installer) dispatchAddonResource(addon *InstallPackage) error {
app, err := RenderApp(h.ctx, addon, h.config, h.cli, h.args)
if err != nil {
@@ -959,7 +1020,7 @@ func (h *Installer) dispatchAddonResource(addon *InstallPackage) error {
app.SetLabels(util.MergeMapOverrideWithDst(app.GetLabels(), map[string]string{oam.LabelAddonRegistry: h.r.Name}))
defs, err := RenderDefinitions(h.addon, h.config)
defs, err := RenderDefinitions(addon, h.config)
if err != nil {
return errors.Wrap(err, "render addon definitions fail")
}
@@ -969,7 +1030,7 @@ func (h *Installer) dispatchAddonResource(addon *InstallPackage) error {
return errors.Wrap(err, "render addon definitions' schema fail")
}
err = h.apply.Apply(h.ctx, app)
err = h.apply.Apply(h.ctx, app, apply.DisableUpdateAnnotation())
if err != nil {
klog.Errorf("fail to create application: %v", err)
return errors.Wrap(err, "fail to create application")
@@ -977,7 +1038,7 @@ func (h *Installer) dispatchAddonResource(addon *InstallPackage) error {
for _, def := range defs {
addOwner(def, app)
err = h.apply.Apply(h.ctx, def)
err = h.apply.Apply(h.ctx, def, apply.DisableUpdateAnnotation())
if err != nil {
return err
}
@@ -985,7 +1046,7 @@ func (h *Installer) dispatchAddonResource(addon *InstallPackage) error {
for _, schema := range schemas {
addOwner(schema, app)
err = h.apply.Apply(h.ctx, schema)
err = h.apply.Apply(h.ctx, schema, apply.DisableUpdateAnnotation())
if err != nil {
return err
}
@@ -994,7 +1055,7 @@ func (h *Installer) dispatchAddonResource(addon *InstallPackage) error {
if h.args != nil && len(h.args) > 0 {
sec := RenderArgsSecret(addon, h.args)
addOwner(sec, app)
err = h.apply.Apply(h.ctx, sec)
err = h.apply.Apply(h.ctx, sec, apply.DisableUpdateAnnotation())
if err != nil {
return err
}
@@ -1072,3 +1133,99 @@ func FetchAddonRelatedApp(ctx context.Context, cli client.Client, addonName stri
}
return app, nil
}
// checkAddonVersionMeetRequired will check the version of cli/ux and kubevela-core-controller whether meet the addon requirement, if not will return an error
// please notice that this func is for check production environment which vela cli/ux or vela core is officalVersion
// if version is for test or debug eg: latest/commit-id/branch-name this func will return nil error
//
// Mismatch errors wrap ErrVersionMismatch so that callers can detect the
// condition with errors.Is while keeping the detailed message.
func checkAddonVersionMeetRequired(ctx context.Context, require *SystemRequirements, k8sClient client.Client, dc *discovery.DiscoveryClient) error {
	// nil means the addon declares no system requirement at all
	if require == nil {
		return nil
	}

	// if not semver version, bypass check cli/ux. eg: {branch name/git commit id/UNKNOWN}
	if version2.IsOfficialKubeVelaVersion(version2.VelaVersion) {
		res, err := checkSemVer(version2.VelaVersion, require.VelaVersion)
		if err != nil {
			return err
		}
		if !res {
			return fmt.Errorf("vela cli/ux version: %s cannot meet requirement: %w", version2.VelaVersion, ErrVersionMismatch)
		}
	}

	// check vela core controller version
	imageVersion, err := fetchVelaCoreImageTag(ctx, k8sClient)
	if err != nil {
		return err
	}
	// if not semver version, bypass check vela-core.
	if version2.IsOfficialKubeVelaVersion(imageVersion) {
		res, err := checkSemVer(imageVersion, require.VelaVersion)
		if err != nil {
			return err
		}
		if !res {
			return fmt.Errorf("the vela core controller: %s cannot meet requirement: %w", imageVersion, ErrVersionMismatch)
		}
	}

	// discovery client is nil so bypass check kubernetes version
	if dc == nil {
		return nil
	}
	k8sVersion, err := dc.ServerVersion()
	if err != nil {
		return err
	}
	// if not semver version, bypass check kubernetes version.
	if version2.IsOfficialKubeVelaVersion(k8sVersion.GitVersion) {
		res, err := checkSemVer(k8sVersion.GitVersion, require.KubernetesVersion)
		if err != nil {
			return err
		}
		if !res {
			return fmt.Errorf("the kubernetes version %s cannot meet requirement: %w", k8sVersion.GitVersion, ErrVersionMismatch)
		}
	}
	return nil
}
// checkSemVer reports whether the version string in actual satisfies the
// constraint expression in require (e.g. ">=v1.2.1, <=v1.2.3"). An empty
// require always satisfies. Parse failures on either side are returned.
func checkSemVer(actual string, require string) (bool, error) {
	if require == "" {
		return true, nil
	}
	// The constraint library understands "1.2.3" but not "v1.2.3", so strip
	// the "v" markers before parsing (require may contain several of them).
	constraint, err := version.NewConstraint(strings.ReplaceAll(require, "v", " "))
	if err != nil {
		return false, err
	}
	semVer, err := version.NewVersion(strings.TrimPrefix(actual, "v"))
	if err != nil {
		return false, err
	}
	return constraint.Check(semVer), nil
}
// fetchVelaCoreImageTag reads the kubevela-core controller Deployment and
// returns the image tag of its vela-core container. An image reference with
// no tag means the latest image is in use, so "latest" is returned.
func fetchVelaCoreImageTag(ctx context.Context, k8sClient client.Client) (string, error) {
	deploy := &appsv1.Deployment{}
	if err := k8sClient.Get(ctx, types2.NamespacedName{Namespace: types.DefaultKubeVelaNS, Name: types.KubeVelaControllerDeployment}, deploy); err != nil {
		return "", err
	}
	var tag string
	for _, c := range deploy.Spec.Template.Spec.Containers {
		if c.Name != types.DefaultKubeVelaReleaseName {
			continue
		}
		// Split on the LAST colon so that registries with a port
		// ("registry:5000/vela-core") are not mistaken for a tag separator.
		idx := strings.LastIndex(c.Image, ":")
		if idx < 0 || strings.Contains(c.Image[idx+1:], "/") {
			// if tag is empty mean use latest image
			return "latest", nil
		}
		tag = c.Image[idx+1:]
	}
	return tag, nil
}

View File

@@ -21,6 +21,10 @@ import (
"fmt"
"time"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types2 "k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -30,6 +34,8 @@ import (
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/oam/util"
)
var _ = Describe("Addon test", func() {
@@ -176,6 +182,94 @@ var _ = Describe("Addon test", func() {
})
})
// "Addon func test" exercises the version helpers against the envtest API
// server: fetchVelaCoreImageTag and checkAddonVersionMeetRequired both read
// the vela-core Deployment created from deployYaml (image tag v1.2.3).
var _ = Describe("Addon func test", func() {
	var deploy appsv1.Deployment
	AfterEach(func() {
		// NOTE(review): Expect(...) without a Should/To matcher never runs an
		// assertion, so a failed delete is silently ignored — confirm intended.
		Expect(k8sClient.Delete(ctx, &deploy))
	})
	It("fetchVelaCoreImageTag func test", func() {
		// Before the deployment exists the lookup must surface NotFound and
		// an empty tag.
		deploy = appsv1.Deployment{}
		tag, err := fetchVelaCoreImageTag(ctx, k8sClient)
		Expect(err).Should(util.NotFoundMatcher{})
		Expect(tag).Should(BeEquivalentTo(""))
		// Create the vela-core deployment, then poll until the tag parsed
		// from its container image is reported.
		Expect(yaml.Unmarshal([]byte(deployYaml), &deploy)).Should(BeNil())
		deploy.SetNamespace(types.DefaultKubeVelaNS)
		Expect(k8sClient.Create(ctx, &deploy)).Should(BeNil())
		Eventually(func() error {
			tag, err := fetchVelaCoreImageTag(ctx, k8sClient)
			if err != nil {
				return err
			}
			if tag != "v1.2.3" {
				return fmt.Errorf("tag missmatch want %s actual %s", "v1.2.3", tag)
			}
			return err
		}, 30*time.Second, 300*time.Millisecond).Should(BeNil())
	})
	It("checkAddonVersionMeetRequired func test", func() {
		// Without the deployment the controller version cannot be fetched.
		deploy = appsv1.Deployment{}
		Expect(checkAddonVersionMeetRequired(ctx, &SystemRequirements{VelaVersion: ">=v1.2.1"}, k8sClient, dc)).Should(util.NotFoundMatcher{})
		Expect(yaml.Unmarshal([]byte(deployYaml), &deploy)).Should(BeNil())
		deploy.SetNamespace(types.DefaultKubeVelaNS)
		Expect(k8sClient.Create(ctx, &deploy)).Should(BeNil())
		// Image tag v1.2.3 satisfies >=v1.2.1 but not >=v1.2.4.
		Expect(checkAddonVersionMeetRequired(ctx, &SystemRequirements{VelaVersion: ">=v1.2.1"}, k8sClient, dc)).Should(BeNil())
		Expect(checkAddonVersionMeetRequired(ctx, &SystemRequirements{VelaVersion: ">=v1.2.4"}, k8sClient, dc)).ShouldNot(BeNil())
	})
})
// "Test addon util func" covers the RenderArgsSecret / FetchArgsFromSecret
// round trip, including backward compatibility with secrets written in the
// old flat StringData format.
var _ = Describe("Test addon util func", func() {
	It("test render and fetch args", func() {
		i := InstallPackage{Meta: Meta{Name: "test-addon"}}
		args := map[string]interface{}{
			"imagePullSecrets": []string{
				"myreg", "myreg1",
			},
		}
		// Render args into a secret, store it, read it back and check the
		// decoded args match (string slices come back as []interface{}).
		u := RenderArgsSecret(&i, args)
		secName := u.GetName()
		secNs := u.GetNamespace()
		Expect(k8sClient.Create(ctx, u)).Should(BeNil())
		sec := v1.Secret{}
		Expect(k8sClient.Get(ctx, types2.NamespacedName{Namespace: secNs, Name: secName}, &sec)).Should(BeNil())
		res, err := FetchArgsFromSecret(&sec)
		Expect(err).Should(BeNil())
		Expect(res).Should(BeEquivalentTo(map[string]interface{}{"imagePullSecrets": []interface{}{"myreg", "myreg1"}}))
	})
	It("test render and fetch args backward compatibility", func() {
		// Older releases stored args as plain StringData entries; fetching
		// must still return them as a string-valued map.
		secArgs := v1.Secret{
			TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "Secret"},
			ObjectMeta: metav1.ObjectMeta{
				Name:      Convert2SecName("test-addon-old-args"),
				Namespace: types.DefaultKubeVelaNS,
			},
			StringData: map[string]string{
				"repo": "www.test.com",
				"tag":  "v1.3.1",
			},
			Type: v1.SecretTypeOpaque,
		}
		secName := secArgs.GetName()
		secNs := secArgs.GetNamespace()
		Expect(k8sClient.Create(ctx, &secArgs)).Should(BeNil())
		sec := v1.Secret{}
		Expect(k8sClient.Get(ctx, types2.NamespacedName{Namespace: secNs, Name: secName}, &sec)).Should(BeNil())
		res, err := FetchArgsFromSecret(&sec)
		Expect(err).Should(BeNil())
		Expect(res).Should(BeEquivalentTo(map[string]interface{}{"repo": "www.test.com", "tag": "v1.3.1"}))
	})
})
const (
appYaml = `apiVersion: core.oam.dev/v1beta1
kind: Application
@@ -201,4 +295,56 @@ spec:
image: crccheck/hello-world
port: 8000
`
deployYaml = `apiVersion: apps/v1
kind: Deployment
metadata:
name: kubevela-vela-core
namespace: vela-system
spec:
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/instance: kubevela
app.kubernetes.io/name: vela-core
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
type: RollingUpdate
template:
metadata:
annotations:
prometheus.io/path: /metrics
prometheus.io/port: "8080"
prometheus.io/scrape: "true"
labels:
app.kubernetes.io/instance: kubevela
app.kubernetes.io/name: vela-core
spec:
containers:
- args:
image: oamdev/vela-core:v1.2.3
imagePullPolicy: Always
name: kubevela
ports:
- containerPort: 9443
name: webhook-server
protocol: TCP
- containerPort: 9440
name: healthz
protocol: TCP
resources:
limits:
cpu: 500m
memory: 1Gi
requests:
cpu: 50m
memory: 20Mi
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30`
)

View File

@@ -20,6 +20,7 @@ import (
"context"
"encoding/json"
"encoding/xml"
"fmt"
"net/http"
"net/http/httptest"
"os"
@@ -27,6 +28,8 @@ import (
"strings"
"testing"
version2 "github.com/oam-dev/kubevela/version"
"github.com/crossplane/crossplane-runtime/pkg/test"
"github.com/google/go-github/v32/github"
v1alpha12 "github.com/oam-dev/cluster-gateway/pkg/apis/cluster/v1alpha1"
@@ -258,6 +261,39 @@ func TestRenderDeploy2RuntimeAddon(t *testing.T) {
assert.Equal(t, steps[len(steps)-1].Type, "deploy2runtime")
}
// TestRenderDefinitions checks that a control-plane addon renders exactly one
// CUE definition (a TraitDefinition) and that the rendered application
// carries no workflow when the addon does not target a runtime cluster.
func TestRenderDefinitions(t *testing.T) {
	testAddon := baseAddon
	testAddon.Meta.DeployTo = &DeployTo{DisableControlPlane: false, RuntimeCluster: false}

	defs, err := RenderDefinitions(&testAddon, nil)
	assert.NoError(t, err)
	assert.Equal(t, len(defs), 1)

	renderedDef := defs[0]
	assert.Equal(t, renderedDef.GetAPIVersion(), "core.oam.dev/v1beta1")
	assert.Equal(t, renderedDef.GetKind(), "TraitDefinition")

	app, err := RenderApp(ctx, &testAddon, nil, nil, map[string]interface{}{})
	assert.NoError(t, err)
	// addon which app work on no-runtime-cluster mode workflow is nil
	assert.Nil(t, app.Spec.Workflow)
}
// TestRenderK8sObjects verifies that an addon shipping multiple raw YAML
// templates is rendered into a single component of type "k8s-objects".
func TestRenderK8sObjects(t *testing.T) {
	yamlAddon := multiYamlAddon
	yamlAddon.Meta.DeployTo = &DeployTo{DisableControlPlane: false, RuntimeCluster: false}

	app, err := RenderApp(ctx, &yamlAddon, nil, nil, map[string]interface{}{})
	assert.NoError(t, err)
	assert.Equal(t, len(app.Spec.Components), 1)
	assert.Equal(t, app.Spec.Components[0].Type, "k8s-objects")
}
func TestGetAddonStatus(t *testing.T) {
getFunc := test.MockGetFn(func(ctx context.Context, key client.ObjectKey, obj client.Object) error {
switch key.Name {
@@ -402,6 +438,22 @@ var baseAddon = InstallPackage{
},
}
// multiYamlAddon is a fixture install package carrying two raw YAML templates
// (testYamlObject1/2); TestRenderK8sObjects expects them to be rendered into
// one k8s-objects component.
var multiYamlAddon = InstallPackage{
	Meta: Meta{
		Name: "test-render-multi-yaml-addon",
	},
	YAMLTemplates: []ElementFile{
		{
			Data: testYamlObject1,
			Name: "test-object-1",
		},
		{
			Data: testYamlObject2,
			Name: "test-object-2",
		},
	},
}
var testCueDef = `annotations: {
type: "trait"
annotations: {}
@@ -433,6 +485,53 @@ template: {
}
`
// testYamlObject1 is a plain Deployment manifest used as the first YAML
// template of multiYamlAddon.
var testYamlObject1 = `
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.14.2
        ports:
        - containerPort: 80
`
// testYamlObject2 is a second Deployment manifest used as the other YAML
// template of multiYamlAddon.
var testYamlObject2 = `
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment-2
  labels:
    app: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.14.2
        ports:
        - containerPort: 80
`
func TestRenderApp4Observability(t *testing.T) {
k8sClient := fake.NewClientBuilder().Build()
testcases := []struct {
@@ -555,3 +654,96 @@ func TestGitLabReaderNotPanic(t *testing.T) {
_, err := NewAsyncReader("https://gitlab.com/test/catalog", "", "addons", "", gitType)
assert.EqualError(t, err, "git type repository only support github for now")
}
// TestCheckSemVer table-tests checkSemVer across constraint operators,
// "v"-prefixed and bare versions, and compound requirements. (The previous
// case struct carried an unused nilError field, removed here.)
func TestCheckSemVer(t *testing.T) {
	testCases := []struct {
		actual  string
		require string
		res     bool
	}{
		{actual: "v1.2.1", require: "<=v1.2.1", res: true},
		{actual: "v1.2.1", require: ">v1.2.1", res: false},
		{actual: "v1.2.1", require: "<=v1.2.3", res: true},
		{actual: "v1.2", require: "<=v1.2.3", res: true},
		{actual: "v1.2.1", require: ">v1.2.3", res: false},
		{actual: "v1.2.1", require: "=v1.2.1", res: true},
		{actual: "1.2.1", require: "=v1.2.1", res: true},
		// empty requirement always satisfies
		{actual: "1.2.1", require: "", res: true},
		// compound constraints
		{actual: "v1.2.2", require: "<=v1.2.3, >=v1.2.1", res: true},
		{actual: "v1.2.0", require: "v1.2.0, <=v1.2.3", res: true},
		{actual: "1.2.2", require: "v1.2.2", res: true},
		// leading zero is normalized by the version parser
		{actual: "1.2.02", require: "v1.2.2", res: true},
	}
	for _, testCase := range testCases {
		result, err := checkSemVer(testCase.actual, testCase.require)
		assert.NoError(t, err)
		assert.Equal(t, result, testCase.res)
	}
}
// TestCheckAddonVersionMeetRequired drives checkAddonVersionMeetRequired with
// a mock client whose Get succeeds with an empty Deployment (so the image tag
// is empty and the controller check is bypassed).
//
// Fixes two defects in the previous version: the negative case used
// assert.Error(t, fmt.Errorf(...)) — asserting on a freshly built, always
// non-nil error — so the test could never fail; and the global
// version2.VelaVersion was mutated without being restored.
func TestCheckAddonVersionMeetRequired(t *testing.T) {
	k8sClient := &test.MockClient{
		MockGet: test.NewMockGetFn(nil, func(obj client.Object) error {
			return nil
		}),
	}
	ctx := context.Background()

	// restore the global so other tests see the original version
	originalVersion := version2.VelaVersion
	defer func() { version2.VelaVersion = originalVersion }()

	// non-semver cli version bypasses the cli/ux check
	assert.NoError(t, checkAddonVersionMeetRequired(ctx, &SystemRequirements{VelaVersion: ">=1.2.4"}, k8sClient, nil))

	// cli version below the requirement must fail
	version2.VelaVersion = "v1.2.3"
	assert.Error(t, checkAddonVersionMeetRequired(ctx, &SystemRequirements{VelaVersion: ">=1.2.4"}, k8sClient, nil))

	// cli version meeting the requirement must pass
	version2.VelaVersion = "v1.2.4"
	assert.NoError(t, checkAddonVersionMeetRequired(ctx, &SystemRequirements{VelaVersion: ">=1.2.4"}, k8sClient, nil))
}

View File

@@ -35,6 +35,9 @@ var (
// ErrNotExist means addon not exists
ErrNotExist = NewAddonError("addon not exist")
// ErrVersionMismatch means addon version requirement mismatch
ErrVersionMismatch = NewAddonError("addon version requirements mismatch")
)
// WrapErrRateLimit return ErrRateLimit if is the situation, or return error directly

View File

@@ -21,6 +21,8 @@ import (
"encoding/json"
"fmt"
"k8s.io/client-go/discovery"
"k8s.io/klog/v2"
v1 "k8s.io/api/core/v1"
@@ -49,8 +51,8 @@ const (
)
// EnableAddon will enable addon with dependency check, source is where addon from.
func EnableAddon(ctx context.Context, name string, cli client.Client, apply apply.Applicator, config *rest.Config, r Registry, args map[string]interface{}, cache *Cache) error {
h := NewAddonInstaller(ctx, cli, apply, config, &r, args, cache)
func EnableAddon(ctx context.Context, name string, cli client.Client, discoveryClient *discovery.DiscoveryClient, apply apply.Applicator, config *rest.Config, r Registry, args map[string]interface{}, cache *Cache) error {
h := NewAddonInstaller(ctx, cli, discoveryClient, apply, config, &r, args, cache)
pkg, err := h.loadInstallPackage(name)
if err != nil {
return err
@@ -76,7 +78,7 @@ func DisableAddon(ctx context.Context, cli client.Client, name string) error {
}
// EnableAddonByLocalDir enable an addon from local dir
func EnableAddonByLocalDir(ctx context.Context, name string, dir string, cli client.Client, applicator apply.Applicator, config *rest.Config, args map[string]interface{}) error {
func EnableAddonByLocalDir(ctx context.Context, name string, dir string, cli client.Client, dc *discovery.DiscoveryClient, applicator apply.Applicator, config *rest.Config, args map[string]interface{}) error {
r := localReader{dir: dir, name: name}
metas, err := r.ListAddonMeta()
if err != nil {
@@ -91,7 +93,15 @@ func EnableAddonByLocalDir(ctx context.Context, name string, dir string, cli cli
if err != nil {
return err
}
h := NewAddonInstaller(ctx, cli, applicator, config, &Registry{Name: LocalAddonRegistryName}, args, nil)
h := NewAddonInstaller(ctx, cli, dc, applicator, config, &Registry{Name: LocalAddonRegistryName}, args, nil)
needEnableAddonNames, err := h.checkDependency(pkg)
if err != nil {
return err
}
if len(needEnableAddonNames) > 0 {
return fmt.Errorf("you must first enable dependencies: %v", needEnableAddonNames)
}
err = h.enableAddon(pkg)
if err != nil {
return err

View File

@@ -169,7 +169,6 @@ func (r *Registry) BuildReader() (AsyncReader, error) {
return NewAsyncReader(g.URL, "", g.Path, g.Token, gitType)
}
return nil, errors.New("registry don't have enough info to build a reader")
}
// GetUIData get UIData of an addon

View File

@@ -21,6 +21,8 @@ import (
"testing"
"time"
"k8s.io/client-go/discovery"
v12 "k8s.io/api/core/v1"
crdv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -49,6 +51,7 @@ var testEnv *envtest.Environment
var dm discoverymapper.DiscoveryMapper
var pd *packages.PackageDiscover
var testns string
var dc *discovery.DiscoveryClient
func TestAddon(t *testing.T) {
RegisterFailHandler(Fail)
@@ -79,6 +82,11 @@ var _ = BeforeSuite(func(done Done) {
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme})
Expect(err).ToNot(HaveOccurred())
Expect(k8sClient).ToNot(BeNil())
dc, err = discovery.NewDiscoveryClientForConfig(cfg)
Expect(err).ToNot(HaveOccurred())
Expect(dc).ShouldNot(BeNil())
dm, err = discoverymapper.New(cfg)
Expect(err).ToNot(HaveOccurred())
Expect(dm).ToNot(BeNil())

View File

@@ -54,16 +54,17 @@ type InstallPackage struct {
// Meta defines the format for a single addon
type Meta struct {
Name string `json:"name" validate:"required"`
Version string `json:"version"`
Description string `json:"description"`
Icon string `json:"icon"`
URL string `json:"url,omitempty"`
Tags []string `json:"tags,omitempty"`
DeployTo *DeployTo `json:"deployTo,omitempty"`
Dependencies []*Dependency `json:"dependencies,omitempty"`
NeedNamespace []string `json:"needNamespace,omitempty"`
Invisible bool `json:"invisible"`
Name string `json:"name" validate:"required"`
Version string `json:"version"`
Description string `json:"description"`
Icon string `json:"icon"`
URL string `json:"url,omitempty"`
Tags []string `json:"tags,omitempty"`
DeployTo *DeployTo `json:"deployTo,omitempty"`
Dependencies []*Dependency `json:"dependencies,omitempty"`
NeedNamespace []string `json:"needNamespace,omitempty"`
Invisible bool `json:"invisible"`
SystemRequirements *SystemRequirements `json:"system,omitempty"`
}
// DeployTo defines where the addon to deploy to
@@ -84,3 +85,9 @@ type ElementFile struct {
Data string
Name string
}
// SystemRequirements declares the system version constraints an addon needs
// before it can be enabled; empty fields mean no constraint for that
// component (see checkAddonVersionMeetRequired).
type SystemRequirements struct {
	// VelaVersion is a semver constraint (e.g. ">=v1.2.1") checked against
	// both the vela cli/ux version and the vela-core controller image tag.
	VelaVersion string `json:"vela,omitempty"`
	// KubernetesVersion is a semver constraint checked against the cluster's
	// reported GitVersion.
	KubernetesVersion string `json:"kubernetes,omitempty"`
}

View File

@@ -17,6 +17,7 @@ limitations under the License.
package clients
import (
"k8s.io/client-go/discovery"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/config"
@@ -84,3 +85,16 @@ func GetPackageDiscover() (*packages.PackageDiscover, error) {
}
return pd, nil
}
// GetDiscoveryClient builds a discovery client from the active kube config.
func GetDiscoveryClient() (*discovery.DiscoveryClient, error) {
	conf, err := GetKubeConfig()
	if err != nil {
		return nil, err
	}
	return discovery.NewDiscoveryClientForConfig(conf)
}

View File

@@ -127,8 +127,8 @@ type AddonBaseStatus struct {
type DetailAddonResponse struct {
addon.Meta
APISchema *openapi3.Schema `json:"schema"`
UISchema []*utils.UIParameter `json:"uiSchema"`
APISchema *openapi3.Schema `json:"schema"`
UISchema utils.UISchema `json:"uiSchema"`
// More details about the addon, e.g. README
Detail string `json:"detail,omitempty"`
@@ -147,9 +147,9 @@ type AddonDefinition struct {
// AddonStatusResponse defines the format of addon status response
type AddonStatusResponse struct {
AddonBaseStatus
Args map[string]string `json:"args"`
EnablingProgress *EnablingProgress `json:"enabling_progress,omitempty"`
AppStatus common.AppStatus `json:"appStatus,omitempty"`
Args map[string]interface{} `json:"args"`
EnablingProgress *EnablingProgress `json:"enabling_progress,omitempty"`
AppStatus common.AppStatus `json:"appStatus,omitempty"`
// the status of multiple clusters
Clusters map[string]map[string]interface{} `json:"clusters,omitempty"`
}
@@ -315,7 +315,7 @@ type ApplicationStatusResponse struct {
type ApplicationStatisticsResponse struct {
EnvCount int64 `json:"envCount"`
TargetCount int64 `json:"targetCount"`
RevisonCount int64 `json:"revisonCount"`
RevisionCount int64 `json:"revisionCount"`
WorkflowCount int64 `json:"workflowCount"`
}

View File

@@ -26,6 +26,8 @@ import (
"sync"
"time"
"k8s.io/client-go/discovery"
k8stypes "k8s.io/apimachinery/pkg/types"
v1 "k8s.io/api/core/v1"
@@ -101,6 +103,10 @@ func NewAddonUsecase(cacheTime time.Duration) AddonHandler {
if err != nil {
panic(err)
}
dc, err := clients.GetDiscoveryClient()
if err != nil {
panic(err)
}
ds := pkgaddon.NewRegistryDataStore(kubecli)
cache := pkgaddon.NewCache(ds)
@@ -114,6 +120,7 @@ func NewAddonUsecase(cacheTime time.Duration) AddonHandler {
config: config,
apply: apply.NewAPIApplicator(kubecli),
mutex: new(sync.RWMutex),
discoveryClient: dc,
}
}
@@ -123,6 +130,7 @@ type defaultAddonHandler struct {
kubeClient client.Client
config *rest.Config
apply apply.Applicator
discoveryClient *discovery.DiscoveryClient
mutex *sync.RWMutex
}
@@ -204,10 +212,12 @@ func (u *defaultAddonHandler) StatusAddon(ctx context.Context, name string) (*ap
if err != nil && !errors2.IsNotFound(err) {
return nil, bcode.ErrAddonSecretGet
} else if errors2.IsNotFound(err) {
res.Args = make(map[string]string, len(sec.Data))
for k, v := range sec.Data {
res.Args[k] = string(v)
}
return &res, nil
}
res.Args, err = pkgaddon.FetchArgsFromSecret(&sec)
if err != nil {
return nil, err
}
return &res, nil
@@ -351,15 +361,23 @@ func (u *defaultAddonHandler) EnableAddon(ctx context.Context, name string, args
return err
}
for _, r := range registries {
err = pkgaddon.EnableAddon(ctx, name, u.kubeClient, u.apply, u.config, r, args.Args, u.addonRegistryCache)
err = pkgaddon.EnableAddon(ctx, name, u.kubeClient, u.discoveryClient, u.apply, u.config, r, args.Args, u.addonRegistryCache)
if err == nil {
return nil
}
if err != nil && errors.As(err, &pkgaddon.ErrNotExist) {
// if reach this line error must is not nil
if errors.Is(err, pkgaddon.ErrNotExist) {
// one registry return addon not exist error, should not break other registry func
continue
}
// wrap this error with special bcode
if errors.Is(err, pkgaddon.ErrVersionMismatch) {
return bcode.ErrAddonSystemVersionMismatch
}
// except `addon not found`, other errors should return directly
return err
}
return bcode.ErrAddonNotExist
}
@@ -411,13 +429,21 @@ func (u *defaultAddonHandler) UpdateAddon(ctx context.Context, name string, args
}
for _, r := range registries {
err = pkgaddon.EnableAddon(ctx, name, u.kubeClient, u.apply, u.config, r, args.Args, u.addonRegistryCache)
err = pkgaddon.EnableAddon(ctx, name, u.kubeClient, u.discoveryClient, u.apply, u.config, r, args.Args, u.addonRegistryCache)
if err == nil {
return nil
}
if err != nil && !errors.Is(err, pkgaddon.ErrNotExist) {
return bcode.WrapGithubRateLimitErr(err)
if errors.Is(err, pkgaddon.ErrNotExist) {
continue
}
// wrap this error with special bcode
if errors.Is(err, pkgaddon.ErrVersionMismatch) {
return bcode.ErrAddonSystemVersionMismatch
}
// except `addon not found`, other errors should return directly
return err
}
return bcode.ErrAddonNotExist
}

View File

@@ -1343,7 +1343,7 @@ func (c *applicationUsecaseImpl) Statistics(ctx context.Context, app *model.Appl
return &apisv1.ApplicationStatisticsResponse{
EnvCount: int64(len(envbinding)),
TargetCount: int64(len(targetMap)),
RevisonCount: count,
RevisionCount: count,
WorkflowCount: c.workflowUsecase.CountWorkflow(ctx, app),
}, nil
}

View File

@@ -33,6 +33,7 @@ import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
velatypes "github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/apiserver/clients"
"github.com/oam-dev/kubevela/pkg/apiserver/datastore"
"github.com/oam-dev/kubevela/pkg/apiserver/log"
@@ -198,14 +199,14 @@ func joinClusterByKubeConfigString(ctx context.Context, k8sClient client.Client,
defer func() {
_ = os.Remove(tmpFileName)
}()
cluster, err := multicluster.JoinClusterByKubeConfig(ctx, k8sClient, tmpFileName, clusterName)
clusterConfig, err := multicluster.JoinClusterByKubeConfig(ctx, k8sClient, tmpFileName, clusterName, multicluster.JoinClusterCreateNamespaceOption(velatypes.DefaultKubeVelaNS))
if err != nil {
if errors.Is(err, multicluster.ErrClusterExists) {
return "", bcode.ErrClusterExistsInKubernetes
}
return "", errors.Wrapf(err, "failed to join cluster")
}
return cluster.Server, nil
return clusterConfig.Cluster.Server, nil
}
func createClusterModelFromRequest(req apis.CreateClusterRequest, oldCluster *model.Cluster) (newCluster *model.Cluster) {

View File

@@ -296,6 +296,9 @@ func patchSchema(defaultSchema, customSchema []*utils.UIParameter) []*utils.UIPa
if cusSchema.Additional != nil {
dSchema.Additional = cusSchema.Additional
}
if cusSchema.Style != nil {
dSchema.Style = cusSchema.Style
}
}
}
sort.Slice(defaultSchema, func(i, j int) bool {

View File

@@ -61,6 +61,9 @@ var (
// ErrAddonDependencyNotSatisfy means addon's dependencies is not enabled
ErrAddonDependencyNotSatisfy = NewBcode(500, 50017, "addon's dependencies is not enabled")
// ErrAddonSystemVersionMismatch means addon's version required mismatch
ErrAddonSystemVersionMismatch = NewBcode(400, 50018, "addon's system version requirement mismatch")
)
// isGithubRateLimit check if error is github rate limit

View File

@@ -21,6 +21,9 @@ import (
"strings"
)
// UISchema is the list of UI parameters that together describe a form.
type UISchema []*UIParameter
// UIParameter Structured import table simple UI model
type UIParameter struct {
Sort uint `json:"sort"`
@@ -29,6 +32,7 @@ type UIParameter struct {
Validate *Validate `json:"validate,omitempty"`
JSONKey string `json:"jsonKey"`
UIType string `json:"uiType"`
Style *Style `json:"style,omitempty"`
// means disable parameter in ui
Disable *bool `json:"disable,omitempty"`
SubParameterGroupOption []GroupOption `json:"subParameterGroupOption,omitempty"`
@@ -37,6 +41,12 @@ type UIParameter struct {
Additional *bool `json:"additional,omitempty"`
}
// Style holds presentation hints for rendering a parameter in the UI.
type Style struct {
	// ColSpan the width of a responsive layout
	ColSpan int `json:"colSpan"`
}
// GroupOption define multiple data structure composition options.
type GroupOption struct {
Label string `json:"label"`
@@ -53,6 +63,8 @@ type Validate struct {
Pattern string `json:"pattern,omitempty"`
Options []Option `json:"options,omitempty"`
DefaultValue interface{} `json:"defaultValue,omitempty"`
// the parameter cannot be changed twice.
Immutable bool `json:"immutable"`
}
// Option select option
@@ -61,13 +73,6 @@ type Option struct {
Value interface{} `json:"value"`
}
// ParseUIParameterFromDefinition cue of parameter in Definitions was analyzed to obtain the form description model.
func ParseUIParameterFromDefinition(definition []byte) ([]*UIParameter, error) {
var params []*UIParameter
return params, nil
}
// FirstUpper Sets the first letter of the string to upper.
func FirstUpper(s string) string {
if s == "" {

View File

@@ -113,16 +113,6 @@ func (c *applicationWebService) GetWebService() *restful.WebService {
Returns(400, "", bcode.Bcode{}).
Writes(apis.ApplicationStatisticsResponse{}))
ws.Route(ws.PUT("/{name}").To(c.updateApplication).
Doc("update one application ").
Metadata(restfulspec.KeyOpenAPITags, tags).
Filter(c.appCheckFilter).
Param(ws.PathParameter("name", "identifier of the application ").DataType("string")).
Reads(apis.UpdateApplicationRequest{}).
Returns(200, "", apis.ApplicationBase{}).
Returns(400, "", bcode.Bcode{}).
Writes(apis.ApplicationBase{}))
ws.Route(ws.POST("/{name}/triggers").To(c.createApplicationTrigger).
Doc("create one application trigger").
Metadata(restfulspec.KeyOpenAPITags, tags).

View File

@@ -224,7 +224,8 @@ func (af *Appfile) PrepareWorkflowAndPolicy(ctx context.Context) ([]*unstructure
}
func (af *Appfile) generateUnstructured(workload *Workload) (*unstructured.Unstructured, error) {
un, err := generateUnstructuredFromCUEModule(workload, af.Name, af.AppRevisionName, af.Namespace, af.Components, af.Artifacts)
ctxData := GenerateContextDataFromAppFile(af, workload.Name)
un, err := generateUnstructuredFromCUEModule(workload, af.Artifacts, ctxData)
if err != nil {
return nil, err
}
@@ -235,13 +236,13 @@ func (af *Appfile) generateUnstructured(workload *Workload) (*unstructured.Unstr
return un, nil
}
func generateUnstructuredFromCUEModule(wl *Workload, appName, revision, ns string, components []common.ApplicationComponent, artifacts []*types.ComponentManifest) (*unstructured.Unstructured, error) {
pCtx := process.NewPolicyContext(ns, wl.Name, appName, revision, components)
func generateUnstructuredFromCUEModule(wl *Workload, artifacts []*types.ComponentManifest, ctxData process.ContextData) (*unstructured.Unstructured, error) {
pCtx := process.NewContext(ctxData)
pCtx.PushData(model.ContextDataArtifacts, prepareArtifactsData(artifacts))
if err := wl.EvalContext(pCtx); err != nil {
return nil, errors.Wrapf(err, "evaluate base template app=%s in namespace=%s", appName, ns)
return nil, errors.Wrapf(err, "evaluate base template app=%s in namespace=%s", ctxData.AppName, ctxData.Namespace)
}
return makeWorkloadWithContext(pCtx, wl, ns, appName)
return makeWorkloadWithContext(pCtx, wl, ctxData.Namespace, ctxData.AppName)
}
// artifacts contains resources in unstructured shape of all components
@@ -292,17 +293,18 @@ func (af *Appfile) GenerateComponentManifest(wl *Workload) (*types.ComponentMani
if af.Namespace == "" {
af.Namespace = corev1.NamespaceDefault
}
ctxData := GenerateContextDataFromAppFile(af, wl.Name)
// generate context here to avoid nil pointer panic
wl.Ctx = NewBasicContext(af.Name, wl.Name, af.AppRevisionName, af.Namespace, wl.Params)
wl.Ctx = NewBasicContext(GenerateContextDataFromAppFile(af, wl.Name), wl.Params)
switch wl.CapabilityCategory {
case types.HelmCategory:
return generateComponentFromHelmModule(wl, af.Name, af.AppRevisionName, af.Namespace)
return generateComponentFromHelmModule(wl, ctxData)
case types.KubeCategory:
return generateComponentFromKubeModule(wl, af.Name, af.AppRevisionName, af.Namespace)
return generateComponentFromKubeModule(wl, ctxData)
case types.TerraformCategory:
return generateComponentFromTerraformModule(wl, af.Name, af.Namespace)
default:
return generateComponentFromCUEModule(wl, af.Name, af.AppRevisionName, af.Namespace)
return generateComponentFromCUEModule(wl, ctxData)
}
}
@@ -471,31 +473,31 @@ func (af *Appfile) setWorkloadRefToTrait(wlRef corev1.ObjectReference, trait *un
}
// PrepareProcessContext prepares a DSL process Context
func PrepareProcessContext(wl *Workload, applicationName, revision, namespace string) (process.Context, error) {
func PrepareProcessContext(wl *Workload, ctxData process.ContextData) (process.Context, error) {
if wl.Ctx == nil {
wl.Ctx = NewBasicContext(applicationName, wl.Name, revision, namespace, wl.Params)
wl.Ctx = NewBasicContext(ctxData, wl.Params)
}
if err := wl.EvalContext(wl.Ctx); err != nil {
return nil, errors.Wrapf(err, "evaluate base template app=%s in namespace=%s", applicationName, namespace)
return nil, errors.Wrapf(err, "evaluate base template app=%s in namespace=%s", ctxData.AppName, ctxData.Namespace)
}
return wl.Ctx, nil
}
// NewBasicContext prepares a basic DSL process Context
func NewBasicContext(applicationName, workloadName, revision, namespace string, params map[string]interface{}) process.Context {
pCtx := process.NewContext(namespace, workloadName, applicationName, revision)
func NewBasicContext(contextData process.ContextData, params map[string]interface{}) process.Context {
pCtx := process.NewContext(contextData)
if params != nil {
pCtx.SetParameters(params)
}
return pCtx
}
func generateComponentFromCUEModule(wl *Workload, appName, revision, ns string) (*types.ComponentManifest, error) {
pCtx, err := PrepareProcessContext(wl, appName, revision, ns)
func generateComponentFromCUEModule(wl *Workload, ctxData process.ContextData) (*types.ComponentManifest, error) {
pCtx, err := PrepareProcessContext(wl, ctxData)
if err != nil {
return nil, err
}
return baseGenerateComponent(pCtx, wl, appName, ns)
return baseGenerateComponent(pCtx, wl, ctxData.AppName, ctxData.Namespace)
}
func generateComponentFromTerraformModule(wl *Workload, appName, ns string) (*types.ComponentManifest, error) {
@@ -664,7 +666,7 @@ output: {
return templateStr, nil
}
func generateComponentFromKubeModule(wl *Workload, appName, revision, ns string) (*types.ComponentManifest, error) {
func generateComponentFromKubeModule(wl *Workload, ctxData process.ContextData) (*types.ComponentManifest, error) {
templateStr, err := GenerateCUETemplate(wl)
if err != nil {
return nil, err
@@ -672,7 +674,7 @@ func generateComponentFromKubeModule(wl *Workload, appName, revision, ns string)
wl.FullTemplate.TemplateStr = templateStr
// re-use the way CUE module generates comp & acComp
compManifest, err := generateComponentFromCUEModule(wl, appName, revision, ns)
compManifest, err := generateComponentFromCUEModule(wl, ctxData)
if err != nil {
return nil, err
}
@@ -839,7 +841,7 @@ func setParameterValuesToKubeObj(obj *unstructured.Unstructured, values paramVal
return nil
}
func generateComponentFromHelmModule(wl *Workload, appName, revision, ns string) (*types.ComponentManifest, error) {
func generateComponentFromHelmModule(wl *Workload, ctxData process.ContextData) (*types.ComponentManifest, error) {
templateStr, err := GenerateCUETemplate(wl)
if err != nil {
return nil, err
@@ -849,22 +851,38 @@ func generateComponentFromHelmModule(wl *Workload, appName, revision, ns string)
// re-use the way CUE module generates comp & acComp
compManifest := &types.ComponentManifest{
Name: wl.Name,
Namespace: ns,
Namespace: ctxData.Namespace,
ExternalRevision: wl.ExternalRevision,
StandardWorkload: &unstructured.Unstructured{},
}
if wl.FullTemplate.Reference.Type != types.AutoDetectWorkloadDefinition {
compManifest, err = generateComponentFromCUEModule(wl, appName, revision, ns)
compManifest, err = generateComponentFromCUEModule(wl, ctxData)
if err != nil {
return nil, err
}
}
rls, repo, err := helm.RenderHelmReleaseAndHelmRepo(wl.FullTemplate.Helm, wl.Name, appName, ns, wl.Params)
rls, repo, err := helm.RenderHelmReleaseAndHelmRepo(wl.FullTemplate.Helm, wl.Name, ctxData.AppName, ctxData.Namespace, wl.Params)
if err != nil {
return nil, err
}
compManifest.PackagedWorkloadResources = []*unstructured.Unstructured{rls, repo}
return compManifest, nil
}
// GenerateContextDataFromAppFile generates process context data from app file.
// The workflow name and publish version are copied from the application
// annotations when present.
func GenerateContextDataFromAppFile(appfile *Appfile, wlName string) process.ContextData {
	ctxData := process.ContextData{
		Namespace:       appfile.Namespace,
		AppName:         appfile.Name,
		CompName:        wlName,
		AppRevisionName: appfile.AppRevisionName,
		Components:      appfile.Components,
	}
	// Indexing a nil map would be safe, but keep the explicit guard so the
	// annotation lookups are clearly conditional on annotations existing.
	if ann := appfile.AppAnnotations; ann != nil {
		ctxData.WorkflowName = ann[oam.AnnotationWorkflowName]
		ctxData.PublishVersion = ann[oam.AnnotationPublishVersion]
	}
	return ctxData
}

View File

@@ -43,6 +43,7 @@ import (
oamtypes "github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/cue/definition"
"github.com/oam-dev/kubevela/pkg/cue/model"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/util"
)
@@ -873,7 +874,12 @@ variable "password" {
revision: "v1",
}
pCtx := NewBasicContext(args.appName, args.wl.Name, args.revision, ns, args.wl.Params)
ctxData := GenerateContextDataFromAppFile(&Appfile{
Name: args.appName,
Namespace: ns,
AppRevisionName: args.revision,
}, args.wl.Name)
pCtx := NewBasicContext(ctxData, args.wl.Params)
comp, err := evalWorkloadWithContext(pCtx, args.wl, ns, args.appName, compName)
Expect(comp.StandardWorkload).ShouldNot(BeNil())
Expect(comp.Name).Should(Equal(""))
@@ -1329,7 +1335,17 @@ func TestBaseGenerateComponent(t *testing.T) {
var ns = "test-ns"
var traitName = "mytrait"
var wlName = "my-wl-1"
pContext := NewBasicContext(appName, wlName, "rev-1", ns, nil)
var workflowName = "my-wf"
var publishVersion = "123"
ctxData := GenerateContextDataFromAppFile(&Appfile{
Name: appName,
Namespace: ns,
AppAnnotations: map[string]string{
oam.AnnotationWorkflowName: workflowName,
oam.AnnotationPublishVersion: publishVersion,
},
}, wlName)
pContext := NewBasicContext(ctxData, nil)
base := `
apiVersion: "apps/v1"
kind: "Deployment"
@@ -1359,11 +1375,14 @@ if context.componentType == "stateless" {
}
name: context.name
envSourceContainerName: context.name
workflowName: context.workflowName
publishVersion: context.publishVersion
}`,
}
wl := &Workload{Type: "stateful", Traits: []*Trait{tr}}
cm, err := baseGenerateComponent(pContext, wl, appName, ns)
assert.NilError(t, err)
assert.Equal(t, cm.Traits[0].Object["kind"], "StatefulSet")
assert.Equal(t, cm.Traits[0].Object["name"], wlName)
assert.Equal(t, cm.Traits[0].Object["workflowName"], workflowName)
assert.Equal(t, cm.Traits[0].Object["publishVersion"], publishVersion)
}

View File

@@ -33,7 +33,8 @@ func (p *Parser) ValidateCUESchematicAppfile(a *Appfile) error {
if wl.CapabilityCategory != types.CUECategory {
continue
}
pCtx, err := newValidationProcessContext(wl, a.Name, a.AppRevisionName, a.Namespace)
ctxData := GenerateContextDataFromAppFile(a, wl.Name)
pCtx, err := newValidationProcessContext(wl, ctxData)
if err != nil {
return errors.WithMessagef(err, "cannot create the validation process context of app=%s in namespace=%s", a.Name, a.Namespace)
}
@@ -49,7 +50,7 @@ func (p *Parser) ValidateCUESchematicAppfile(a *Appfile) error {
return nil
}
func newValidationProcessContext(wl *Workload, appName, revisionName, ns string) (process.Context, error) {
func newValidationProcessContext(wl *Workload, ctxData process.ContextData) (process.Context, error) {
baseHooks := []process.BaseHook{
// add more hook funcs here to validate CUE base
}
@@ -58,9 +59,11 @@ func newValidationProcessContext(wl *Workload, appName, revisionName, ns string)
validateAuxiliaryNameUnique(),
}
pCtx := process.NewContextWithHooks(ns, wl.Name, appName, revisionName, baseHooks, auxiliaryHooks)
ctxData.BaseHooks = baseHooks
ctxData.AuxiliaryHooks = auxiliaryHooks
pCtx := process.NewContext(ctxData)
if err := wl.EvalContext(pCtx); err != nil {
return nil, errors.Wrapf(err, "evaluate base template app=%s in namespace=%s", appName, ns)
return nil, errors.Wrapf(err, "evaluate base template app=%s in namespace=%s", ctxData.AppName, ctxData.Namespace)
}
return pCtx, nil
}

View File

@@ -58,7 +58,13 @@ var _ = Describe("Test validate CUE schematic Appfile", func() {
},
engine: definition.NewWorkloadAbstractEngine("myweb", pd),
}
pCtx, err := newValidationProcessContext(wl, "myapp", "myapp-v1", "test-ns")
ctxData := GenerateContextDataFromAppFile(&Appfile{
Name: "myapp",
Namespace: "test-ns",
AppRevisionName: "myapp-v1",
}, wl.Name)
pCtx, err := newValidationProcessContext(wl, ctxData)
Expect(err).Should(BeNil())
Eventually(func() string {
for _, tr := range wl.Traits {

View File

@@ -51,7 +51,7 @@ func (c *HTTPCmd) Run(meta *registry.Meta) (res interface{}, err error) {
var (
r io.Reader
client = &http.Client{
Transport: &http.Transport{},
Transport: http.DefaultTransport,
Timeout: time.Second * 3,
}
)

View File

@@ -1,153 +0,0 @@
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package clustermanager
import (
"context"
"fmt"
"github.com/oam-dev/cluster-gateway/pkg/apis/cluster/v1alpha1"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
crdv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apierror "k8s.io/apimachinery/pkg/api/errors"
k8stypes "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/clientcmd"
clusterv1 "open-cluster-management.io/api/cluster/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/multicluster"
"github.com/oam-dev/kubevela/pkg/utils/common"
)
// GetClient returns a kube client for given kubeConfigData.
// The raw kubeconfig bytes are parsed into a client config, resolved to a
// REST config, and then wrapped in a controller-runtime client using the
// common scheme.
func GetClient(kubeConfigData []byte) (client.Client, error) {
	cfg, err := clientcmd.NewClientConfigFromBytes(kubeConfigData)
	if err != nil {
		return nil, err
	}
	restCfg, err := cfg.ClientConfig()
	if err != nil {
		return nil, err
	}
	return client.New(restCfg, client.Options{Scheme: common.Scheme})
}
// GetRegisteredClusters will get all registered clusters in control plane.
// It collects cluster-gateway secret-based clusters first, then (if the OCM
// ManagedCluster CRD is installed) appends OCM managed clusters as well.
func GetRegisteredClusters(c client.Client) ([]types.Cluster, error) {
	ctx := context.Background()
	var registered []types.Cluster

	// Secret-based clusters registered through cluster-gateway.
	var secretList corev1.SecretList
	if err := c.List(ctx, &secretList, client.HasLabels{v1alpha1.LabelKeyClusterCredentialType}, client.InNamespace(multicluster.ClusterGatewaySecretNamespace)); err != nil {
		return nil, errors.Wrapf(err, "failed to get clusterSecret secrets")
	}
	for _, secret := range secretList.Items {
		labels := secret.GetLabels()
		endpoint := string(secret.Data["endpoint"])
		// An endpoint-type label overrides the endpoint stored in the secret data.
		if override, ok := labels[v1alpha1.LabelKeyClusterEndpointType]; ok {
			endpoint = override
		}
		registered = append(registered, types.Cluster{
			Name:     secret.Name,
			Type:     labels[v1alpha1.LabelKeyClusterCredentialType],
			EndPoint: endpoint,
			Accepted: true,
		})
	}

	// OCM clusters are only reachable when the ManagedCluster CRD exists;
	// its absence is not an error, just an empty OCM contribution.
	crdName := k8stypes.NamespacedName{Name: "managedclusters." + clusterv1.GroupName}
	if err := c.Get(ctx, crdName, &crdv1.CustomResourceDefinition{}); err != nil {
		if apierror.IsNotFound(err) {
			return registered, nil
		}
		return nil, err
	}
	var managed clusterv1.ManagedClusterList
	if err := c.List(ctx, &managed); err != nil {
		return nil, errors.Wrapf(err, "failed to get managed clusters")
	}
	for _, mc := range managed.Items {
		// Skip clusters that expose no client configs.
		if len(mc.Spec.ManagedClusterClientConfigs) == 0 {
			continue
		}
		registered = append(registered, types.Cluster{
			Name:     mc.Name,
			Type:     "OCM ManagedServiceAccount",
			EndPoint: "-",
			Accepted: mc.Spec.HubAcceptsClient,
		})
	}
	return registered, nil
}
// EnsureClusterNotExists will check the cluster is not existed in control plane.
// It returns an error if the lookup fails or if a cluster with the given name
// is already registered.
func EnsureClusterNotExists(c client.Client, clusterName string) error {
	switch found, err := clusterExists(c, clusterName); {
	case err != nil:
		return err
	case found:
		return fmt.Errorf("cluster %s already exists", clusterName)
	}
	return nil
}
// EnsureClusterExists will check the cluster is existed in control plane.
// It returns an error if the lookup fails or if no cluster with the given
// name is registered.
func EnsureClusterExists(c client.Client, clusterName string) error {
	switch found, err := clusterExists(c, clusterName); {
	case err != nil:
		return err
	case !found:
		return fmt.Errorf("cluster %s not exists", clusterName)
	}
	return nil
}
// clusterExists will check whether the cluster exist or not.
// A cluster exists if either a cluster secret or (when the OCM CRD is
// installed) a ManagedCluster with the given name is found. Only NotFound
// errors are treated as "does not exist"; any other error aborts the check.
func clusterExists(c client.Client, clusterName string) (bool, error) {
	ctx := context.Background()
	key := k8stypes.NamespacedName{
		Name:      clusterName,
		Namespace: multicluster.ClusterGatewaySecretNamespace,
	}
	// First look for a cluster secret with the same name.
	switch err := c.Get(ctx, key, &corev1.Secret{}); {
	case err == nil:
		return true, nil
	case !apierror.IsNotFound(err):
		return false, errors.Wrapf(err, "failed to check duplicate cluster")
	}
	// Without the OCM ManagedCluster CRD there is nothing more to check.
	crdName := k8stypes.NamespacedName{Name: "managedclusters." + clusterv1.GroupName}
	if err := c.Get(ctx, crdName, &crdv1.CustomResourceDefinition{}); err != nil {
		if apierror.IsNotFound(err) {
			return false, nil
		}
		return false, errors.Wrapf(err, "failed to get managedcluster CRD to check duplicate cluster")
	}
	// Finally look for an OCM ManagedCluster with the same name.
	switch err := c.Get(ctx, key, &clusterv1.ManagedCluster{}); {
	case err == nil:
		return true, nil
	case !apierror.IsNotFound(err):
		return false, errors.Wrapf(err, "failed to check duplicate cluster")
	}
	return false, nil
}

View File

@@ -83,4 +83,7 @@ type Args struct {
// EnableCompatibility indicates that will change some functions of controller to adapt to multiple platforms, such as asi.
EnableCompatibility bool
// IgnoreAppWithoutControllerRequirement indicates that application controller will not process the app without 'app.oam.dev/controller-version-require' annotation.
IgnoreAppWithoutControllerRequirement bool
}

View File

@@ -92,6 +92,8 @@ type options struct {
appRevisionLimit int
concurrentReconciles int
disableStatusUpdate bool
ignoreAppNoCtrlReq bool
controllerVersion string
}
// +kubebuilder:rbac:groups=core.oam.dev,resources=applications,verbs=get;list;watch;create;update;patch;delete
@@ -100,6 +102,7 @@ type options struct {
// Reconcile process app event
// nolint:gocyclo
func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
ctx, cancel := context.WithTimeout(ctx, common2.ReconcileTimeout)
defer cancel()
@@ -117,6 +120,11 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
return r.result(client.IgnoreNotFound(err)).ret()
}
if !r.matchControllerRequirement(app) {
logCtx.Info("skip app: not match the controller requirement of app")
return ctrl.Result{}, nil
}
timeReporter := timeReconcile(app)
defer timeReporter()
@@ -126,7 +134,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
if annotations := app.GetAnnotations(); annotations == nil || annotations[oam.AnnotationKubeVelaVersion] == "" {
metav1.SetMetaDataAnnotation(&app.ObjectMeta, oam.AnnotationKubeVelaVersion, version.VelaVersion)
}
logCtx.AddTag("publish_version", app.GetAnnotations()[oam.AnnotationKubeVelaVersion])
logCtx.AddTag("publish_version", app.GetAnnotations()[oam.AnnotationPublishVersion])
appParser := appfile.NewApplicationParser(r.Client, r.dm, r.pd)
handler, err := NewAppHandler(logCtx, r, app, appParser)
@@ -590,5 +598,19 @@ func parseOptions(args core.Args) options {
disableStatusUpdate: args.EnableCompatibility,
appRevisionLimit: args.AppRevisionLimit,
concurrentReconciles: args.ConcurrentReconciles,
ignoreAppNoCtrlReq: args.IgnoreAppWithoutControllerRequirement,
controllerVersion: version.VelaVersion,
}
}
// matchControllerRequirement reports whether this reconciler should process
// the given application. An explicit controller-requirement annotation must
// match the reconciler's version; without the annotation, the app is handled
// unless the reconciler is configured to ignore annotation-less apps.
func (r *Reconciler) matchControllerRequirement(app *v1beta1.Application) bool {
	// Indexing a nil annotations map safely yields ok == false.
	if required, ok := app.Annotations[oam.AnnotationControllerRequirement]; ok {
		return required == r.controllerVersion
	}
	return !r.ignoreAppNoCtrlReq
}

View File

@@ -2321,6 +2321,74 @@ var _ = Describe("Test Application Controller", func() {
Expect(len(rt.Spec.ManagedResources)).Should(Equal(20))
})
It("test controller requirement", func() {
ns := corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "test-controller-requirement",
},
}
Expect(k8sClient.Create(context.Background(), &ns)).Should(BeNil())
appWithoutCtrlReq := appwithNoTrait.DeepCopy()
appWithoutCtrlReq.SetNamespace(ns.Name)
appWithoutCtrlReq.SetName("app-no-ctrl-req")
Expect(k8sClient.Create(context.Background(), appWithoutCtrlReq)).Should(BeNil())
appWithCtrlReqV1 := appwithNoTrait.DeepCopy()
appWithCtrlReqV1.SetNamespace(ns.Name)
appWithCtrlReqV1.SetName("app-with-ctrl-v1")
appWithCtrlReqV1.Annotations = map[string]string{
oam.AnnotationControllerRequirement: "v1",
}
Expect(k8sClient.Create(context.Background(), appWithCtrlReqV1)).Should(BeNil())
appWithCtrlReqV2 := appwithNoTrait.DeepCopy()
appWithCtrlReqV2.SetNamespace(ns.Name)
appWithCtrlReqV2.SetName("app-with-ctrl-v2")
appWithCtrlReqV2.Annotations = map[string]string{
oam.AnnotationControllerRequirement: "v2",
}
Expect(k8sClient.Create(context.Background(), appWithCtrlReqV2)).Should(BeNil())
v1OREmptyReconciler := *reconciler
v1OREmptyReconciler.ignoreAppNoCtrlReq = false
v1OREmptyReconciler.controllerVersion = "v1"
v2OnlyReconciler := *reconciler
v2OnlyReconciler.ignoreAppNoCtrlReq = true
v2OnlyReconciler.controllerVersion = "v2"
check := func(r reconcile.Reconciler, app *v1beta1.Application, do bool) {
testutil.ReconcileOnceAfterFinalizer(r, reconcile.Request{NamespacedName: client.ObjectKey{
Name: app.Name,
Namespace: app.Namespace,
}})
checkApp := &v1beta1.Application{}
Expect(k8sClient.Get(context.Background(), client.ObjectKey{
Name: app.Name,
Namespace: app.Namespace,
}, checkApp)).Should(BeNil())
if do {
Expect(checkApp.Annotations[oam.AnnotationKubeVelaVersion]).ShouldNot(BeEmpty())
} else {
if checkApp.Annotations == nil {
return
}
Expect(checkApp.Annotations[oam.AnnotationKubeVelaVersion]).Should(BeEmpty())
}
}
check(&v2OnlyReconciler, appWithoutCtrlReq, false)
check(&v2OnlyReconciler, appWithCtrlReqV1, false)
check(&v1OREmptyReconciler, appWithCtrlReqV2, false)
check(&v1OREmptyReconciler, appWithoutCtrlReq, true)
check(&v1OREmptyReconciler, appWithCtrlReqV1, true)
check(&v2OnlyReconciler, appWithCtrlReqV2, true)
})
})
const (

View File

@@ -32,6 +32,7 @@ import (
"github.com/oam-dev/kubevela/pkg/appfile"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/application/assemble"
"github.com/oam-dev/kubevela/pkg/cue/model/value"
"github.com/oam-dev/kubevela/pkg/cue/process"
"github.com/oam-dev/kubevela/pkg/monitor/metrics"
"github.com/oam-dev/kubevela/pkg/multicluster"
"github.com/oam-dev/kubevela/pkg/oam"
@@ -60,16 +61,19 @@ func (h *AppHandler) GenerateApplicationSteps(ctx context.Context,
appParser *appfile.Parser,
af *appfile.Appfile,
appRev *v1beta1.ApplicationRevision) ([]wfTypes.TaskRunner, error) {
handlerProviders := providers.NewProviders()
kube.Install(handlerProviders, h.r.Client, h.Dispatch, h.Delete)
oamProvider.Install(handlerProviders, app, h.applyComponentFunc(
appParser, appRev, af), h.renderComponentFunc(appParser, appRev, af))
http.Install(handlerProviders, h.r.Client, app.Namespace)
taskDiscover := tasks.NewTaskDiscover(handlerProviders, h.r.pd, h.r.Client, h.r.dm)
pCtx := process.NewContext(generateContextDataFromApp(app, appRev.Name))
taskDiscover := tasks.NewTaskDiscover(handlerProviders, h.r.pd, h.r.Client, h.r.dm, pCtx)
multiclusterProvider.Install(handlerProviders, h.r.Client, app)
terraformProvider.Install(handlerProviders, app, func(comp common.ApplicationComponent) (*appfile.Workload, error) {
return appParser.ParseWorkloadFromRevision(comp, appRev)
})
var tasks []wfTypes.TaskRunner
for _, step := range af.WorkflowSteps {
options := &wfTypes.GeneratorOptions{
@@ -290,3 +294,17 @@ func generateStepID(stepName string, wfStatus *common.WorkflowStatus) string {
}
return id
}
// generateContextDataFromApp builds process context data from an application
// and its revision name. CompName mirrors the app name here; workflow name
// and publish version are taken from annotations when present.
func generateContextDataFromApp(app *v1beta1.Application, appRev string) process.ContextData {
	ctxData := process.ContextData{
		Namespace:       app.Namespace,
		AppName:         app.Name,
		CompName:        app.Name,
		AppRevisionName: appRev,
	}
	if ann := app.Annotations; ann != nil {
		ctxData.WorkflowName = ann[oam.AnnotationWorkflowName]
		ctxData.PublishVersion = ann[oam.AnnotationPublishVersion]
	}
	return ctxData
}

View File

@@ -370,7 +370,7 @@ func ComputeAppRevisionHash(appRevision *v1beta1.ApplicationRevision) (string, e
// currentAppRevIsNew check application revision already exist or not
func (h *AppHandler) currentAppRevIsNew(ctx context.Context) (bool, bool, error) {
// the last revision doesn't exist.
if h.app.Status.LatestRevision == nil {
if h.app.Status.LatestRevision == nil || DisableAllApplicationRevision {
return true, true, nil
}

View File

@@ -439,8 +439,8 @@ func CUEBasedHealthCheck(ctx context.Context, c client.Client, wlRef WorkloadRef
switch wl.CapabilityCategory {
case oamtypes.TerraformCategory:
pCtx = af.NewBasicContext(appfile.Name, wl.Name, appfile.AppRevisionName, appfile.Namespace, wl.Params)
ctx := context.Background()
pCtx = af.NewBasicContext(af.GenerateContextDataFromAppFile(appfile, wl.Name), wl.Params)
var configuration terraformapi.Configuration
if err := c.Get(ctx, client.ObjectKey{Name: wl.Name, Namespace: ns}, &configuration); err != nil {
wlHealth.HealthStatus = StatusUnhealthy
@@ -454,7 +454,8 @@ func CUEBasedHealthCheck(ctx context.Context, c client.Client, wlRef WorkloadRef
wlHealth.Diagnosis = configuration.Status.Apply.Message
okToCheckTrait = true
default:
pCtx = process.NewProcessContextWithCtx(ctx, ns, wl.Name, appfile.Name, appfile.AppRevisionName)
pCtx = process.NewContext(af.GenerateContextDataFromAppFile(appfile, wl.Name))
pCtx.SetCtx(ctx)
if wl.CapabilityCategory != oamtypes.CUECategory {
templateStr, err := af.GenerateCUETemplate(wl)
if err != nil {

View File

@@ -194,7 +194,9 @@ func GetOpenAPISchemaFromTerraformComponentDefinition(configuration string) ([]b
}
}
schema.Title = k
required = append(required, k)
if v.Required {
required = append(required, k)
}
if v.Default != nil {
schema.Default = v.Default
}

View File

@@ -303,21 +303,9 @@ variable "password" {
variable "intVar" {
type = "number"
}
variable "boolVar" {
type = "bool"
}
variable "listVar" {
type = "list"
}
variable "mapVar" {
type = "map"
}`,
want: want{
subStr: "account_name",
subStr: `"required":["intVar"]`,
err: nil,
},
},

View File

@@ -217,7 +217,12 @@ parameter: {
}
for _, v := range testCases {
ctx := process.NewContext("default", "test", "myapp", "myapp-v1")
ctx := process.NewContext(process.ContextData{
AppName: "myapp",
CompName: "test",
Namespace: "default",
AppRevisionName: "myapp-v1",
})
wt := NewWorkloadAbstractEngine("testWorkload", &packages.PackageDiscover{})
err := wt.Complete(ctx, v.workloadTemplate, v.params)
hasError := err != nil
@@ -918,7 +923,12 @@ parameter: [string]: string`,
}
`
ctx := process.NewContext("default", "test", "myapp", "myapp-v1")
ctx := process.NewContext(process.ContextData{
AppName: "myapp",
CompName: "test",
Namespace: "default",
AppRevisionName: "myapp-v1",
})
wt := NewWorkloadAbstractEngine("-", &packages.PackageDiscover{})
if err := wt.Complete(ctx, baseTemplate, map[string]interface{}{
"replicas": 2,
@@ -1017,7 +1027,12 @@ outputs: service :{
}
for k, v := range testcases {
wd := NewWorkloadAbstractEngine(k, &packages.PackageDiscover{})
ctx := process.NewContext("default", k, "myapp", "myapp-v1")
ctx := process.NewContext(process.ContextData{
AppName: "myapp",
CompName: k,
Namespace: "default",
AppRevisionName: "myapp-v1",
})
err := wd.Complete(ctx, v.template, map[string]interface{}{})
assert.NoError(t, err)
_, assists := ctx.Output()
@@ -1095,7 +1110,12 @@ outputs: abc :{
}
for k, v := range testcases {
td := NewTraitAbstractEngine(k, &packages.PackageDiscover{})
ctx := process.NewContext("default", k, "myapp", "myapp-v1")
ctx := process.NewContext(process.ContextData{
AppName: "myapp",
CompName: k,
Namespace: "default",
AppRevisionName: "myapp-v1",
})
err := td.Complete(ctx, v.template, map[string]interface{}{})
assert.NoError(t, err)
_, assists := ctx.Output()

View File

@@ -35,6 +35,10 @@ const (
ContextAppRevisionNum = "appRevisionNum"
// ContextNamespace is the namespace of the app
ContextNamespace = "namespace"
// ContextPublishVersion is the publish version of the app
ContextPublishVersion = "publishVersion"
// ContextWorkflowName is the name of the workflow
ContextWorkflowName = "workflowName"
// OutputSecretName is used to store all secret names which are generated by cloud resource components
OutputSecretName = "outputSecretName"
// ContextCompRevisionName is the component revision name of context

View File

@@ -317,7 +317,7 @@ func (val *Value) LookupValue(paths ...string) (*Value, error) {
func (val *Value) LookupByScript(script string) (*Value, error) {
var outputKey = "zz_output__"
script = strings.TrimSpace(script)
scriptFile, err := parser.ParseFile("-", script)
scriptFile, err := parser.ParseFile("-", script, parser.ParseComments)
if err != nil {
return nil, errors.WithMessage(err, "parse script")
}
@@ -327,7 +327,7 @@ func (val *Value) LookupByScript(script string) (*Value, error) {
return nil, err
}
rawFile, err := parser.ParseFile("-", raw)
rawFile, err := parser.ParseFile("-", raw, parser.ParseComments)
if err != nil {
return nil, errors.WithMessage(err, "parse script")
}

View File

@@ -597,6 +597,23 @@ func TestLookupByScript(t *testing.T) {
}{
{
src: `
traits: {
ingress: {
// +patchKey=name
test: [{name: "main", image: "busybox"}]
}
}
`,
script: `traits["ingress"]`,
expect: `// +patchKey=name
test: [{
name: "main"
image: "busybox"
}]
`,
},
{
src: `
apply: containers: [{name: "main", image: "busybox"}]
`,
script: `apply.containers[0].image`,

View File

@@ -62,10 +62,12 @@ type templateContext struct {
// appName is the name of Application
appName string
// appRevision is the revision name of Application
appRevision string
configs []map[string]string
base model.Instance
auxiliaries []Auxiliary
appRevision string
workflowName string
publishVersion string
configs []map[string]string
base model.Instance
auxiliaries []Auxiliary
// namespace is the namespace of Application which is used to set the namespace for Crossplane connection secret,
// ComponentDefinition/TratiDefinition OpenAPI v3 schema
namespace string
@@ -94,57 +96,41 @@ type RequiredSecrets struct {
Data map[string]interface{}
}
// ContextData is the core data of process context
type ContextData struct {
Namespace string
AppName string
CompName string
AppRevisionName string
WorkflowName string
PublishVersion string
Ctx context.Context
BaseHooks []BaseHook
AuxiliaryHooks []AuxiliaryHook
Components []common.ApplicationComponent
}
// NewContext create render templateContext
func NewContext(namespace, name, appName, appRevision string) Context {
return &templateContext{
name: name,
appName: appName,
appRevision: appRevision,
func NewContext(data ContextData) Context {
ctx := &templateContext{
namespace: data.Namespace,
name: data.CompName,
appName: data.AppName,
appRevision: data.AppRevisionName,
workflowName: data.WorkflowName,
publishVersion: data.PublishVersion,
configs: []map[string]string{},
auxiliaries: []Auxiliary{},
namespace: namespace,
parameters: map[string]interface{}{},
}
}
// NewProcessContextWithCtx create render templateContext with ctx
func NewProcessContextWithCtx(ctx context.Context, namespace, name, appName, appRevision string) Context {
return &templateContext{
name: name,
appName: appName,
appRevision: appRevision,
configs: []map[string]string{},
auxiliaries: []Auxiliary{},
namespace: namespace,
parameters: map[string]interface{}{},
ctx: ctx,
}
}
// NewContextWithHooks create render templateContext with hooks for validation
func NewContextWithHooks(namespace, name, appName, appRevision string, baseHooks []BaseHook, auxHooks []AuxiliaryHook) Context {
return &templateContext{
name: name,
appName: appName,
appRevision: appRevision,
configs: []map[string]string{},
auxiliaries: []Auxiliary{},
namespace: namespace,
parameters: map[string]interface{}{},
baseHooks: baseHooks,
auxiliaryHooks: auxHooks,
}
}
// NewPolicyContext create Application Scope templateContext for Policy
func NewPolicyContext(namespace, name, appName, appRevision string, components []common.ApplicationComponent) Context {
return &templateContext{
name: name,
appName: appName,
appRevision: appRevision,
namespace: namespace,
components: components,
ctx: data.Ctx,
baseHooks: data.BaseHooks,
auxiliaryHooks: data.AuxiliaryHooks,
components: data.Components,
}
return ctx
}
// SetParameters sets templateContext parameters
@@ -185,6 +171,8 @@ func (ctx *templateContext) BaseContextFile() string {
buff += fmt.Sprintf(model.ContextAppRevisionNum+": %d\n", revNum)
buff += fmt.Sprintf(model.ContextNamespace+": \"%s\"\n", ctx.namespace)
buff += fmt.Sprintf(model.ContextCompRevisionName+": \"%s\"\n", model.ComponentRevisionPlaceHolder)
buff += fmt.Sprintf(model.ContextWorkflowName+": \"%s\"\n", ctx.workflowName)
buff += fmt.Sprintf(model.ContextPublishVersion+": \"%s\"\n", ctx.publishVersion)
if ctx.base != nil {
buff += fmt.Sprintf(model.OutputFieldName+": %s\n", structMarshal(ctx.base.String()))

View File

@@ -100,7 +100,14 @@ image: "myserver"
},
}
ctx := NewContext("myns", "mycomp", "myapp", "myapp-v1")
ctx := NewContext(ContextData{
AppName: "myapp",
CompName: "mycomp",
Namespace: "myns",
AppRevisionName: "myapp-v1",
WorkflowName: "myworkflow",
PublishVersion: "mypublishversion",
})
ctx.SetBase(base)
ctx.AppendAuxiliaries(svcAux)
ctx.AppendAuxiliaries(svcAuxWithAbnormalName)
@@ -130,6 +137,14 @@ image: "myserver"
assert.Equal(t, nil, err)
assert.Equal(t, int64(1), myAppRevisionNum)
myWorkflowName, err := ctxInst.Lookup("context", model.ContextWorkflowName).String()
assert.Equal(t, nil, err)
assert.Equal(t, "myworkflow", myWorkflowName)
myPublishVersion, err := ctxInst.Lookup("context", model.ContextPublishVersion).String()
assert.Equal(t, nil, err)
assert.Equal(t, "mypublishversion", myPublishVersion)
inputJs, err := ctxInst.Lookup("context", model.OutputFieldName).MarshalJSON()
assert.Equal(t, nil, err)
assert.Equal(t, `{"image":"myserver"}`, string(inputJs))

View File

@@ -17,69 +17,479 @@ limitations under the License.
package multicluster
import (
"bytes"
"context"
"fmt"
v1alpha12 "github.com/oam-dev/cluster-gateway/pkg/apis/cluster/v1alpha1"
"github.com/briandowns/spinner"
"github.com/oam-dev/cluster-register/pkg/hub"
"github.com/oam-dev/cluster-register/pkg/spoke"
"github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
v14 "k8s.io/api/storage/v1"
errors2 "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
v12 "k8s.io/apimachinery/pkg/apis/meta/v1"
types2 "k8s.io/apimachinery/pkg/types"
corev1 "k8s.io/api/core/v1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
apitypes "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/clientcmd/api"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
ocmclusterv1 "open-cluster-management.io/api/cluster/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
clusterv1alpha1 "github.com/oam-dev/cluster-gateway/pkg/apis/cluster/v1alpha1"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/policy/envbinding"
errors3 "github.com/oam-dev/kubevela/pkg/utils/errors"
"github.com/oam-dev/kubevela/pkg/utils"
velaerrors "github.com/oam-dev/kubevela/pkg/utils/errors"
cmdutil "github.com/oam-dev/kubevela/pkg/utils/util"
)
// ensureVelaSystemNamespaceInstalled ensures vela namespace to be installed in child cluster
func ensureVelaSystemNamespaceInstalled(ctx context.Context, c client.Client, clusterName string, createNamespace string) error {
remoteCtx := ContextWithClusterName(ctx, clusterName)
if err := c.Get(remoteCtx, types2.NamespacedName{Name: createNamespace}, &v1.Namespace{}); err != nil {
if !errors2.IsNotFound(err) {
return errors.Wrapf(err, "failed to check vela-system ")
}
if err = c.Create(remoteCtx, &v1.Namespace{ObjectMeta: v12.ObjectMeta{Name: createNamespace}}); err != nil {
return errors.Wrapf(err, "failed to create vela-system namespace")
// KubeClusterConfig info for cluster management
type KubeClusterConfig struct {
	// ClusterName is the name the cluster is registered under in the control plane.
	ClusterName string
	// CreateNamespace, if non-empty, is a namespace to ensure in the joined cluster.
	CreateNamespace string
	// Embedded kubeconfig pieces for the cluster being registered.
	*clientcmdapi.Config
	*clientcmdapi.Cluster
	*clientcmdapi.AuthInfo
	// Logs records intermediate logs (which do not return error) during running
	Logs bytes.Buffer
}
// SetClusterName set cluster name if not empty. An empty name is ignored so
// the existing value is preserved. Returns the receiver for chaining.
func (clusterConfig *KubeClusterConfig) SetClusterName(clusterName string) *KubeClusterConfig {
	if clusterName == "" {
		return clusterConfig
	}
	clusterConfig.ClusterName = clusterName
	return clusterConfig
}
// SetCreateNamespace set create namespace, if empty, no namespace will be created.
// Returns the receiver for chaining.
func (clusterConfig *KubeClusterConfig) SetCreateNamespace(createNamespace string) *KubeClusterConfig {
	clusterConfig.CreateNamespace = createNamespace
	return clusterConfig
}
// Validate check if config is valid for join. The cluster name must be
// non-empty and must not collide with the reserved local cluster name.
func (clusterConfig *KubeClusterConfig) Validate() error {
	if clusterConfig.ClusterName == "" {
		return errors.Errorf("ClusterName cannot be empty")
	}
	if clusterConfig.ClusterName == ClusterLocalName {
		return errors.Errorf("ClusterName cannot be `%s`, it is reserved as the local cluster", ClusterLocalName)
	}
	return nil
}
// RegisterByVelaSecret create cluster secrets for KubeVela to use.
// It refuses duplicate names, stores either a service-account token or an
// X509 client cert pair (depending on what the kubeconfig carries), and
// optionally ensures CreateNamespace in the new cluster, rolling back the
// secret on failure.
func (clusterConfig *KubeClusterConfig) RegisterByVelaSecret(ctx context.Context, cli client.Client) error {
	if err := ensureClusterNotExists(ctx, cli, clusterConfig.ClusterName); err != nil {
		return errors.Wrapf(err, "cannot use cluster name %s", clusterConfig.ClusterName)
	}
	secretData := map[string][]byte{
		"endpoint": []byte(clusterConfig.Cluster.Server),
		"ca.crt":   clusterConfig.Cluster.CertificateAuthorityData,
	}
	// A bearer token takes precedence; otherwise fall back to client certs.
	var credentialType clusterv1alpha1.CredentialType
	if token := clusterConfig.AuthInfo.Token; len(token) > 0 {
		credentialType = clusterv1alpha1.CredentialTypeServiceAccountToken
		secretData["token"] = []byte(token)
	} else {
		credentialType = clusterv1alpha1.CredentialTypeX509Certificate
		secretData["tls.crt"] = clusterConfig.AuthInfo.ClientCertificateData
		secretData["tls.key"] = clusterConfig.AuthInfo.ClientKeyData
	}
	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      clusterConfig.ClusterName,
			Namespace: ClusterGatewaySecretNamespace,
			Labels: map[string]string{
				clusterv1alpha1.LabelKeyClusterCredentialType: string(credentialType),
			},
		},
		Type: corev1.SecretTypeOpaque,
		Data: secretData,
	}
	if err := cli.Create(ctx, secret); err != nil {
		return errors.Wrapf(err, "failed to add cluster to kubernetes")
	}
	// TODO(somefive): create namespace now only work for cluster secret
	if clusterConfig.CreateNamespace != "" {
		if err := ensureNamespaceExists(ctx, cli, clusterConfig.ClusterName, clusterConfig.CreateNamespace); err != nil {
			// Best-effort rollback: remove the secret we just created.
			_ = cli.Delete(ctx, secret)
			return errors.Wrapf(err, "failed to ensure %s namespace installed in cluster %s", clusterConfig.CreateNamespace, clusterConfig.ClusterName)
		}
	}
	return nil
}
// ensureClusterNotExists checks if child cluster has already been joined, if joined, error is returned
// RegisterClusterManagedByOCM create ocm managed cluster for use
// TODO(somefive): OCM ManagedCluster only support cli join now
//
// The flow is: verify the hub has the ManagedCluster CRD and no cluster with
// this name is already accepted, deploy the registration operator/agent into
// the spoke cluster, wait for its CSR on the hub, approve it, and finally
// record the spoke cluster as registered.
func (clusterConfig *KubeClusterConfig) RegisterClusterManagedByOCM(ctx context.Context, args *JoinClusterArgs) error {
	newTrackingSpinner := args.trackingSpinnerFactory
	hubCluster, err := hub.NewHubCluster(args.hubConfig)
	if err != nil {
		return errors.Wrap(err, "fail to create client connect to hub cluster")
	}
	hubTracker := newTrackingSpinner("Checking the environment of hub cluster..")
	hubTracker.FinalMSG = "Hub cluster all set, continue registration.\n"
	hubTracker.Start()
	// The ManagedCluster CRD must be present on the hub before any join can work.
	crdName := apitypes.NamespacedName{Name: "managedclusters." + ocmclusterv1.GroupName}
	// BUG FIX: use the caller's ctx (was context.Background()) so cancellation
	// and deadlines propagate to the hub-side calls.
	if err := hubCluster.Client.Get(ctx, crdName, &apiextensionsv1.CustomResourceDefinition{}); err != nil {
		return err
	}
	clusters, err := ListVirtualClusters(ctx, hubCluster.Client)
	if err != nil {
		return err
	}
	// Reject a re-join of a cluster that is already accepted under this name.
	for _, cluster := range clusters {
		if cluster.Name == clusterConfig.ClusterName && cluster.Accepted {
			return errors.Errorf("you have register a cluster named %s", clusterConfig.ClusterName)
		}
	}
	hubTracker.Stop()
	spokeRestConf, err := clientcmd.BuildConfigFromKubeconfigGetter("", func() (*clientcmdapi.Config, error) {
		return clusterConfig.Config, nil
	})
	if err != nil {
		return errors.Wrap(err, "fail to convert spoke-cluster kubeconfig")
	}
	spokeTracker := newTrackingSpinner("Building registration config for the managed cluster")
	spokeTracker.FinalMSG = "Successfully prepared registration config.\n"
	spokeTracker.Start()
	// Unless bootstrapping from inside the cluster, the hub kubeconfig's API
	// endpoint is used as the registration entry the agent dials back to.
	overridingRegistrationEndpoint := ""
	if !*args.inClusterBootstrap {
		args.ioStreams.Infof("Using the api endpoint from hub kubeconfig %q as registration entry.\n", args.hubConfig.Host)
		overridingRegistrationEndpoint = args.hubConfig.Host
	}
	hubKubeToken, err := hubCluster.GenerateHubClusterKubeConfig(ctx, overridingRegistrationEndpoint)
	if err != nil {
		return errors.Wrap(err, "fail to generate the token for spoke-cluster")
	}
	spokeCluster, err := spoke.NewSpokeCluster(clusterConfig.ClusterName, spokeRestConf, hubKubeToken)
	if err != nil {
		return errors.Wrap(err, "fail to connect spoke cluster")
	}
	err = spokeCluster.InitSpokeClusterEnv(ctx)
	if err != nil {
		return errors.Wrap(err, "fail to prepare the env for spoke-cluster")
	}
	spokeTracker.Stop()
	registrationOperatorTracker := newTrackingSpinner("Waiting for registration operators running: (`kubectl -n open-cluster-management get pod -l app=klusterlet`)")
	registrationOperatorTracker.FinalMSG = "Registration operator successfully deployed.\n"
	registrationOperatorTracker.Start()
	if err := spokeCluster.WaitForRegistrationOperatorReady(ctx); err != nil {
		return errors.Wrap(err, "fail to setup registration operator for spoke-cluster")
	}
	registrationOperatorTracker.Stop()
	registrationAgentTracker := newTrackingSpinner("Waiting for registration agent running: (`kubectl -n open-cluster-management-agent get pod -l app=klusterlet-registration-agent`)")
	registrationAgentTracker.FinalMSG = "Registration agent successfully deployed.\n"
	registrationAgentTracker.Start()
	if err := spokeCluster.WaitForRegistrationAgentReady(ctx); err != nil {
		return errors.Wrap(err, "fail to setup registration agent for spoke-cluster")
	}
	registrationAgentTracker.Stop()
	csrCreationTracker := newTrackingSpinner("Waiting for CSRs created (`kubectl get csr -l open-cluster-management.io/cluster-name=" + spokeCluster.Name + "`)")
	csrCreationTracker.FinalMSG = "Successfully found corresponding CSR from the agent.\n"
	csrCreationTracker.Start()
	if err := hubCluster.WaitForCSRCreated(ctx, spokeCluster.Name); err != nil {
		return errors.Wrap(err, "failed found CSR created by registration agent")
	}
	csrCreationTracker.Stop()
	args.ioStreams.Infof("Approving the CSR for cluster %q.\n", spokeCluster.Name)
	if err := hubCluster.ApproveCSR(ctx, spokeCluster.Name); err != nil {
		// BUG FIX: this message was copy-pasted from the WaitForCSRCreated
		// branch above; it now reflects the approval step that actually failed.
		return errors.Wrap(err, "failed to approve CSR created by registration agent")
	}
	ready, err := hubCluster.WaitForSpokeClusterReady(ctx, clusterConfig.ClusterName)
	if err != nil || !ready {
		return errors.Errorf("fail to waiting for register request")
	}
	if err = hubCluster.RegisterSpokeCluster(ctx, spokeCluster.Name); err != nil {
		return errors.Wrap(err, "fail to approve spoke cluster")
	}
	return nil
}
// LoadKubeClusterConfigFromFile create KubeClusterConfig from kubeconfig file
// It resolves the current context into its cluster and auth-info entries and
// normalizes the API server endpoint when it can be parsed.
func LoadKubeClusterConfigFromFile(filepath string) (*KubeClusterConfig, error) {
	cfg, err := clientcmd.LoadFromFile(filepath)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to get kubeconfig")
	}
	clusterConfig := &KubeClusterConfig{Config: cfg}
	if len(cfg.CurrentContext) == 0 {
		return nil, fmt.Errorf("current-context is not set")
	}
	kubeCtx, ok := cfg.Contexts[cfg.CurrentContext]
	if !ok {
		return nil, fmt.Errorf("current-context %s not found", cfg.CurrentContext)
	}
	if clusterConfig.Cluster, ok = cfg.Clusters[kubeCtx.Cluster]; !ok {
		return nil, fmt.Errorf("cluster %s not found", kubeCtx.Cluster)
	}
	if clusterConfig.AuthInfo, ok = cfg.AuthInfos[kubeCtx.AuthInfo]; !ok {
		return nil, fmt.Errorf("authInfo %s not found", kubeCtx.AuthInfo)
	}
	clusterConfig.ClusterName = kubeCtx.Cluster
	// Best effort: a non-parseable endpoint is only logged, not fatal.
	if endpoint, err := utils.ParseAPIServerEndpoint(clusterConfig.Cluster.Server); err == nil {
		clusterConfig.Cluster.Server = endpoint
	} else {
		_, _ = fmt.Fprintf(&clusterConfig.Logs, "failed to parse server endpoint: %v", err)
	}
	return clusterConfig, nil
}
const (
	// ClusterGateWayEngine cluster-gateway cluster management solution (the default join engine)
	ClusterGateWayEngine = "cluster-gateway"
	// OCMEngine ocm (open-cluster-management) cluster management solution
	OCMEngine = "ocm"
)
// JoinClusterArgs args for join cluster
type JoinClusterArgs struct {
	// engine selects the management solution: ClusterGateWayEngine (default) or OCMEngine.
	engine string
	// createNamespace, when non-empty, names a namespace to create in the joined cluster.
	createNamespace string
	// The remaining fields are only consumed by the OCM join path.
	ioStreams              cmdutil.IOStreams
	hubConfig              *rest.Config
	inClusterBootstrap     *bool
	trackingSpinnerFactory func(string) *spinner.Spinner
}
// newJoinClusterArgs builds the join arguments, defaulting to the
// cluster-gateway engine and then applying each option in order.
func newJoinClusterArgs(options ...JoinClusterOption) *JoinClusterArgs {
	args := &JoinClusterArgs{engine: ClusterGateWayEngine}
	for _, option := range options {
		option.ApplyToArgs(args)
	}
	return args
}
// JoinClusterOption option for join cluster; implementations mutate JoinClusterArgs.
type JoinClusterOption interface {
	ApplyToArgs(args *JoinClusterArgs)
}
// JoinClusterCreateNamespaceOption create namespace when join cluster, if empty, no creation
type JoinClusterCreateNamespaceOption string
// ApplyToArgs apply to args: records the namespace to create in the joined cluster.
func (op JoinClusterCreateNamespaceOption) ApplyToArgs(args *JoinClusterArgs) {
	args.createNamespace = string(op)
}
// JoinClusterEngineOption configure engine for join cluster, either cluster-gateway or ocm
type JoinClusterEngineOption string
// ApplyToArgs apply to args: overrides the default cluster-gateway engine.
func (op JoinClusterEngineOption) ApplyToArgs(args *JoinClusterArgs) {
	args.engine = string(op)
}
// JoinClusterOCMOptions options used when joining clusters by ocm, only support cli for now
type JoinClusterOCMOptions struct {
	// IoStreams carries the CLI streams used for progress messages.
	IoStreams cmdutil.IOStreams
	// HubConfig is the rest config for reaching the hub cluster.
	HubConfig *rest.Config
	// InClusterBootstrap decides whether registration uses the in-cluster endpoint.
	InClusterBootstrap *bool
	// TrackingSpinnerFactory builds the progress spinners shown during registration.
	TrackingSpinnerFactory func(string) *spinner.Spinner
}
// ApplyToArgs apply to args: copies every OCM-related field onto the join args.
func (op JoinClusterOCMOptions) ApplyToArgs(args *JoinClusterArgs) {
	args.ioStreams = op.IoStreams
	args.hubConfig = op.HubConfig
	args.inClusterBootstrap = op.InClusterBootstrap
	args.trackingSpinnerFactory = op.TrackingSpinnerFactory
}
// JoinClusterByKubeConfig add child cluster by kubeconfig path, return cluster info and error
// The kubeconfig's current context is loaded, validated, and registered via
// the engine selected through options (cluster-gateway by default, or OCM).
func JoinClusterByKubeConfig(ctx context.Context, cli client.Client, kubeconfigPath string, clusterName string, options ...JoinClusterOption) (*KubeClusterConfig, error) {
	args := newJoinClusterArgs(options...)
	clusterConfig, err := LoadKubeClusterConfigFromFile(kubeconfigPath)
	if err != nil {
		return nil, err
	}
	if err := clusterConfig.SetClusterName(clusterName).SetCreateNamespace(args.createNamespace).Validate(); err != nil {
		return nil, err
	}
	switch args.engine {
	case ClusterGateWayEngine:
		if err = clusterConfig.RegisterByVelaSecret(ctx, cli); err != nil {
			return nil, err
		}
	case OCMEngine:
		if args.inClusterBootstrap == nil {
			// BUG FIX: err is nil here, and errors.Wrapf(nil, ...) returns nil,
			// which made this branch silently return (nil, nil). Build a real error.
			return nil, errors.Errorf("failed to determine the registration endpoint for the hub cluster " +
				"when parsing --in-cluster-bootstrap flag")
		}
		if err = clusterConfig.RegisterClusterManagedByOCM(ctx, args); err != nil {
			return clusterConfig, err
		}
	}
	return clusterConfig, nil
}
// DetachClusterArgs args for detaching cluster
type DetachClusterArgs struct {
	// managedClusterKubeConfigPath is the kubeconfig of the managed cluster;
	// required only when detaching an OCM-managed cluster.
	managedClusterKubeConfigPath string
}
// newDetachClusterArgs assembles detach arguments by applying each option in order.
func newDetachClusterArgs(options ...DetachClusterOption) *DetachClusterArgs {
	args := new(DetachClusterArgs)
	for _, option := range options {
		option.ApplyToArgs(args)
	}
	return args
}
// DetachClusterOption option for detach cluster; implementations mutate DetachClusterArgs.
type DetachClusterOption interface {
	ApplyToArgs(args *DetachClusterArgs)
}
// DetachClusterManagedClusterKubeConfigPathOption configure the managed cluster kubeconfig path while detach ocm cluster
type DetachClusterManagedClusterKubeConfigPathOption string
// ApplyToArgs apply to args: records the managed-cluster kubeconfig path.
func (op DetachClusterManagedClusterKubeConfigPathOption) ApplyToArgs(args *DetachClusterArgs) {
	args.managedClusterKubeConfigPath = string(op)
}
// DetachCluster detach cluster by name, if cluster is using by application, it will return error
// For cluster-gateway clusters it removes the credential secret; for OCM
// clusters it cleans the spoke environment (requires the managed cluster's
// kubeconfig via option) and deletes the hub-side ManagedCluster object.
func DetachCluster(ctx context.Context, cli client.Client, clusterName string, options ...DetachClusterOption) error {
	args := newDetachClusterArgs(options...)
	if clusterName == ClusterLocalName {
		return ErrReservedLocalClusterName
	}
	vc, err := GetVirtualCluster(ctx, cli, clusterName)
	if err != nil {
		return err
	}
	switch vc.Type {
	case clusterv1alpha1.CredentialTypeX509Certificate, clusterv1alpha1.CredentialTypeServiceAccountToken:
		// getMutableClusterSecret errors if any application still uses the cluster.
		clusterSecret, err := getMutableClusterSecret(ctx, cli, clusterName)
		if err != nil {
			return errors.Wrapf(err, "cluster %s is not mutable now", clusterName)
		}
		if err := cli.Delete(ctx, clusterSecret); err != nil {
			return errors.Wrapf(err, "failed to detach cluster %s", clusterName)
		}
	case CredentialTypeOCMManagedCluster:
		if args.managedClusterKubeConfigPath == "" {
			return errors.New("kubeconfig-path must be set to detach ocm managed cluster")
		}
		config, err := clientcmd.LoadFromFile(args.managedClusterKubeConfigPath)
		if err != nil {
			return err
		}
		restConfig, err := clientcmd.BuildConfigFromKubeconfigGetter("", func() (*clientcmdapi.Config, error) {
			return config, nil
		})
		if err != nil {
			return err
		}
		// Remove the klusterlet components from the managed cluster first.
		if err = spoke.CleanSpokeClusterEnv(restConfig); err != nil {
			return err
		}
		managedCluster := ocmclusterv1.ManagedCluster{ObjectMeta: metav1.ObjectMeta{Name: clusterName}}
		// BUG FIX: use the caller's ctx (was context.Background()) so that
		// cancellation and deadlines propagate to the hub-side deletion.
		if err = cli.Delete(ctx, &managedCluster); err != nil {
			// A missing ManagedCluster is fine — the goal is that it is gone.
			if !apierrors.IsNotFound(err) {
				return err
			}
		}
	}
	return nil
}
// RenameCluster rename cluster
// The rename is implemented as delete-then-recreate of the credential secret
// under the new name, preserving its labels and annotations.
func RenameCluster(ctx context.Context, k8sClient client.Client, oldClusterName string, newClusterName string) error {
	if newClusterName == ClusterLocalName {
		return ErrReservedLocalClusterName
	}
	secret, err := getMutableClusterSecret(ctx, k8sClient, oldClusterName)
	if err != nil {
		return errors.Wrapf(err, "cluster %s is not mutable now", oldClusterName)
	}
	if err = ensureClusterNotExists(ctx, k8sClient, newClusterName); err != nil {
		return errors.Wrapf(err, "cannot set cluster name to %s", newClusterName)
	}
	if err = k8sClient.Delete(ctx, secret); err != nil {
		return errors.Wrapf(err, "failed to rename cluster from %s to %s", oldClusterName, newClusterName)
	}
	secret.ObjectMeta = metav1.ObjectMeta{
		Name:        newClusterName,
		Namespace:   ClusterGatewaySecretNamespace,
		Labels:      secret.Labels,
		Annotations: secret.Annotations,
	}
	if err = k8sClient.Create(ctx, secret); err != nil {
		return errors.Wrapf(err, "failed to rename cluster from %s to %s", oldClusterName, newClusterName)
	}
	return nil
}
// ensureClusterNotExists will check the cluster is not existed in control plane
func ensureClusterNotExists(ctx context.Context, c client.Client, clusterName string) error {
secret := &v1.Secret{}
err := c.Get(ctx, types2.NamespacedName{Name: clusterName, Namespace: ClusterGatewaySecretNamespace}, secret)
if err == nil {
return ErrClusterExists
_, err := GetVirtualCluster(ctx, c, clusterName)
if err != nil {
if IsClusterNotExists(err) {
return nil
}
return err
}
if !errors2.IsNotFound(err) {
return errors.Wrapf(err, "failed to check duplicate cluster secret")
return ErrClusterExists
}
// ensureNamespaceExists ensures vela namespace to be installed in child cluster
// The Get/Create runs against the child cluster through a cluster-scoped context.
func ensureNamespaceExists(ctx context.Context, c client.Client, clusterName string, createNamespace string) error {
	remoteCtx := ContextWithClusterName(ctx, clusterName)
	err := c.Get(remoteCtx, apitypes.NamespacedName{Name: createNamespace}, &corev1.Namespace{})
	if err == nil {
		// Namespace already present, nothing to do.
		return nil
	}
	if !apierrors.IsNotFound(err) {
		return errors.Wrapf(err, "failed to check if namespace %s exists", createNamespace)
	}
	ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: createNamespace}}
	if err = c.Create(remoteCtx, ns); err != nil {
		return errors.Wrapf(err, "failed to create namespace %s", createNamespace)
	}
	return nil
}
// GetMutableClusterSecret retrieves the cluster secret and check if any application is using the cluster
func GetMutableClusterSecret(ctx context.Context, c client.Client, clusterName string) (*v1.Secret, error) {
clusterSecret := &v1.Secret{}
if err := c.Get(ctx, types2.NamespacedName{Namespace: ClusterGatewaySecretNamespace, Name: clusterName}, clusterSecret); err != nil {
// getMutableClusterSecret retrieves the cluster secret and check if any application is using the cluster
// TODO(somefive): should rework the logic of checking application cluster usage
func getMutableClusterSecret(ctx context.Context, c client.Client, clusterName string) (*corev1.Secret, error) {
clusterSecret := &corev1.Secret{}
if err := c.Get(ctx, apitypes.NamespacedName{Namespace: ClusterGatewaySecretNamespace, Name: clusterName}, clusterSecret); err != nil {
return nil, errors.Wrapf(err, "failed to find target cluster secret %s", clusterName)
}
labels := clusterSecret.GetLabels()
if labels == nil || labels[v1alpha12.LabelKeyClusterCredentialType] == "" {
return nil, fmt.Errorf("invalid cluster secret %s: cluster credential type label %s is not set", clusterName, v1alpha12.LabelKeyClusterCredentialType)
if labels == nil || labels[clusterv1alpha1.LabelKeyClusterCredentialType] == "" {
return nil, fmt.Errorf("invalid cluster secret %s: cluster credential type label %s is not set", clusterName, clusterv1alpha1.LabelKeyClusterCredentialType)
}
apps := &v1beta1.ApplicationList{}
if err := c.List(ctx, apps); err != nil {
return nil, errors.Wrap(err, "failed to find applications to check clusters")
}
errs := errors3.ErrorList{}
errs := velaerrors.ErrorList{}
for _, app := range apps.Items {
status, err := envbinding.GetEnvBindingPolicyStatus(app.DeepCopy(), "")
if err == nil && status != nil {
@@ -97,167 +507,3 @@ func GetMutableClusterSecret(ctx context.Context, c client.Client, clusterName s
}
return clusterSecret, nil
}
// JoinClusterByKubeConfig add child cluster by kubeconfig path, return cluster info and error
// NOTE(review): legacy variant (pre-options API); the validation/creation order
// below is deliberate — name checks happen before the secret is created.
func JoinClusterByKubeConfig(_ctx context.Context, k8sClient client.Client, kubeconfigPath string, clusterName string) (*api.Cluster, error) {
	// Load the raw kubeconfig of the cluster to join.
	config, err := clientcmd.LoadFromFile(kubeconfigPath)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to get kubeconfig")
	}
	// The current context selects which cluster/user entry to register.
	if len(config.CurrentContext) == 0 {
		return nil, fmt.Errorf("current-context is not set")
	}
	ctx, ok := config.Contexts[config.CurrentContext]
	if !ok {
		return nil, fmt.Errorf("current-context %s not found", config.CurrentContext)
	}
	cluster, ok := config.Clusters[ctx.Cluster]
	if !ok {
		return nil, fmt.Errorf("cluster %s not found", ctx.Cluster)
	}
	authInfo, ok := config.AuthInfos[ctx.AuthInfo]
	if !ok {
		return nil, fmt.Errorf("authInfo %s not found", ctx.AuthInfo)
	}
	// Default the cluster name to the context's cluster entry when not provided.
	if clusterName == "" {
		clusterName = ctx.Cluster
	}
	// `local` is reserved for the hub cluster itself.
	if clusterName == ClusterLocalName {
		return cluster, fmt.Errorf("cannot use `%s` as cluster name, it is reserved as the local cluster", ClusterLocalName)
	}
	if err := ensureClusterNotExists(_ctx, k8sClient, clusterName); err != nil {
		return cluster, errors.Wrapf(err, "cannot use cluster name %s", clusterName)
	}
	// Pick the credential type: a bearer token wins over client certificates.
	var credentialType v1alpha12.CredentialType
	data := map[string][]byte{
		"endpoint": []byte(cluster.Server),
		"ca.crt":   cluster.CertificateAuthorityData,
	}
	if len(authInfo.Token) > 0 {
		credentialType = v1alpha12.CredentialTypeServiceAccountToken
		data["token"] = []byte(authInfo.Token)
	} else {
		credentialType = v1alpha12.CredentialTypeX509Certificate
		data["tls.crt"] = authInfo.ClientCertificateData
		data["tls.key"] = authInfo.ClientKeyData
	}
	// The credential is stored as a labeled opaque secret that cluster-gateway consumes.
	secret := &v1.Secret{
		ObjectMeta: v12.ObjectMeta{
			Name:      clusterName,
			Namespace: ClusterGatewaySecretNamespace,
			Labels: map[string]string{
				v1alpha12.LabelKeyClusterCredentialType: string(credentialType),
			},
		},
		Type: v1.SecretTypeOpaque,
		Data: data,
	}
	if err := k8sClient.Create(_ctx, secret); err != nil {
		return cluster, errors.Wrapf(err, "failed to add cluster to kubernetes")
	}
	// Make sure the vela-system namespace exists in the newly joined cluster.
	if err := ensureVelaSystemNamespaceInstalled(_ctx, k8sClient, clusterName, types.DefaultKubeVelaNS); err != nil {
		return nil, errors.Wrapf(err, "failed to create vela namespace in cluster %s", clusterName)
	}
	return cluster, nil
}
// DetachCluster detach cluster by name, if cluster is using by application, it will return error
// Legacy variant: only cluster-gateway secrets are handled here.
func DetachCluster(ctx context.Context, k8sClient client.Client, clusterName string) error {
	if clusterName == ClusterLocalName {
		return ErrReservedLocalClusterName
	}
	secret, err := GetMutableClusterSecret(ctx, k8sClient, clusterName)
	if err != nil {
		return errors.Wrapf(err, "cluster %s is not mutable now", clusterName)
	}
	return k8sClient.Delete(ctx, secret)
}
// RenameCluster rename cluster
// Legacy variant: delete the credential secret and recreate it under the new
// name, keeping labels and annotations intact.
func RenameCluster(ctx context.Context, k8sClient client.Client, oldClusterName string, newClusterName string) error {
	if newClusterName == ClusterLocalName {
		return ErrReservedLocalClusterName
	}
	secret, err := GetMutableClusterSecret(ctx, k8sClient, oldClusterName)
	if err != nil {
		return errors.Wrapf(err, "cluster %s is not mutable now", oldClusterName)
	}
	if err = ensureClusterNotExists(ctx, k8sClient, newClusterName); err != nil {
		return errors.Wrapf(err, "cannot set cluster name to %s", newClusterName)
	}
	if err = k8sClient.Delete(ctx, secret); err != nil {
		return errors.Wrapf(err, "failed to rename cluster from %s to %s", oldClusterName, newClusterName)
	}
	secret.ObjectMeta = v12.ObjectMeta{
		Name:        newClusterName,
		Namespace:   ClusterGatewaySecretNamespace,
		Labels:      secret.Labels,
		Annotations: secret.Annotations,
	}
	if err = k8sClient.Create(ctx, secret); err != nil {
		return errors.Wrapf(err, "failed to rename cluster from %s to %s", oldClusterName, newClusterName)
	}
	return nil
}
// ClusterInfo describes the basic information of a cluster
type ClusterInfo struct {
	// Nodes is the raw node list the aggregates below are computed from.
	Nodes *v1.NodeList
	// WorkerNumber/MasterNumber count nodes by the master role label.
	WorkerNumber int
	MasterNumber int
	// Aggregated capacity across all nodes.
	MemoryCapacity resource.Quantity
	CPUCapacity    resource.Quantity
	PodCapacity    resource.Quantity
	// Aggregated allocatable resources across all nodes.
	MemoryAllocatable resource.Quantity
	CPUAllocatable    resource.Quantity
	PodAllocatable    resource.Quantity
	// StorageClasses available in the cluster.
	StorageClasses *v14.StorageClassList
}
// GetClusterInfo retrieves current cluster info from cluster
// It lists nodes and storage classes through a cluster-scoped context and
// aggregates node counts plus capacity/allocatable totals.
func GetClusterInfo(_ctx context.Context, k8sClient client.Client, clusterName string) (*ClusterInfo, error) {
	ctx := ContextWithClusterName(_ctx, clusterName)
	nodes := &v1.NodeList{}
	if err := k8sClient.List(ctx, nodes); err != nil {
		return nil, errors.Wrapf(err, "failed to list cluster nodes")
	}
	info := &ClusterInfo{Nodes: nodes}
	for i := range nodes.Items {
		node := &nodes.Items[i]
		if _, isMaster := node.Labels["node-role.kubernetes.io/master"]; isMaster {
			info.MasterNumber++
		} else {
			info.WorkerNumber++
		}
		capacity := node.Status.Capacity
		info.MemoryCapacity.Add(*capacity.Memory())
		info.CPUCapacity.Add(*capacity.Cpu())
		info.PodCapacity.Add(*capacity.Pods())
		allocatable := node.Status.Allocatable
		info.MemoryAllocatable.Add(*allocatable.Memory())
		info.CPUAllocatable.Add(*allocatable.Cpu())
		info.PodAllocatable.Add(*allocatable.Pods())
	}
	storageClasses := &v14.StorageClassList{}
	if err := k8sClient.List(ctx, storageClasses); err != nil {
		return nil, errors.Wrapf(err, "failed to list storage classes")
	}
	info.StorageClasses = storageClasses
	return info, nil
}

View File

@@ -26,6 +26,8 @@ import (
var (
	// ErrClusterExists cluster already exists; returned when joining/renaming onto a taken name
	ErrClusterExists = ClusterManagementError(fmt.Errorf("cluster already exists"))
	// ErrClusterNotExists cluster not exists; returned when a named cluster cannot be found
	ErrClusterNotExists = ClusterManagementError(fmt.Errorf("no such cluster"))
	// ErrReservedLocalClusterName reserved cluster name is used
	ErrReservedLocalClusterName = ClusterManagementError(fmt.Errorf("cluster name `local` is reserved for kubevela hub cluster"))
)

83
pkg/multicluster/o11n.go Normal file
View File

@@ -0,0 +1,83 @@
/*
Copyright 2020-2022 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package multicluster
import (
"context"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// ClusterInfo describes the basic information of a cluster
type ClusterInfo struct {
	// Nodes is the raw node list the aggregates below are computed from.
	Nodes *corev1.NodeList
	// WorkerNumber/MasterNumber count nodes by the master role label.
	WorkerNumber int
	MasterNumber int
	// Aggregated capacity across all nodes.
	MemoryCapacity resource.Quantity
	CPUCapacity    resource.Quantity
	PodCapacity    resource.Quantity
	// Aggregated allocatable resources across all nodes.
	MemoryAllocatable resource.Quantity
	CPUAllocatable    resource.Quantity
	PodAllocatable    resource.Quantity
	// StorageClasses available in the cluster.
	StorageClasses *storagev1.StorageClassList
}
// GetClusterInfo retrieves current cluster info from cluster
// It lists nodes and storage classes through a cluster-scoped context and
// aggregates node counts plus capacity/allocatable totals.
func GetClusterInfo(_ctx context.Context, k8sClient client.Client, clusterName string) (*ClusterInfo, error) {
	ctx := ContextWithClusterName(_ctx, clusterName)
	nodes := &corev1.NodeList{}
	if err := k8sClient.List(ctx, nodes); err != nil {
		return nil, errors.Wrapf(err, "failed to list cluster nodes")
	}
	info := &ClusterInfo{Nodes: nodes}
	for i := range nodes.Items {
		node := &nodes.Items[i]
		if _, isMaster := node.Labels["node-role.kubernetes.io/master"]; isMaster {
			info.MasterNumber++
		} else {
			info.WorkerNumber++
		}
		capacity := node.Status.Capacity
		info.MemoryCapacity.Add(*capacity.Memory())
		info.CPUCapacity.Add(*capacity.Cpu())
		info.PodCapacity.Add(*capacity.Pods())
		allocatable := node.Status.Allocatable
		info.MemoryAllocatable.Add(*allocatable.Memory())
		info.CPUAllocatable.Add(*allocatable.Cpu())
		info.PodAllocatable.Add(*allocatable.Pods())
	}
	storageClasses := &storagev1.StorageClassList{}
	if err := k8sClient.List(ctx, storageClasses); err != nil {
		return nil, errors.Wrapf(err, "failed to list storage classes")
	}
	info.StorageClasses = storageClasses
	return info, nil
}

View File

@@ -0,0 +1,74 @@
/*
Copyright 2020-2022 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package multicluster
import (
"math/rand"
"testing"
"time"
"github.com/oam-dev/cluster-gateway/pkg/apis/cluster/v1alpha1"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/client-go/rest"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
"github.com/oam-dev/kubevela/pkg/utils/common"
)
// Shared suite fixtures: the envtest rest config, client, and environment
// created in BeforeSuite and torn down in AfterSuite.
var cfg *rest.Config
var k8sClient client.Client
var testEnv *envtest.Environment
// TestUtils wires the Ginkgo suite into the standard `go test` runner.
func TestUtils(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Utils Suite")
}
// Start a local kube-apiserver via envtest and build the shared test client.
var _ = BeforeSuite(func(done Done) {
	rand.Seed(time.Now().UnixNano())
	By("bootstrapping test environment for utils test")
	testEnv = &envtest.Environment{
		ControlPlaneStartTimeout: time.Minute * 3,
		ControlPlaneStopTimeout:  time.Minute,
		UseExistingCluster:       pointer.BoolPtr(false),
		// ./testdata holds the CRDs (e.g. ManagedCluster) the suite needs.
		CRDDirectoryPaths: []string{"./testdata"},
	}
	By("start kube test env")
	var err error
	cfg, err = testEnv.Start()
	Expect(err).ShouldNot(HaveOccurred())
	Expect(cfg).ToNot(BeNil())
	By("new kube client")
	cfg.Timeout = time.Minute * 2
	// Register cluster-gateway types so the client can handle cluster secrets.
	Expect(v1alpha1.AddToScheme(common.Scheme)).Should(Succeed())
	k8sClient, err = client.New(cfg, client.Options{Scheme: common.Scheme})
	Expect(err).Should(BeNil())
	Expect(k8sClient).ToNot(BeNil())
	close(done)
}, 240) // 240s timeout: envtest control-plane startup can be slow
// Tear down the envtest control plane started in BeforeSuite.
var _ = AfterSuite(func() {
	By("tearing down the test environment")
	err := testEnv.Stop()
	Expect(err).ToNot(HaveOccurred())
})

View File

@@ -0,0 +1,204 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: managedclusters.cluster.open-cluster-management.io
spec:
group: cluster.open-cluster-management.io
names:
kind: ManagedCluster
listKind: ManagedClusterList
plural: managedclusters
shortNames:
- mcl
- mcls
singular: managedcluster
scope: Cluster
preserveUnknownFields: false
versions:
- additionalPrinterColumns:
- jsonPath: .spec.hubAcceptsClient
name: Hub Accepted
type: boolean
- jsonPath: .spec.managedClusterClientConfigs[*].url
name: Managed Cluster URLs
type: string
- jsonPath: .status.conditions[?(@.type=="ManagedClusterJoined")].status
name: Joined
type: string
- jsonPath: .status.conditions[?(@.type=="ManagedClusterConditionAvailable")].status
name: Available
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1
schema:
openAPIV3Schema:
description: "ManagedCluster represents the desired state and current status of managed cluster. ManagedCluster is a cluster scoped resource. The name is the cluster UID. \n The cluster join process follows a double opt-in process: \n 1. Agent on managed cluster creates CSR on hub with cluster UID and agent name. 2. Agent on managed cluster creates ManagedCluster on hub. 3. Cluster admin on hub approves the CSR for UID and agent name of the ManagedCluster. 4. Cluster admin sets spec.acceptClient of ManagedCluster to true. 5. Cluster admin on managed cluster creates credential of kubeconfig to hub. \n Once the hub creates the cluster namespace, the Klusterlet agent on the ManagedCluster pushes the credential to the hub to use against the kube-apiserver of the ManagedCluster."
type: object
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: Spec represents a desired configuration for the agent on the managed cluster.
type: object
properties:
hubAcceptsClient:
description: hubAcceptsClient represents that hub accepts the joining of Klusterlet agent on the managed cluster with the hub. The default value is false, and can only be set true when the user on hub has an RBAC rule to UPDATE on the virtual subresource of managedclusters/accept. When the value is set true, a namespace whose name is the same as the name of ManagedCluster is created on the hub. This namespace represents the managed cluster, also role/rolebinding is created on the namespace to grant the permission of access from the agent on the managed cluster. When the value is set to false, the namespace representing the managed cluster is deleted.
type: boolean
leaseDurationSeconds:
description: LeaseDurationSeconds is used to coordinate the lease update time of Klusterlet agents on the managed cluster. If its value is zero, the Klusterlet agent will update its lease every 60 seconds by default
type: integer
format: int32
default: 60
managedClusterClientConfigs:
description: ManagedClusterClientConfigs represents a list of the apiserver address of the managed cluster. If it is empty, the managed cluster has no accessible address for the hub to connect with it.
type: array
items:
description: ClientConfig represents the apiserver address of the managed cluster. TODO include credential to connect to managed cluster kube-apiserver
type: object
properties:
caBundle:
description: CABundle is the ca bundle to connect to apiserver of the managed cluster. System certs are used if it is not set.
type: string
format: byte
url:
description: URL is the URL of apiserver endpoint of the managed cluster.
type: string
taints:
description: Taints is a property of managed cluster that allow the cluster to be repelled when scheduling. Taints, including 'ManagedClusterUnavailable' and 'ManagedClusterUnreachable', can not be added/removed by agent running on the managed cluster; while it's fine to add/remove other taints from either hub cluster or managed cluster.
type: array
items:
description: The managed cluster this Taint is attached to has the "effect" on any placement that does not tolerate the Taint.
type: object
required:
- effect
- key
properties:
effect:
description: Effect indicates the effect of the taint on placements that do not tolerate the taint. Valid effects are NoSelect, PreferNoSelect and NoSelectIfNew.
type: string
enum:
- NoSelect
- PreferNoSelect
- NoSelectIfNew
key:
description: Key is the taint key applied to a cluster. e.g. bar or foo.example.com/bar. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
type: string
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
timeAdded:
description: TimeAdded represents the time at which the taint was added.
type: string
format: date-time
nullable: true
value:
description: Value is the taint value corresponding to the taint key.
type: string
maxLength: 1024
status:
description: Status represents the current status of joined managed cluster
type: object
properties:
allocatable:
description: Allocatable represents the total allocatable resources on the managed cluster.
type: object
additionalProperties:
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
anyOf:
- type: integer
- type: string
x-kubernetes-int-or-string: true
capacity:
description: Capacity represents the total resource capacity from all nodeStatuses on the managed cluster.
type: object
additionalProperties:
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
anyOf:
- type: integer
- type: string
x-kubernetes-int-or-string: true
clusterClaims:
description: ClusterClaims represents cluster information that a managed cluster claims, for example a unique cluster identifier (id.k8s.io) and kubernetes version (kubeversion.open-cluster-management.io). They are written from the managed cluster. The set of claims is not uniform across a fleet, some claims can be vendor or version specific and may not be included from all managed clusters.
type: array
items:
description: ManagedClusterClaim represents a ClusterClaim collected from a managed cluster.
type: object
properties:
name:
description: Name is the name of a ClusterClaim resource on managed cluster. It's a well known or customized name to identify the claim.
type: string
maxLength: 253
minLength: 1
value:
description: Value is a claim-dependent string
type: string
maxLength: 1024
minLength: 1
conditions:
description: Conditions contains the different condition statuses for this managed cluster.
type: array
items:
description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }"
type: object
required:
- lastTransitionTime
- message
- reason
- status
- type
properties:
lastTransitionTime:
description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
type: string
format: date-time
message:
description: message is a human readable message indicating details about the transition. This may be an empty string.
type: string
maxLength: 32768
observedGeneration:
description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance.
type: integer
format: int64
minimum: 0
reason:
description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty.
type: string
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
status:
description: status of the condition, one of True, False, Unknown.
type: string
enum:
- "True"
- "False"
- Unknown
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
type: string
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
version:
description: Version represents the kubernetes version of the managed cluster.
type: object
properties:
kubernetes:
description: Kubernetes is the kubernetes version of managed cluster.
type: string
served: true
storage: true
subresources:
status: {}
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []

View File

@@ -0,0 +1,186 @@
/*
Copyright 2020-2022 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package multicluster
import (
"context"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
apilabels "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
apitypes "k8s.io/apimachinery/pkg/types"
clusterv1 "open-cluster-management.io/api/cluster/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/oam-dev/cluster-gateway/pkg/apis/cluster/v1alpha1"
velaerrors "github.com/oam-dev/kubevela/pkg/utils/errors"
)
const (
// CredentialTypeOCMManagedCluster identifies the virtual cluster from ocm
CredentialTypeOCMManagedCluster v1alpha1.CredentialType = "ManagedCluster"
)
// VirtualCluster contains base info of cluster, it unifies the difference between different cluster implementations
// like cluster secret or ocm managed cluster
type VirtualCluster struct {
// Name is the registered name of the cluster in the control plane.
Name string
// Type records which credential mechanism backs this cluster entry,
// e.g. a cluster-gateway credential type from a secret, or
// CredentialTypeOCMManagedCluster for OCM clusters.
Type v1alpha1.CredentialType
// EndPoint is the cluster apiserver endpoint; "-" when not exposed
// (the OCM ManagedCluster case).
EndPoint string
// Accepted indicates whether the hub accepted the cluster; always true
// for secret-based clusters, HubAcceptsClient for OCM clusters.
Accepted bool
// Labels are the labels of the backing Secret or ManagedCluster object.
Labels map[string]string
}
// NewVirtualClusterFromSecret extract virtual cluster from cluster secret.
// The secret must carry the cluster credential-type label; otherwise it is
// rejected as an invalid cluster secret.
func NewVirtualClusterFromSecret(secret *corev1.Secret) (*VirtualCluster, error) {
	labels := secret.GetLabels()
	if labels == nil {
		labels = map[string]string{}
	}
	credType, found := labels[v1alpha1.LabelKeyClusterCredentialType]
	if !found {
		return nil, errors.Errorf("secret is not a valid cluster secret, no credential type found")
	}
	// Default to the endpoint stored in the secret data; the endpoint-type
	// label, when present, takes precedence.
	endpoint := string(secret.Data["endpoint"])
	if ep, ok := labels[v1alpha1.LabelKeyClusterEndpointType]; ok {
		endpoint = ep
	}
	return &VirtualCluster{
		Name:     secret.Name,
		Type:     v1alpha1.CredentialType(credType),
		EndPoint: endpoint,
		Accepted: true,
		Labels:   labels,
	}, nil
}
// NewVirtualClusterFromManagedCluster extract virtual cluster from ocm managed cluster.
// A ManagedCluster without any client config is rejected.
func NewVirtualClusterFromManagedCluster(managedCluster *clusterv1.ManagedCluster) (*VirtualCluster, error) {
	if len(managedCluster.Spec.ManagedClusterClientConfigs) == 0 {
		return nil, errors.Errorf("managed cluster has no client config")
	}
	vc := &VirtualCluster{
		Name:     managedCluster.Name,
		Type:     CredentialTypeOCMManagedCluster,
		EndPoint: "-", // endpoint is not exposed for OCM-managed clusters
		Accepted: managedCluster.Spec.HubAcceptsClient,
		Labels:   managedCluster.GetLabels(),
	}
	return vc, nil
}
// GetVirtualCluster returns virtual cluster with given clusterName.
// Lookup order: (1) a cluster secret in ClusterGatewaySecretNamespace,
// (2) an OCM ManagedCluster with the same name. NotFound errors from either
// source are tolerated; if both sources miss, ErrClusterNotExists is
// returned. Any other errors from both lookups are aggregated into an
// ErrorList.
func GetVirtualCluster(ctx context.Context, c client.Client, clusterName string) (vc *VirtualCluster, err error) {
// First attempt: the cluster secret.
secret := &corev1.Secret{}
err = c.Get(ctx, apitypes.NamespacedName{
Name: clusterName,
Namespace: ClusterGatewaySecretNamespace,
}, secret)
var secretErr error
if err == nil {
// Secret exists; a parse failure here is remembered in secretErr and
// we fall through to the ManagedCluster lookup.
vc, secretErr = NewVirtualClusterFromSecret(secret)
if secretErr == nil {
return vc, nil
}
}
// Keep only unexpected Get errors; NotFound just means "try OCM next".
if err != nil && !apierrors.IsNotFound(err) {
secretErr = err
}
// Second attempt: the OCM ManagedCluster.
managedCluster := &clusterv1.ManagedCluster{}
err = c.Get(ctx, apitypes.NamespacedName{
Name: clusterName,
Namespace: ClusterGatewaySecretNamespace,
}, managedCluster)
var managedClusterErr error
if err == nil {
vc, managedClusterErr = NewVirtualClusterFromManagedCluster(managedCluster)
if managedClusterErr == nil {
return vc, nil
}
}
// NotFound and "CRD not installed" (OCM absent) are both expected misses.
if err != nil && !apierrors.IsNotFound(err) && !velaerrors.IsCRDNotExists(err) {
managedClusterErr = err
}
// Both lookups missed cleanly: the cluster simply does not exist.
if secretErr == nil && managedClusterErr == nil {
return nil, ErrClusterNotExists
}
// Otherwise surface whatever real errors were collected.
var errs velaerrors.ErrorList
if secretErr != nil {
errs = append(errs, secretErr)
}
if managedClusterErr != nil {
errs = append(errs, managedClusterErr)
}
return nil, errs
}
// MatchVirtualClusterLabels filters the list/delete operation of cluster list
type MatchVirtualClusterLabels map[string]string

// ApplyToList applies this configuration to the given list options: restrict
// the namespace to the cluster-gateway secret namespace and require the
// cluster credential-type label to exist in addition to the given labels.
func (m MatchVirtualClusterLabels) ApplyToList(opts *client.ListOptions) {
	selector := apilabels.SelectorFromValidatedSet(map[string]string(m))
	requirement, err := apilabels.NewRequirement(v1alpha1.LabelKeyClusterCredentialType, selection.Exists, nil)
	if err == nil {
		selector = selector.Add(*requirement)
	}
	opts.Namespace = ClusterGatewaySecretNamespace
	opts.LabelSelector = selector
}

// ApplyToDeleteAllOf applies this configuration to the given delete-collection
// options by reusing the list configuration.
func (m MatchVirtualClusterLabels) ApplyToDeleteAllOf(opts *client.DeleteAllOfOptions) {
	m.ApplyToList(&opts.ListOptions)
}
// ListVirtualClusters will get all registered clusters in control plane.
// It is a convenience wrapper around FindVirtualClustersByLabels with an
// empty label filter.
func ListVirtualClusters(ctx context.Context, c client.Client) ([]VirtualCluster, error) {
return FindVirtualClustersByLabels(ctx, c, map[string]string{})
}
// FindVirtualClustersByLabels will get all virtual clusters with matched labels in control plane.
// Both cluster-secret based clusters and OCM ManagedClusters are merged into
// one list. Entries that fail to parse are skipped (best-effort listing), and
// an absent OCM CRD is tolerated.
func FindVirtualClustersByLabels(ctx context.Context, c client.Client, labels map[string]string) ([]VirtualCluster, error) {
	var clusters []VirtualCluster
	secrets := corev1.SecretList{}
	if err := c.List(ctx, &secrets, MatchVirtualClusterLabels(labels)); err != nil {
		return nil, errors.Wrapf(err, "failed to get clusterSecret secrets")
	}
	for _, secret := range secrets.Items {
		vc, err := NewVirtualClusterFromSecret(secret.DeepCopy())
		if err == nil {
			clusters = append(clusters, *vc)
		}
	}
	managedClusters := clusterv1.ManagedClusterList{}
	// Fix: use the caller's ctx (was context.Background()) so cancellation
	// and deadlines propagate to the ManagedCluster list call as well.
	if err := c.List(ctx, &managedClusters, client.MatchingLabels(labels)); err != nil && !velaerrors.IsCRDNotExists(err) {
		return nil, errors.Wrapf(err, "failed to get managed clusters")
	}
	for _, managedCluster := range managedClusters.Items {
		vc, err := NewVirtualClusterFromManagedCluster(managedCluster.DeepCopy())
		if err == nil {
			clusters = append(clusters, *vc)
		}
	}
	return clusters, nil
}

View File

@@ -0,0 +1,118 @@
/*
Copyright 2020-2022 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package multicluster
import (
"context"
"github.com/oam-dev/cluster-gateway/pkg/apis/cluster/v1alpha1"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clusterv1 "open-cluster-management.io/api/cluster/v1"
)
// Exercises the virtual-cluster helpers end-to-end against the envtest
// apiserver: secret-based clusters, OCM ManagedClusters, error paths for
// missing/invalid clusters, and the list/find-by-label helpers.
var _ = Describe("Test Virtual Cluster", func() {
It("Test Virtual Cluster", func() {
ClusterGatewaySecretNamespace = "vela-system"
ctx := context.Background()
Expect(k8sClient.Create(ctx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ClusterGatewaySecretNamespace}})).Should(Succeed())
By("Initialize Secrets")
// A valid cluster secret: credential type + endpoint type + custom label.
Expect(k8sClient.Create(ctx, &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster",
Namespace: ClusterGatewaySecretNamespace,
Labels: map[string]string{
v1alpha1.LabelKeyClusterCredentialType: string(v1alpha1.CredentialTypeX509Certificate),
v1alpha1.LabelKeyClusterEndpointType: v1alpha1.ClusterEndpointTypeConst,
"key": "value",
},
},
})).Should(Succeed())
// Valid but without the custom "key" label (excluded from label search).
Expect(k8sClient.Create(ctx, &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "cluster-no-label",
Namespace: ClusterGatewaySecretNamespace,
Labels: map[string]string{
v1alpha1.LabelKeyClusterCredentialType: string(v1alpha1.CredentialTypeX509Certificate),
},
},
})).Should(Succeed())
// Invalid: no credential-type label at all.
Expect(k8sClient.Create(ctx, &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "cluster-invalid",
Namespace: ClusterGatewaySecretNamespace,
},
})).Should(Succeed())
By("Test Get Virtual Cluster From Cluster Secret")
vc, err := GetVirtualCluster(ctx, k8sClient, "test-cluster")
Expect(err).Should(Succeed())
Expect(vc.Type).Should(Equal(v1alpha1.CredentialTypeX509Certificate))
Expect(vc.Labels["key"]).Should(Equal("value"))
_, err = GetVirtualCluster(ctx, k8sClient, "cluster-not-found")
Expect(err).ShouldNot(Succeed())
Expect(err.Error()).Should(ContainSubstring("no such cluster"))
_, err = GetVirtualCluster(ctx, k8sClient, "cluster-invalid")
Expect(err).ShouldNot(Succeed())
Expect(err.Error()).Should(ContainSubstring("not a valid cluster"))
By("Add OCM ManagedCluster")
// OCM cluster without client configs: must be rejected on Get.
Expect(k8sClient.Create(ctx, &clusterv1.ManagedCluster{
ObjectMeta: metav1.ObjectMeta{
Name: "ocm-bad-cluster",
Namespace: ClusterGatewaySecretNamespace,
},
})).Should(Succeed())
Expect(k8sClient.Create(ctx, &clusterv1.ManagedCluster{
ObjectMeta: metav1.ObjectMeta{
Name: "ocm-cluster",
Namespace: ClusterGatewaySecretNamespace,
Labels: map[string]string{"key": "value"},
},
Spec: clusterv1.ManagedClusterSpec{
ManagedClusterClientConfigs: []clusterv1.ClientConfig{{URL: "test-url"}},
},
})).Should(Succeed())
By("Test Get Virtual Cluster From OCM")
_, err = GetVirtualCluster(ctx, k8sClient, "ocm-bad-cluster")
Expect(err).ShouldNot(Succeed())
Expect(err.Error()).Should(ContainSubstring("has no client config"))
vc, err = GetVirtualCluster(ctx, k8sClient, "ocm-cluster")
Expect(err).Should(Succeed())
Expect(vc.Type).Should(Equal(CredentialTypeOCMManagedCluster))
By("Test List Virtual Clusters")
// 3 = test-cluster + cluster-no-label + ocm-cluster (invalid/bad skipped).
vcs, err := ListVirtualClusters(ctx, k8sClient)
Expect(err).Should(Succeed())
Expect(len(vcs)).Should(Equal(3))
// 2 = test-cluster + ocm-cluster carry the "key: value" label.
vcs, err = FindVirtualClustersByLabels(ctx, k8sClient, map[string]string{"key": "value"})
Expect(err).Should(Succeed())
Expect(len(vcs)).Should(Equal(2))
})
})

View File

@@ -182,4 +182,7 @@ const (
// AnnotationWorkloadName indicates the managed workload's name by trait
AnnotationWorkloadName = "trait.oam.dev/workload-name"
// AnnotationControllerRequirement indicates the controller version that can process the application.
AnnotationControllerRequirement = "app.oam.dev/controller-version-require"
)

View File

@@ -591,10 +591,9 @@ func TestConvertWorkloadGVK2Def(t *testing.T) {
Version: "v1",
}, ref)
ref, err = util.ConvertWorkloadGVK2Definition(mapper, common.WorkloadGVK{APIVersion: "/apps/v1",
_, err = util.ConvertWorkloadGVK2Definition(mapper, common.WorkloadGVK{APIVersion: "/apps/v1",
Kind: "Deployment"})
assert.Error(t, err)
}
func TestGenTraitName(t *testing.T) {

View File

@@ -50,7 +50,8 @@ type Applicator interface {
}
type applyAction struct {
skipUpdate bool
skipUpdate bool
updateAnnotation bool
}
// ApplyOption is called before applying state to the object.
@@ -80,13 +81,13 @@ func (fn creatorFn) createOrGetExisting(ctx context.Context, act *applyAction, c
}
type patcher interface {
patch(c, m client.Object) (client.Patch, error)
patch(c, m client.Object, a *applyAction) (client.Patch, error)
}
type patcherFn func(c, m client.Object) (client.Patch, error)
type patcherFn func(c, m client.Object, a *applyAction) (client.Patch, error)
func (fn patcherFn) patch(c, m client.Object) (client.Patch, error) {
return fn(c, m)
func (fn patcherFn) patch(c, m client.Object, a *applyAction) (client.Patch, error) {
return fn(c, m, a)
}
// APIApplicator implements Applicator
@@ -112,7 +113,7 @@ func (a *APIApplicator) Apply(ctx context.Context, desired client.Object, ao ...
if err != nil {
return err
}
applyAct := new(applyAction)
applyAct := &applyAction{updateAnnotation: true}
existing, err := a.createOrGetExisting(ctx, applyAct, a.c, desired, ao...)
if err != nil {
return err
@@ -132,7 +133,7 @@ func (a *APIApplicator) Apply(ctx context.Context, desired client.Object, ao ...
}
loggingApply("patching object", desired)
patch, err := a.patcher.patch(existing, desired)
patch, err := a.patcher.patch(existing, desired, applyAct)
if err != nil {
return errors.Wrap(err, "cannot calculate patch by computing a three way diff")
}
@@ -169,8 +170,10 @@ func createOrGetExisting(ctx context.Context, act *applyAction, c client.Client,
if err := executeApplyOptions(act, nil, desired, ao); err != nil {
return nil, err
}
if err := addLastAppliedConfigAnnotation(desired); err != nil {
return nil, err
if act.updateAnnotation {
if err := addLastAppliedConfigAnnotation(desired); err != nil {
return nil, err
}
}
loggingApply("creating object", desired)
return nil, errors.Wrap(c.Create(ctx, desired), "cannot create object")
@@ -275,3 +278,11 @@ func MakeCustomApplyOption(f func(existing, desired client.Object) error) ApplyO
return f(existing, desired)
}
}
// DisableUpdateAnnotation disable write last config to annotation.
// It returns an ApplyOption that clears the updateAnnotation flag so the
// applicator skips recording the last-applied configuration.
func DisableUpdateAnnotation() ApplyOption {
	return func(act *applyAction, _, _ client.Object) error {
		act.updateAnnotation = false
		return nil
	}
}

View File

@@ -28,6 +28,7 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/pointer"
"github.com/oam-dev/kubevela/pkg/oam"
oamutil "github.com/oam-dev/kubevela/pkg/oam/util"
)
@@ -84,6 +85,22 @@ var _ = Describe("Test apply", func() {
By("Unsetted fields shoulde be removed or set to default value")
Expect(*resultDeploy.Spec.Replicas).Should(Equal(int32(1)))
Expect(len(resultDeploy.Spec.Template.Spec.Volumes)).Should(Equal(0))
deployUpdate := basicTestDeployment()
deployUpdate.Name = deploy.Name + "-no-update"
Expect(k8sApplicator.Apply(ctx, deployUpdate, DisableUpdateAnnotation())).Should(Succeed())
Expect(len(deployUpdate.Annotations[oam.AnnotationLastAppliedConfig])).Should(Equal(0))
deployUpdate = basicTestDeployment()
deployUpdate.Spec.Replicas = &int32_3
deployUpdate.Spec.Template.Spec.Volumes = []corev1.Volume{{Name: "test"}}
Expect(k8sApplicator.Apply(ctx, deployUpdate)).Should(Succeed())
resultDeploy = basicTestDeployment()
resultDeploy.Name = deploy.Name + "-no-update"
Expect(rawClient.Get(ctx, deployKey, resultDeploy)).Should(Succeed())
Expect(*resultDeploy.Spec.Replicas).Should(Equal(int32_3))
Expect(len(resultDeploy.Spec.Template.Spec.Volumes)).Should(Equal(1))
Expect(rawClient.Delete(ctx, deployUpdate)).Should(SatisfyAny(Succeed(), &oamutil.NotFoundMatcher{}))
})
It("Test multiple appliers", func() {

View File

@@ -140,7 +140,7 @@ func TestAPIApplicator(t *testing.T) {
creator: creatorFn(func(_ context.Context, _ *applyAction, _ client.Client, _ client.Object, _ ...ApplyOption) (client.Object, error) {
return tc.args.existing, tc.args.creatorErr
}),
patcher: patcherFn(func(c, m client.Object) (client.Patch, error) {
patcher: patcherFn(func(c, m client.Object, a *applyAction) (client.Patch, error) {
return nil, tc.args.patcherErr
}),
c: tc.c,

View File

@@ -42,7 +42,7 @@ func init() {
// threeWayMergePatch creates a patch by computing a three way diff based on
// its current state, modified state, and last-applied-state recorded in the
// annotation.
func threeWayMergePatch(currentObj, modifiedObj client.Object) (client.Patch, error) {
func threeWayMergePatch(currentObj, modifiedObj client.Object, a *applyAction) (client.Patch, error) {
current, err := json.Marshal(currentObj)
if err != nil {
return nil, err
@@ -51,7 +51,7 @@ func threeWayMergePatch(currentObj, modifiedObj client.Object) (client.Patch, er
if err != nil {
return nil, err
}
modified, err := getModifiedConfiguration(modifiedObj, true)
modified, err := getModifiedConfiguration(modifiedObj, a.updateAnnotation)
if err != nil {
return nil, err
}

View File

@@ -19,6 +19,8 @@ package common
import (
"fmt"
"k8s.io/client-go/discovery"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/rest"
"k8s.io/client-go/util/flowcontrol"
@@ -36,6 +38,7 @@ type Args struct {
client client.Client
dm discoverymapper.DiscoveryMapper
pd *packages.PackageDiscover
dc *discovery.DiscoveryClient
}
// SetConfig insert kubeconfig into Args
@@ -122,3 +125,20 @@ func (a *Args) GetPackageDiscover() (*packages.PackageDiscover, error) {
a.pd = pd
return pd, nil
}
// GetDiscoveryClient return a discovery client from cli args. The client is
// built lazily from the kube config and cached on Args for reuse.
func (a *Args) GetDiscoveryClient() (*discovery.DiscoveryClient, error) {
	if a.dc != nil {
		return a.dc, nil
	}
	cfg, err := a.GetConfig()
	if err != nil {
		return nil, err
	}
	dc, err := discovery.NewDiscoveryClientForConfig(cfg)
	if err != nil {
		return nil, err
	}
	// Fix: cache the client; previously a.dc was never assigned, so the
	// memoization above could never take effect and every call rebuilt it.
	a.dc = dc
	return dc, nil
}

28
pkg/utils/errors/crd.go Normal file
View File

@@ -0,0 +1,28 @@
/*
Copyright 2020-2022 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package errors
import (
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/api/meta"
)
// IsCRDNotExists check if error is crd not exists, i.e. whether err wraps a
// meta.NoKindMatchError (the kind is not registered in the cluster).
func IsCRDNotExists(err error) bool {
	target := &meta.NoKindMatchError{}
	return errors.As(err, &target)
}

View File

@@ -28,7 +28,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
"github.com/oam-dev/kubevela/pkg/apiserver/clients"
"github.com/oam-dev/kubevela/pkg/utils/common"
)
@@ -62,9 +61,6 @@ var _ = BeforeSuite(func(done Done) {
k8sClient, err = client.New(cfg, client.Options{Scheme: common.Scheme})
Expect(err).Should(BeNil())
Expect(k8sClient).ToNot(BeNil())
By("new kube client success")
clients.SetKubeClient(k8sClient)
Expect(err).Should(BeNil())
close(done)
}, 240)

View File

@@ -32,6 +32,8 @@ import (
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/homedir"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/oam-dev/kubevela/version"
)
var defaultCacheDir = filepath.Join(homedir.HomeDir(), ".kube", "http-cache")
@@ -167,3 +169,11 @@ func computeDiscoverCacheDir(parentDir, host string) string {
safeHost := overlyCautiousIllegalFileCharacters.ReplaceAllString(schemelessHost, "_")
return filepath.Join(parentDir, safeHost)
}
// GenerateLeaderElectionID returns the Leader Election ID. For versioned
// deployments the KubeVela version is appended (lowercased, dots replaced by
// dashes) so different versions elect independently.
func GenerateLeaderElectionID(name string, versionedDeploy bool) string {
	if !versionedDeploy {
		return name
	}
	suffix := strings.ReplaceAll(strings.ToLower(version.VelaVersion), ".", "-")
	return name + "-" + suffix
}

View File

@@ -0,0 +1,35 @@
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"testing"
"github.com/oam-dev/kubevela/version"
)
// TestGenerateLeaderElectionID covers both the versioned and the plain
// leader-election ID forms.
func TestGenerateLeaderElectionID(t *testing.T) {
	version.VelaVersion = "v10.13.0"
	got := GenerateLeaderElectionID("kubevela", true)
	if got != "kubevela-v10-13-0" {
		t.Errorf("id is not as expected(%s != kubevela-v10-13-0)", got)
		return
	}
	got = GenerateLeaderElectionID("kubevela", false)
	if got != "kubevela" {
		t.Errorf("id is not as expected(%s != kubevela)", got)
		return
	}
}

View File

@@ -203,9 +203,7 @@ func NewPodCollector(gvk schema.GroupVersionKind) PodCollector {
if collector, ok := podCollectorMap[gvk]; ok {
return collector
}
return func(cli client.Client, obj *unstructured.Unstructured, cluster string) ([]*unstructured.Unstructured, error) {
return nil, nil
}
return velaComponentPodCollector
}
// standardWorkloadPodCollector collect pods created by standard workload
@@ -402,6 +400,35 @@ func helmReleasePodCollector(cli client.Client, obj *unstructured.Unstructured,
return collectedPods, nil
}
// velaComponentPodCollector lists the pods that belong to the given vela
// component: pods in the object's namespace labeled with
// "app.oam.dev/component: <name>", queried in the given cluster. Each pod is
// returned as an unstructured object with its Pod GVK set explicitly.
func velaComponentPodCollector(cli client.Client, obj *unstructured.Unstructured, cluster string) ([]*unstructured.Unstructured, error) {
// Route the list request to the target cluster via the multicluster context.
ctx := multicluster.ContextWithClusterName(context.Background(), cluster)
listOpts := []client.ListOption{
client.MatchingLabels(map[string]string{"app.oam.dev/component": obj.GetName()}),
client.InNamespace(obj.GetNamespace()),
}
podList := corev1.PodList{}
if err := cli.List(ctx, &podList, listOpts...); err != nil {
return nil, err
}
pods := make([]*unstructured.Unstructured, len(podList.Items))
for i := range podList.Items {
pod, err := oamutil.Object2Unstructured(podList.Items[i])
if err != nil {
return nil, err
}
// Object2Unstructured leaves the GVK empty; set it so consumers can
// identify the resource kind ("Pod" derived via reflection).
pod.SetGroupVersionKind(
corev1.SchemeGroupVersion.WithKind(
reflect.TypeOf(corev1.Pod{}).Name(),
),
)
pods[i] = pod
}
return pods, nil
}
func getEventFieldSelector(obj *unstructured.Unstructured) fields.Selector {
field := fields.Set{}
field["involvedObject.name"] = obj.GetName()

View File

@@ -55,6 +55,9 @@ const (
ProviderName = "query"
// HelmReleaseKind is the kind of HelmRelease
HelmReleaseKind = "HelmRelease"
annoAmbassadorServiceName = "ambassador.service/name"
annoAmbassadorServiceNamespace = "ambassador.service/namespace"
)
var fluxcdGroupVersion = schema.GroupVersion{Group: "helm.toolkit.fluxcd.io", Version: "v2beta1"}
@@ -233,7 +236,7 @@ func (h *provider) GeneratorServiceEndpoints(wfctx wfContext.Context, v *value.V
klog.Error(err, fmt.Sprintf("find v1 Service %s/%s from cluster %s failure", resource.Name, resource.Namespace, resource.Cluster))
continue
}
serviceEndpoints = append(serviceEndpoints, generatorFromService(service, selectorNodeIP, cluster)...)
serviceEndpoints = append(serviceEndpoints, generatorFromService(service, selectorNodeIP, cluster, "")...)
case helmapi.HelmReleaseGVK.Kind:
obj := new(unstructured.Unstructured)
obj.SetNamespace(resource.Namespace)
@@ -244,7 +247,7 @@ func (h *provider) GeneratorServiceEndpoints(wfctx wfContext.Context, v *value.V
klog.Error(err, "collect service by helm release failure", "helmRelease", resource.Name, "namespace", resource.Namespace, "cluster", resource.Cluster)
}
for _, service := range services {
serviceEndpoints = append(serviceEndpoints, generatorFromService(service, selectorNodeIP, cluster)...)
serviceEndpoints = append(serviceEndpoints, generatorFromService(service, selectorNodeIP, cluster, "")...)
}
// only support network/v1beta1
@@ -255,6 +258,34 @@ func (h *provider) GeneratorServiceEndpoints(wfctx wfContext.Context, v *value.V
for _, ing := range ingress {
serviceEndpoints = append(serviceEndpoints, generatorFromIngress(ing, cluster)...)
}
case "SeldonDeployment":
obj := new(unstructured.Unstructured)
obj.SetGroupVersionKind(schema.GroupVersionKind{
Group: "machinelearning.seldon.io",
Version: "v1",
Kind: "SeldonDeployment",
})
if err := findResource(obj, resource.Name, resource.Namespace, resource.Cluster); err != nil {
klog.Error(err, fmt.Sprintf("find v1 Seldon Deployment %s/%s from cluster %s failure", resource.Name, resource.Namespace, resource.Cluster))
continue
}
anno := obj.GetAnnotations()
serviceName := "ambassador"
serviceNS := "vela-system"
if anno != nil {
if anno[annoAmbassadorServiceName] != "" {
serviceName = anno[annoAmbassadorServiceName]
}
if anno[annoAmbassadorServiceNamespace] != "" {
serviceNS = anno[annoAmbassadorServiceNamespace]
}
}
var service corev1.Service
if err := findResource(&service, serviceName, serviceNS, resource.Cluster); err != nil {
klog.Error(err, fmt.Sprintf("find v1 Service %s/%s from cluster %s failure", serviceName, serviceNS, resource.Cluster))
continue
}
serviceEndpoints = append(serviceEndpoints, generatorFromService(service, selectorNodeIP, cluster, fmt.Sprintf("/seldon/%s/%s", resource.Namespace, resource.Name))...)
}
}
return v.FillObject(serviceEndpoints, "list")
@@ -362,7 +393,7 @@ func Install(p providers.Providers, cli client.Client, cfg *rest.Config) {
})
}
func generatorFromService(service corev1.Service, selectorNodeIP func() string, cluster string) []querytypes.ServiceEndpoint {
func generatorFromService(service corev1.Service, selectorNodeIP func() string, cluster, path string) []querytypes.ServiceEndpoint {
var serviceEndpoints []querytypes.ServiceEndpoint
switch service.Spec.Type {
case corev1.ServiceTypeLoadBalancer:
@@ -376,6 +407,7 @@ func generatorFromService(service corev1.Service, selectorNodeIP func() string,
AppProtocol: &judgeAppProtocol,
Host: ingress.Hostname,
Port: int(port.Port),
Path: path,
},
Ref: corev1.ObjectReference{
Kind: "Service",
@@ -395,6 +427,7 @@ func generatorFromService(service corev1.Service, selectorNodeIP func() string,
AppProtocol: &judgeAppProtocol,
Host: ingress.IP,
Port: int(port.Port),
Path: path,
},
Ref: corev1.ObjectReference{
Kind: "Service",
@@ -418,6 +451,7 @@ func generatorFromService(service corev1.Service, selectorNodeIP func() string,
Port: int(port.NodePort),
AppProtocol: &judgeAppProtocol,
Host: selectorNodeIP(),
Path: path,
},
Ref: corev1.ObjectReference{
Kind: "Service",

View File

@@ -29,6 +29,7 @@ import (
networkv1beta1 "k8s.io/api/networking/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/intstr"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"
@@ -454,6 +455,14 @@ options: {
Name: "helmRelease",
},
},
{
Cluster: "",
ObjectReference: corev1.ObjectReference{
Kind: "SeldonDeployment",
Namespace: "default",
Name: "sdep",
},
},
},
},
}
@@ -507,12 +516,38 @@ options: {
"helm.toolkit.fluxcd.io/namespace": "default",
},
},
{
"name": "seldon-ambassador",
"ports": []corev1.ServicePort{
{Port: 80, TargetPort: intstr.FromInt(80), Name: "80port"},
},
"type": corev1.ServiceTypeLoadBalancer,
"status": corev1.ServiceStatus{
LoadBalancer: corev1.LoadBalancerStatus{
Ingress: []corev1.LoadBalancerIngress{
{
IP: "1.1.1.1",
},
},
},
},
},
}
err = k8sClient.Create(context.TODO(), &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "vela-system",
},
})
Expect(err).Should(BeNil())
for _, s := range testServicelist {
ns := "default"
if s["namespace"] != nil {
ns = s["namespace"].(string)
}
service := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: s["name"].(string),
Namespace: "default",
Namespace: ns,
},
Spec: corev1.ServiceSpec{
Ports: s["ports"].([]corev1.ServicePort),
@@ -672,6 +707,22 @@ options: {
err := k8sClient.Create(context.TODO(), ing)
Expect(err).Should(BeNil())
}
obj := &unstructured.Unstructured{}
obj.SetName("sdep")
obj.SetNamespace("default")
obj.SetAnnotations(map[string]string{
annoAmbassadorServiceName: "seldon-ambassador",
annoAmbassadorServiceNamespace: "default",
})
obj.SetGroupVersionKind(schema.GroupVersionKind{
Group: "machinelearning.seldon.io",
Version: "v1",
Kind: "SeldonDeployment",
})
err = k8sClient.Create(context.TODO(), obj)
Expect(err).Should(BeNil())
opt := `app: {
name: "endpoints-app"
namespace: "default"
@@ -712,6 +763,7 @@ options: {
// helmRelease
fmt.Sprintf("http://%s:30002", gatewayIP),
"http://ingress.domain.helm",
"tcp://1.1.1.1:80/seldon/test",
}
endValue, err := v.Field("list")
Expect(err).Should(BeNil())

View File

@@ -44,7 +44,7 @@ var _ = BeforeSuite(func(done Done) {
ControlPlaneStartTimeout: time.Minute * 3,
ControlPlaneStopTimeout: time.Minute,
UseExistingCluster: pointer.BoolPtr(false),
CRDDirectoryPaths: []string{"../../../../charts/vela-core/crds"},
CRDDirectoryPaths: []string{"../../../../charts/vela-core/crds", "./testdata/machinelearning.seldon.io_seldondeployments.yaml"},
}
By("start kube test env")

File diff suppressed because it is too large Load Diff

View File

@@ -30,6 +30,7 @@ import (
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/cue/model/value"
"github.com/oam-dev/kubevela/pkg/cue/packages"
"github.com/oam-dev/kubevela/pkg/cue/process"
"github.com/oam-dev/kubevela/pkg/multicluster"
"github.com/oam-dev/kubevela/pkg/oam/discoverymapper"
oamutil "github.com/oam-dev/kubevela/pkg/oam/util"
@@ -82,7 +83,8 @@ func (handler *ViewHandler) QueryView(ctx context.Context, qv QueryView) (*value
Outputs: queryKey.Outputs,
}
taskDiscover := tasks.NewViewTaskDiscover(handler.pd, handler.cli, handler.cfg, handler.dispatch, handler.delete, handler.namespace, 3)
pCtx := process.NewContext(process.ContextData{})
taskDiscover := tasks.NewViewTaskDiscover(handler.pd, handler.cli, handler.cfg, handler.dispatch, handler.delete, handler.namespace, 3, pCtx)
genTask, err := taskDiscover.GetTaskGenerator(ctx, handler.viewTask.Type)
if err != nil {
return nil, err

View File

@@ -120,7 +120,7 @@ func (h *provider) ApplyInParallel(ctx wfContext.Context, v *value.Value, act ty
}
deployCtx := multicluster.ContextWithClusterName(context.Background(), cluster)
if err = h.apply(deployCtx, cluster, common.WorkflowResourceCreator, workloads...); err != nil {
return v.FillObject(err, "err")
return err
}
return nil
}

View File

@@ -24,7 +24,6 @@ import (
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/clustermanager"
"github.com/oam-dev/kubevela/pkg/cue/model/value"
"github.com/oam-dev/kubevela/pkg/multicluster"
"github.com/oam-dev/kubevela/pkg/policy/envbinding"
@@ -103,7 +102,7 @@ func (p *provider) MakePlacementDecisions(ctx wfContext.Context, v *value.Value,
}
// check if target cluster exists
if clusterName != multicluster.ClusterLocalName {
if err = clustermanager.EnsureClusterExists(p, clusterName); err != nil {
if _, err := multicluster.GetVirtualCluster(context.Background(), p.Client, clusterName); err != nil {
return errors.Wrapf(err, "failed to get cluster %s for env %s", clusterName, env)
}
}

View File

@@ -273,6 +273,7 @@ func TestMakePlacementDecisions(t *testing.T) {
ObjectMeta: v12.ObjectMeta{
Namespace: multicluster.ClusterGatewaySecretNamespace,
Name: testCase.PreAddCluster,
Labels: map[string]string{v1alpha12.LabelKeyClusterCredentialType: string(v1alpha12.CredentialTypeX509Certificate)},
},
}))
}

View File

@@ -31,6 +31,7 @@ import (
"github.com/oam-dev/kubevela/pkg/cue/model/sets"
"github.com/oam-dev/kubevela/pkg/cue/model/value"
"github.com/oam-dev/kubevela/pkg/cue/packages"
"github.com/oam-dev/kubevela/pkg/cue/process"
monitorContext "github.com/oam-dev/kubevela/pkg/monitor/context"
wfContext "github.com/oam-dev/kubevela/pkg/workflow/context"
"github.com/oam-dev/kubevela/pkg/workflow/hooks"
@@ -197,7 +198,7 @@ func (t *TaskLoader) makeTaskGenerator(templ string) (wfTypes.TaskGenerator, err
paramFile = fmt.Sprintf(model.ParameterFieldName+": {%s}\n", ps)
}
taskv, err := t.makeValue(ctx, strings.Join([]string{templ, paramFile}, "\n"), exec.wfStatus.ID)
taskv, err := t.makeValue(ctx, strings.Join([]string{templ, paramFile}, "\n"), exec.wfStatus.ID, options.PCtx)
if err != nil {
exec.err(ctx, err, StatusReasonRendering)
return exec.status(), exec.operation(), nil
@@ -227,7 +228,7 @@ func (t *TaskLoader) makeTaskGenerator(templ string) (wfTypes.TaskGenerator, err
}, nil
}
func (t *TaskLoader) makeValue(ctx wfContext.Context, templ string, id string) (*value.Value, error) {
func (t *TaskLoader) makeValue(ctx wfContext.Context, templ string, id string, pCtx process.Context) (*value.Value, error) {
var contextTempl string
meta, _ := ctx.GetVar(wfTypes.ContextKeyMetadata)
if meta != nil {
@@ -237,6 +238,7 @@ func (t *TaskLoader) makeValue(ctx wfContext.Context, templ string, id string) (
}
contextTempl = fmt.Sprintf("\ncontext: {%s}\ncontext: stepSessionID: \"%s\"", ms, id)
}
contextTempl += "\n" + pCtx.ExtendedContextFile()
return value.NewValue(templ+contextTempl, t.pd, contextTempl, value.ProcessScript, value.TagFieldOrder)
}
@@ -415,7 +417,7 @@ func getLabel(v *value.Value, label string) string {
}
// NewTaskLoader create a tasks loader.
func NewTaskLoader(lt LoadTaskTemplate, pkgDiscover *packages.PackageDiscover, handlers providers.Providers, logLevel int) *TaskLoader {
func NewTaskLoader(lt LoadTaskTemplate, pkgDiscover *packages.PackageDiscover, handlers providers.Providers, logLevel int, pCtx process.Context) *TaskLoader {
return &TaskLoader{
loadTemplate: lt,
pd: pkgDiscover,
@@ -423,6 +425,7 @@ func NewTaskLoader(lt LoadTaskTemplate, pkgDiscover *packages.PackageDiscover, h
runOptionsProcess: func(options *wfTypes.TaskRunOptions) {
options.PreStartHooks = append(options.PreStartHooks, hooks.Input)
options.PostStopHooks = append(options.PostStopHooks, hooks.Output)
options.PCtx = pCtx
},
logLevel: logLevel,
}

View File

@@ -34,6 +34,7 @@ import (
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/cue/model/value"
"github.com/oam-dev/kubevela/pkg/cue/process"
wfContext "github.com/oam-dev/kubevela/pkg/workflow/context"
"github.com/oam-dev/kubevela/pkg/workflow/hooks"
"github.com/oam-dev/kubevela/pkg/workflow/providers"
@@ -75,7 +76,13 @@ myIP: value: "1.1.1.1"
},
})
tasksLoader := NewTaskLoader(mockLoadTemplate, nil, discover, 0)
pCtx := process.NewContext(process.ContextData{
AppName: "app",
CompName: "app",
Namespace: "default",
AppRevisionName: "app-v1",
})
tasksLoader := NewTaskLoader(mockLoadTemplate, nil, discover, 0, pCtx)
steps := []v1beta1.WorkflowStep{
{
@@ -178,7 +185,13 @@ close({
return errors.New("mock error")
},
})
tasksLoader := NewTaskLoader(mockLoadTemplate, nil, discover, 0)
pCtx := process.NewContext(process.ContextData{
AppName: "app",
CompName: "app",
Namespace: "default",
AppRevisionName: "app-v1",
})
tasksLoader := NewTaskLoader(mockLoadTemplate, nil, discover, 0, pCtx)
steps := []v1beta1.WorkflowStep{
{
@@ -414,7 +427,13 @@ func TestPendingInputCheck(t *testing.T) {
ParameterKey: "score",
}},
}
tasksLoader := NewTaskLoader(mockLoadTemplate, nil, discover, 0)
pCtx := process.NewContext(process.ContextData{
AppName: "myapp",
CompName: "mycomp",
Namespace: "default",
AppRevisionName: "myapp-v1",
})
tasksLoader := NewTaskLoader(mockLoadTemplate, nil, discover, 0, pCtx)
gen, err := tasksLoader.GetTaskGenerator(context.Background(), step.Type)
r.NoError(err)
run, err := gen(step, &types.GeneratorOptions{})
@@ -443,7 +462,13 @@ func TestPendingDependsOnCheck(t *testing.T) {
Type: "ok",
DependsOn: []string{"depend"},
}
tasksLoader := NewTaskLoader(mockLoadTemplate, nil, discover, 0)
pCtx := process.NewContext(process.ContextData{
AppName: "myapp",
CompName: "mycomp",
Namespace: "default",
AppRevisionName: "myapp-v1",
})
tasksLoader := NewTaskLoader(mockLoadTemplate, nil, discover, 0, pCtx)
gen, err := tasksLoader.GetTaskGenerator(context.Background(), step.Type)
r.NoError(err)
run, err := gen(step, &types.GeneratorOptions{})

View File

@@ -26,6 +26,7 @@ import (
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/cue/packages"
"github.com/oam-dev/kubevela/pkg/cue/process"
"github.com/oam-dev/kubevela/pkg/oam/discoverymapper"
"github.com/oam-dev/kubevela/pkg/velaql/providers/query"
wfContext "github.com/oam-dev/kubevela/pkg/workflow/context"
@@ -74,7 +75,7 @@ func suspend(step v1beta1.WorkflowStep, opt *types.GeneratorOptions) (types.Task
}
// NewTaskDiscover will create a client for load task generator.
func NewTaskDiscover(providerHandlers providers.Providers, pd *packages.PackageDiscover, cli client.Client, dm discoverymapper.DiscoveryMapper) types.TaskDiscover {
func NewTaskDiscover(providerHandlers providers.Providers, pd *packages.PackageDiscover, cli client.Client, dm discoverymapper.DiscoveryMapper, pCtx process.Context) types.TaskDiscover {
// install builtin provider
workspace.Install(providerHandlers)
email.Install(providerHandlers)
@@ -85,7 +86,7 @@ func NewTaskDiscover(providerHandlers providers.Providers, pd *packages.PackageD
builtins: map[string]types.TaskGenerator{
"suspend": suspend,
},
remoteTaskDiscover: custom.NewTaskLoader(templateLoader.LoadTaskTemplate, pd, providerHandlers, 0),
remoteTaskDiscover: custom.NewTaskLoader(templateLoader.LoadTaskTemplate, pd, providerHandlers, 0, pCtx),
templateLoader: templateLoader,
}
}
@@ -116,7 +117,7 @@ func (tr *suspendTaskRunner) Pending(ctx wfContext.Context) bool {
}
// NewViewTaskDiscover will create a client for load task generator.
func NewViewTaskDiscover(pd *packages.PackageDiscover, cli client.Client, cfg *rest.Config, apply kube.Dispatcher, delete kube.Deleter, viewNs string, logLevel int) types.TaskDiscover {
func NewViewTaskDiscover(pd *packages.PackageDiscover, cli client.Client, cfg *rest.Config, apply kube.Dispatcher, delete kube.Deleter, viewNs string, logLevel int, pCtx process.Context) types.TaskDiscover {
handlerProviders := providers.NewProviders()
// install builtin provider
@@ -128,7 +129,7 @@ func NewViewTaskDiscover(pd *packages.PackageDiscover, cli client.Client, cfg *r
templateLoader := template.NewViewTemplateLoader(cli, viewNs)
return &taskDiscover{
remoteTaskDiscover: custom.NewTaskLoader(templateLoader.LoadTaskTemplate, pd, handlerProviders, logLevel),
remoteTaskDiscover: custom.NewTaskLoader(templateLoader.LoadTaskTemplate, pd, handlerProviders, logLevel, pCtx),
templateLoader: templateLoader,
}
}

View File

@@ -26,6 +26,7 @@ import (
"github.com/pkg/errors"
"gotest.tools/assert"
"github.com/oam-dev/kubevela/pkg/cue/process"
"github.com/oam-dev/kubevela/pkg/workflow/tasks/custom"
"github.com/oam-dev/kubevela/pkg/workflow/types"
)
@@ -46,11 +47,17 @@ func TestDiscover(t *testing.T) {
return "", makeErr(name)
}
}
pCtx := process.NewContext(process.ContextData{
AppName: "myapp",
CompName: "mycomp",
Namespace: "default",
AppRevisionName: "myapp-v1",
})
discover := &taskDiscover{
builtins: map[string]types.TaskGenerator{
"suspend": suspend,
},
remoteTaskDiscover: custom.NewTaskLoader(loadTemplate, nil, nil, 0),
remoteTaskDiscover: custom.NewTaskLoader(loadTemplate, nil, nil, 0, pCtx),
}
_, err := discover.GetTaskGenerator(context.Background(), "suspend")

View File

@@ -22,6 +22,7 @@ import (
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/cue/model/value"
"github.com/oam-dev/kubevela/pkg/cue/process"
monitorCtx "github.com/oam-dev/kubevela/pkg/monitor/context"
wfContext "github.com/oam-dev/kubevela/pkg/workflow/context"
)
@@ -41,6 +42,7 @@ type TaskDiscover interface {
// TaskRunOptions is the options for task run.
type TaskRunOptions struct {
Data *value.Value
PCtx process.Context
PreStartHooks []TaskPreStartHook
PostStopHooks []TaskPostStopHook
GetTracer func(id string, step v1beta1.WorkflowStep) monitorCtx.Context

View File

@@ -33,7 +33,7 @@ import (
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/appfile"
"github.com/oam-dev/kubevela/pkg/controller/utils"
"github.com/oam-dev/kubevela/pkg/cue/process"
util2 "github.com/oam-dev/kubevela/pkg/oam/util"
"github.com/oam-dev/kubevela/pkg/utils/common"
"github.com/oam-dev/kubevela/pkg/utils/util"
@@ -75,15 +75,13 @@ func ApplyTerraform(app *v1beta1.Application, k8sClient client.Client, ioStream
return nil, err
}
revisionName, _ := utils.GetAppNextRevision(app)
for i, wl := range appFile.Workloads {
switch wl.CapabilityCategory {
case types.TerraformCategory:
name := wl.Name
ioStream.Infof("\nApplying cloud resources %s\n", name)
tf, err := getTerraformJSONFiles(wl, appFile.Name, revisionName, namespace)
tf, err := getTerraformJSONFiles(wl, appfile.GenerateContextDataFromAppFile(appFile, wl.Name))
if err != nil {
return nil, fmt.Errorf("failed to get Terraform JSON files from workload %s: %w", name, err)
}
@@ -197,8 +195,8 @@ func generateSecretFromTerraformOutput(k8sClient client.Client, outputList []str
}
// getTerraformJSONFiles gets Terraform JSON files or modules from workload
func getTerraformJSONFiles(wl *appfile.Workload, applicationName, revisionName string, namespace string) ([]byte, error) {
pCtx, err := appfile.PrepareProcessContext(wl, applicationName, revisionName, namespace)
func getTerraformJSONFiles(wl *appfile.Workload, ctxData process.ContextData) ([]byte, error) {
pCtx, err := appfile.PrepareProcessContext(wl, ctxData)
if err != nil {
return nil, err
}

View File

@@ -24,6 +24,10 @@ import (
"strings"
"time"
"k8s.io/client-go/discovery"
"helm.sh/helm/v3/pkg/strvals"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
"github.com/oam-dev/kubevela/pkg/oam"
@@ -132,17 +136,29 @@ func NewAddonEnableCommand(c common.Args, ioStream cmdutil.IOStreams) *cobra.Com
if err != nil {
return err
}
dc, err := c.GetDiscoveryClient()
if err != nil {
return err
}
addonOrDir := args[0]
var name = addonOrDir
if _, err := os.Stat(addonOrDir); err == nil {
if file, err := os.Stat(addonOrDir); err == nil {
if !file.IsDir() {
return fmt.Errorf("%s is not addon dir", addonOrDir)
}
ioStream.Infof("enable addon by local dir: %s \n", addonOrDir)
// args[0] is a local path install with local dir, use base dir name as addonName
name = filepath.Base(addonOrDir)
err = enableAddonByLocal(ctx, name, addonOrDir, k8sClient, config, addonArgs)
err = enableAddonByLocal(ctx, name, addonOrDir, k8sClient, dc, config, addonArgs)
if err != nil {
return err
}
} else {
err = enableAddon(ctx, k8sClient, config, name, addonArgs)
if filepath.IsAbs(addonOrDir) || strings.HasPrefix(addonOrDir, ".") || strings.HasSuffix(addonOrDir, "/") {
return fmt.Errorf("addon directory %s not found in local", addonOrDir)
}
err = enableAddon(ctx, k8sClient, dc, config, name, addonArgs)
if err != nil {
return err
}
@@ -194,29 +210,41 @@ func NewAddonUpgradeCommand(c common.Args, ioStream cmdutil.IOStreams) *cobra.Co
if err != nil {
return err
}
dc, err := c.GetDiscoveryClient()
if err != nil {
return err
}
addonArgs, err := parseToMap(args[1:])
if err != nil {
return err
}
addonOrDir := args[0]
var name string
if _, err := os.Stat(addonOrDir); err == nil {
if file, err := os.Stat(addonOrDir); err == nil {
if !file.IsDir() {
return fmt.Errorf("%s is not addon dir", addonOrDir)
}
ioStream.Infof("enable addon by local dir: %s \n", addonOrDir)
// args[0] is a local path install with local dir
name := filepath.Base(addonOrDir)
_, err = pkgaddon.FetchAddonRelatedApp(context.Background(), k8sClient, name)
if err != nil {
return errors.Wrapf(err, "cannot fetch addon related addon %s", name)
}
err = enableAddonByLocal(ctx, name, addonOrDir, k8sClient, config, addonArgs)
err = enableAddonByLocal(ctx, name, addonOrDir, k8sClient, dc, config, addonArgs)
if err != nil {
return err
}
} else {
if filepath.IsAbs(addonOrDir) || strings.HasPrefix(addonOrDir, ".") || strings.HasSuffix(addonOrDir, "/") {
return fmt.Errorf("addon directory %s not found in local", addonOrDir)
}
_, err = pkgaddon.FetchAddonRelatedApp(context.Background(), k8sClient, addonOrDir)
if err != nil {
return errors.Wrapf(err, "cannot fetch addon related addon %s", addonOrDir)
}
err = enableAddon(ctx, k8sClient, config, addonOrDir, addonArgs)
err = enableAddon(ctx, k8sClient, dc, config, addonOrDir, addonArgs)
if err != nil {
return err
}
@@ -232,15 +260,9 @@ func NewAddonUpgradeCommand(c common.Args, ioStream cmdutil.IOStreams) *cobra.Co
func parseToMap(args []string) (map[string]interface{}, error) {
res := map[string]interface{}{}
for _, pair := range args {
line := strings.Split(pair, "=")
if len(line) < 2 {
return nil, fmt.Errorf("parameter format should be foo=bar, %s not match", pair)
}
k := strings.TrimSpace(line[0])
v := strings.TrimSpace(strings.Join(line[1:], "="))
if k != "" && v != "" {
res[k] = v
for _, arg := range args {
if err := strvals.ParseIntoString(arg, res); err != nil {
return nil, err
}
}
return res, nil
@@ -293,7 +315,7 @@ func NewAddonStatusCommand(c common.Args, ioStream cmdutil.IOStreams) *cobra.Com
}
}
func enableAddon(ctx context.Context, k8sClient client.Client, config *rest.Config, name string, args map[string]interface{}) error {
func enableAddon(ctx context.Context, k8sClient client.Client, dc *discovery.DiscoveryClient, config *rest.Config, name string, args map[string]interface{}) error {
var err error
registryDS := pkgaddon.NewRegistryDataStore(k8sClient)
registries, err := registryDS.ListRegistries(ctx)
@@ -302,7 +324,7 @@ func enableAddon(ctx context.Context, k8sClient client.Client, config *rest.Conf
}
for _, registry := range registries {
err = pkgaddon.EnableAddon(ctx, name, k8sClient, apply.NewAPIApplicator(k8sClient), config, registry, args, nil)
err = pkgaddon.EnableAddon(ctx, name, k8sClient, dc, apply.NewAPIApplicator(k8sClient), config, registry, args, nil)
if errors.Is(err, pkgaddon.ErrNotExist) {
continue
}
@@ -318,8 +340,8 @@ func enableAddon(ctx context.Context, k8sClient client.Client, config *rest.Conf
}
// enableAddonByLocal enable addon in local dir and return the addon name
func enableAddonByLocal(ctx context.Context, name string, dir string, k8sClient client.Client, config *rest.Config, args map[string]interface{}) error {
if err := pkgaddon.EnableAddonByLocalDir(ctx, name, dir, k8sClient, apply.NewAPIApplicator(k8sClient), config, args); err != nil {
func enableAddonByLocal(ctx context.Context, name string, dir string, k8sClient client.Client, dc *discovery.DiscoveryClient, config *rest.Config, args map[string]interface{}) error {
if err := pkgaddon.EnableAddonByLocalDir(ctx, name, dir, k8sClient, dc, apply.NewAPIApplicator(k8sClient), config, args); err != nil {
return err
}
if err := waitApplicationRunning(k8sClient, name); err != nil {

View File

@@ -43,9 +43,23 @@ func TestParseMap(t *testing.T) {
nilError: true,
},
{
args: []string{"errorparameter"},
res: nil,
nilError: false,
args: []string{"imagePullSecrets={a,b,c}"},
res: map[string]interface{}{
"imagePullSecrets": []interface{}{
"a", "b", "c",
},
},
nilError: true,
},
{
args: []string{"image.repo=www.test.com", "image.tag=1.1"},
res: map[string]interface{}{
"image": map[string]interface{}{
"repo": "www.test.com",
"tag": "1.1",
},
},
nilError: true,
},
}
for _, s := range testcase {
@@ -53,8 +67,6 @@ func TestParseMap(t *testing.T) {
assert.DeepEqual(t, s.res, r)
if s.nilError {
assert.NilError(t, err)
} else {
assert.Error(t, err, "parameter format should be foo=bar, errorparameter not match")
}
}
}

View File

@@ -23,6 +23,7 @@ import (
"runtime"
gov "github.com/hashicorp/go-version"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"k8s.io/klog"
@@ -105,7 +106,6 @@ func NewCommand() *cobra.Command {
NewUnInstallCommand(commandArgs, "2", ioStream),
NewExportCommand(commandArgs, ioStream),
NewCUEPackageCommand(commandArgs, ioStream),
SystemCommandGroup(commandArgs, ioStream),
NewVersionCommand(ioStream),
NewCompletionCommand(),
@@ -134,11 +134,14 @@ func NewVersionCommand(ioStream util.IOStreams) *cobra.Command {
Short: "Prints vela build version information",
Long: "Prints vela build version information.",
Run: func(cmd *cobra.Command, args []string) {
fmt.Printf(`Version: %v
clusterVersion, _ := GetOAMReleaseVersion(types.DefaultKubeVelaNS)
fmt.Printf(`CLI Version: %v
Core Version: %s
GitRevision: %v
GolangVersion: %v
`,
version.VelaVersion,
clusterVersion,
version.GitRevision,
runtime.Version())
},
@@ -164,9 +167,13 @@ func NewVersionListCommand(ioStream util.IOStreams) *cobra.Command {
if err != nil {
return err
}
currentV, err := gov.NewVersion(version.VelaVersion)
clusterVersion, err := GetOAMReleaseVersion(types.DefaultKubeVelaNS)
if err != nil {
clusterVersion = version.VelaVersion
}
currentV, err := gov.NewVersion(clusterVersion)
if err != nil && !showAll {
return fmt.Errorf("can not parse current version %s", version.VelaVersion)
return fmt.Errorf("can not parse current version %s", clusterVersion)
}
for _, chartV := range versions {
if chartV != nil {
@@ -190,3 +197,18 @@ func NewVersionListCommand(ioStream util.IOStreams) *cobra.Command {
cmd.PersistentFlags().BoolVarP(&showAll, "all", "a", false, "List all available versions, if not, only list newer version")
return cmd
}
// GetOAMReleaseVersion gets version of vela-core runtime helm release
func GetOAMReleaseVersion(ns string) (string, error) {
results, err := helm.GetHelmRelease(ns)
if err != nil {
return "", err
}
for _, result := range results {
if result.Chart.ChartFullPath() == types.DefaultKubeVelaChartName {
return result.Chart.AppVersion(), nil
}
}
return "", errors.New("kubevela chart not found in your kubernetes cluster, refer to 'https://kubevela.io/docs/install' for installation")
}

Some files were not shown because too many files have changed in this diff Show More