mirror of
https://github.com/kubevela/kubevela.git
synced 2026-02-23 14:23:54 +00:00
Compare commits
53 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
21534a5909 | ||
|
|
e6bcc7de1f | ||
|
|
a67270fb00 | ||
|
|
ef80b6617e | ||
|
|
54469a970a | ||
|
|
7af36b0971 | ||
|
|
316e21791f | ||
|
|
b7935e88d0 | ||
|
|
07c5b26eaa | ||
|
|
5b59db5f0b | ||
|
|
f6032f3de9 | ||
|
|
2f825f3e0c | ||
|
|
4c6292b1c2 | ||
|
|
ec7aa50584 | ||
|
|
739bed82c2 | ||
|
|
bf03898851 | ||
|
|
1ae4216a7a | ||
|
|
2b97960608 | ||
|
|
af29eb020f | ||
|
|
f9ee044d45 | ||
|
|
91d37e7773 | ||
|
|
eb5c730e36 | ||
|
|
b1f76f6087 | ||
|
|
4d50017622 | ||
|
|
f4d4416789 | ||
|
|
b108801b60 | ||
|
|
6d7180af2d | ||
|
|
3e47887b72 | ||
|
|
4934447e75 | ||
|
|
d36718969f | ||
|
|
859ca7567f | ||
|
|
10dce9debc | ||
|
|
2e67238b61 | ||
|
|
341e07b636 | ||
|
|
b5e04f2060 | ||
|
|
99c4a130d3 | ||
|
|
f8ba3d5d00 | ||
|
|
d540491f46 | ||
|
|
30c492a50a | ||
|
|
84422e581c | ||
|
|
38d2bf6839 | ||
|
|
fbef61d076 | ||
|
|
52f9b7e691 | ||
|
|
e721449c46 | ||
|
|
ab998ce3f4 | ||
|
|
bcc978380f | ||
|
|
b9f9f7f3f9 | ||
|
|
18ceb467ed | ||
|
|
06eb8f055d | ||
|
|
9dec98fbba | ||
|
|
42e7f04267 | ||
|
|
188e453f8a | ||
|
|
c34cd657e8 |
2
.github/workflows/apiserver-test.yaml
vendored
2
.github/workflows/apiserver-test.yaml
vendored
@@ -15,7 +15,7 @@ on:
|
||||
|
||||
env:
|
||||
# Common versions
|
||||
GO_VERSION: '1.16'
|
||||
GO_VERSION: '1.17'
|
||||
GOLANGCI_VERSION: 'v1.38'
|
||||
KIND_VERSION: 'v0.7.0'
|
||||
|
||||
|
||||
2
.github/workflows/e2e-multicluster-test.yml
vendored
2
.github/workflows/e2e-multicluster-test.yml
vendored
@@ -13,7 +13,7 @@ on:
|
||||
|
||||
env:
|
||||
# Common versions
|
||||
GO_VERSION: '1.16'
|
||||
GO_VERSION: '1.17'
|
||||
GOLANGCI_VERSION: 'v1.38'
|
||||
KIND_VERSION: 'v0.7.0'
|
||||
|
||||
|
||||
2
.github/workflows/e2e-rollout-test.yml
vendored
2
.github/workflows/e2e-rollout-test.yml
vendored
@@ -13,7 +13,7 @@ on:
|
||||
|
||||
env:
|
||||
# Common versions
|
||||
GO_VERSION: '1.16'
|
||||
GO_VERSION: '1.17'
|
||||
GOLANGCI_VERSION: 'v1.38'
|
||||
KIND_VERSION: 'v0.7.0'
|
||||
|
||||
|
||||
2
.github/workflows/e2e-test.yml
vendored
2
.github/workflows/e2e-test.yml
vendored
@@ -13,7 +13,7 @@ on:
|
||||
|
||||
env:
|
||||
# Common versions
|
||||
GO_VERSION: '1.16'
|
||||
GO_VERSION: '1.17'
|
||||
GOLANGCI_VERSION: 'v1.38'
|
||||
KIND_VERSION: 'v0.7.0'
|
||||
|
||||
|
||||
2
.github/workflows/go.yml
vendored
2
.github/workflows/go.yml
vendored
@@ -13,7 +13,7 @@ on:
|
||||
|
||||
env:
|
||||
# Common versions
|
||||
GO_VERSION: '1.16'
|
||||
GO_VERSION: '1.17'
|
||||
GOLANGCI_VERSION: 'v1.38'
|
||||
KIND_VERSION: 'v0.7.0'
|
||||
|
||||
|
||||
2
.github/workflows/release.yml
vendored
2
.github/workflows/release.yml
vendored
@@ -27,7 +27,7 @@ jobs:
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: 1.16
|
||||
go-version: 1.17
|
||||
- name: Get release
|
||||
id: get_release
|
||||
uses: bruceadams/get-release@v1.2.2
|
||||
|
||||
4
.github/workflows/sync-api.yml
vendored
4
.github/workflows/sync-api.yml
vendored
@@ -11,10 +11,10 @@ jobs:
|
||||
sync-core-api:
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- name: Set up Go 1.16
|
||||
- name: Set up Go 1.17
|
||||
uses: actions/setup-go@v1
|
||||
env:
|
||||
GO_VERSION: '1.16'
|
||||
GO_VERSION: '1.17'
|
||||
GOLANGCI_VERSION: 'v1.38'
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
|
||||
2
.github/workflows/unit-test.yml
vendored
2
.github/workflows/unit-test.yml
vendored
@@ -13,7 +13,7 @@ on:
|
||||
|
||||
env:
|
||||
# Common versions
|
||||
GO_VERSION: '1.16'
|
||||
GO_VERSION: '1.17'
|
||||
GOLANGCI_VERSION: 'v1.38'
|
||||
KIND_VERSION: 'v0.7.0'
|
||||
|
||||
|
||||
@@ -33,8 +33,8 @@ spec:
|
||||
arch: amd64
|
||||
{{addURIAndSha "https://github.com/oam-dev/kubevela/releases/download/{{ .TagName }}/kubectl-vela-{{ .TagName }}-windows-amd64.zip" .TagName }}
|
||||
files:
|
||||
- from: "*/kubectl-vela.exe"
|
||||
to: "."
|
||||
- from: "*/kubectl-vela"
|
||||
to: "kubectl-vela.exe"
|
||||
- from: "*/LICENSE"
|
||||
to: "."
|
||||
bin: "kubectl-vela.exe"
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
ARG BASE_IMAGE="alpine:latest"
|
||||
ARG BASE_IMAGE
|
||||
# Build the manager binary
|
||||
FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.16-alpine as builder
|
||||
FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.17-alpine as builder
|
||||
|
||||
WORKDIR /workspace
|
||||
# Copy the Go Modules manifests
|
||||
@@ -34,9 +34,9 @@ RUN GO111MODULE=on CGO_ENABLED=0 GOOS=linux GOARCH=${TARGETARCH} \
|
||||
# You can replace distroless as minimal base image to package the manager binary
|
||||
# Refer to https://github.com/GoogleContainerTools/distroless for more details
|
||||
# Overwrite `BASE_IMAGE` by passing `--build-arg=BASE_IMAGE=gcr.io/distroless/static:nonroot`
|
||||
FROM ${BASE_IMAGE:-alpine:latest}
|
||||
FROM ${BASE_IMAGE:-alpine:3.15}
|
||||
# This is required by daemon connnecting with cri
|
||||
RUN apk add --no-cache ca-certificates bash
|
||||
RUN apk add --no-cache ca-certificates bash expat
|
||||
|
||||
WORKDIR /
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
ARG BASE_IMAGE="alpine:latest"
|
||||
ARG BASE_IMAGE
|
||||
# Build the manager binary
|
||||
FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.16-alpine as builder
|
||||
FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.17-alpine as builder
|
||||
ARG GOPROXY
|
||||
ENV GOPROXY=${GOPROXY:-https://goproxy.cn}
|
||||
WORKDIR /workspace
|
||||
@@ -32,9 +32,9 @@ RUN GO111MODULE=on CGO_ENABLED=0 GOOS=linux GOARCH=${TARGETARCH} \
|
||||
# Refer to https://github.com/GoogleContainerTools/distroless for more details
|
||||
# Overwrite `BASE_IMAGE` by passing `--build-arg=BASE_IMAGE=gcr.io/distroless/static:nonroot`
|
||||
|
||||
FROM ${BASE_IMAGE:-alpine:latest}
|
||||
FROM ${BASE_IMAGE:-alpine:3.15}
|
||||
# This is required by daemon connnecting with cri
|
||||
RUN apk add --no-cache ca-certificates bash
|
||||
RUN apk add --no-cache ca-certificates bash expat
|
||||
|
||||
WORKDIR /
|
||||
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
ARG BASE_IMAGE
|
||||
# Build the manager binary
|
||||
FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.16-alpine as builder
|
||||
FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.17-alpine as builder
|
||||
|
||||
WORKDIR /workspace
|
||||
# Copy the Go Modules manifests
|
||||
@@ -24,8 +25,8 @@ ARG VERSION
|
||||
ARG GITVERSION
|
||||
|
||||
RUN apk add gcc musl-dev libc-dev ;\
|
||||
GO111MODULE=on CGO_ENABLED=0 GOOS=linux GOARCH=${TARGETARCH} \
|
||||
go test -c -o manager-${TARGETARCH} -cover -covermode=atomic -coverpkg ./... .
|
||||
GO111MODULE=on CGO_ENABLED=0 GOOS=linux GOARCH=${TARGETARCH} \
|
||||
go test -c -o manager-${TARGETARCH} -cover -covermode=atomic -coverpkg ./... .
|
||||
|
||||
RUN GO111MODULE=on CGO_ENABLED=0 GOOS=linux GOARCH=${TARGETARCH} \
|
||||
go build -a -ldflags "-s -w -X github.com/oam-dev/kubevela/version.VelaVersion=${VERSION:-undefined} -X github.com/oam-dev/kubevela/version.GitRevision=${GITVERSION:-undefined}" \
|
||||
@@ -35,10 +36,10 @@ RUN GO111MODULE=on CGO_ENABLED=0 GOOS=linux GOARCH=${TARGETARCH} \
|
||||
# You can replace distroless as minimal base image to package the manager binary
|
||||
# Refer to https://github.com/GoogleContainerTools/distroless for more details
|
||||
# Overwrite `BASE_IMAGE` by passing `--build-arg=BASE_IMAGE=gcr.io/distroless/static:nonroot`
|
||||
ARG BASE_IMAGE
|
||||
FROM ${BASE_IMAGE:-alpine:latest}
|
||||
|
||||
FROM ${BASE_IMAGE:-alpine:3.15}
|
||||
# This is required by daemon connnecting with cri
|
||||
RUN apk add --no-cache ca-certificates bash
|
||||
RUN apk add --no-cache ca-certificates bash expat
|
||||
|
||||
WORKDIR /
|
||||
|
||||
|
||||
5
Makefile
5
Makefile
@@ -9,9 +9,12 @@ include makefiles/e2e.mk
|
||||
all: build
|
||||
|
||||
# Run tests
|
||||
test: vet lint staticcheck unit-test-core
|
||||
test: vet lint staticcheck unit-test-core test-cli-gen
|
||||
@$(OK) unit-tests pass
|
||||
|
||||
test-cli-gen:
|
||||
mkdir -p ./bin/doc
|
||||
go run ./hack/docgen/gen.go ./bin/doc
|
||||
unit-test-core:
|
||||
go test -coverprofile=coverage.txt $(shell go list ./pkg/... ./cmd/... ./apis/... | grep -v apiserver)
|
||||
go test $(shell go list ./references/... | grep -v apiserver)
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
//go:build !ignore_autogenerated
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
//go:build !ignore_autogenerated
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
//go:build !ignore_autogenerated
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
//go:build !ignore_autogenerated
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
//go:build !ignore_autogenerated
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
//go:build !ignore_autogenerated
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
|
||||
@@ -19,8 +19,6 @@ package types
|
||||
import "github.com/oam-dev/kubevela/pkg/oam"
|
||||
|
||||
const (
|
||||
// DefaultKubeVelaNS defines the default KubeVela namespace in Kubernetes
|
||||
DefaultKubeVelaNS = "vela-system"
|
||||
// DefaultKubeVelaReleaseName defines the default name of KubeVela Release
|
||||
DefaultKubeVelaReleaseName = "kubevela"
|
||||
// DefaultKubeVelaChartName defines the default chart name of KubeVela, this variable MUST align to the chart name of this repo
|
||||
@@ -33,8 +31,13 @@ const (
|
||||
DefaultAppNamespace = "default"
|
||||
// AutoDetectWorkloadDefinition defines the default workload type for ComponentDefinition which doesn't specify a workload
|
||||
AutoDetectWorkloadDefinition = "autodetects.core.oam.dev"
|
||||
// KubeVelaControllerDeployment defines the KubeVela controller's deployment name
|
||||
KubeVelaControllerDeployment = "kubevela-vela-core"
|
||||
)
|
||||
|
||||
// DefaultKubeVelaNS defines the default KubeVela namespace in Kubernetes
|
||||
var DefaultKubeVelaNS = "vela-system"
|
||||
|
||||
const (
|
||||
// AnnoDefinitionDescription is the annotation which describe what is the capability used for in a WorkloadDefinition/TraitDefinition Object
|
||||
AnnoDefinitionDescription = "definition.oam.dev/description"
|
||||
@@ -113,11 +116,3 @@ var DefaultFilterAnnots = []string{
|
||||
oam.AnnotationFilterAnnotationKeys,
|
||||
oam.AnnotationLastAppliedConfiguration,
|
||||
}
|
||||
|
||||
// Cluster contains base info of cluster
|
||||
type Cluster struct {
|
||||
Name string
|
||||
Type string
|
||||
EndPoint string
|
||||
Accepted bool
|
||||
}
|
||||
|
||||
@@ -126,6 +126,7 @@ spec:
|
||||
{{ end }}
|
||||
- "--system-definition-namespace={{ .Values.systemDefinitionNamespace }}"
|
||||
- "--oam-spec-ver={{ .Values.OAMSpecVer }}"
|
||||
- "--concurrent-reconciles={{ .Values.concurrentReconciles }}"
|
||||
image: {{ .Values.imageRegistry }}{{ .Values.image.repository }}:{{ .Values.image.tag }}
|
||||
imagePullPolicy: {{ quote .Values.image.pullPolicy }}
|
||||
resources:
|
||||
|
||||
@@ -21,4 +21,4 @@ version: 0.1.0
|
||||
appVersion: 0.1.0
|
||||
|
||||
home: https://kubevela.io
|
||||
icon: https://kubevela.io/img/logo.jpg
|
||||
icon: https://kubevela.io/img/logo.svg
|
||||
|
||||
268
charts/vela-core/templates/addon/fluxcd-def.yaml
Normal file
268
charts/vela-core/templates/addon/fluxcd-def.yaml
Normal file
@@ -0,0 +1,268 @@
|
||||
{{- if .Values.enableFluxcdAddon -}}
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: Application
|
||||
metadata:
|
||||
labels:
|
||||
addons.oam.dev/name: fluxcd-def
|
||||
name: addon-fluxcd-def
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
components:
|
||||
- name: fluxc-def-resources
|
||||
properties:
|
||||
objects:
|
||||
- apiVersion: core.oam.dev/v1beta1
|
||||
kind: ComponentDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
definition.oam.dev/description: helm release is a group of K8s resources
|
||||
from either git repository or helm repo
|
||||
name: helm
|
||||
namespace: {{.Values.systemDefinitionNamespace}}
|
||||
spec:
|
||||
schematic:
|
||||
cue:
|
||||
template: "output: {\n\tapiVersion: \"source.toolkit.fluxcd.io/v1beta1\"\n\tmetadata:
|
||||
{\n\t\tname: context.name\n\t}\n\tif parameter.repoType == \"git\"
|
||||
{\n\t\tkind: \"GitRepository\"\n\t\tspec: {\n\t\t\turl: parameter.url\n\t\t\tif
|
||||
parameter.git.branch != _|_ {\n\t\t\t\tref: branch: parameter.git.branch\n\t\t\t}\n\t\t\t_secret\n\t\t\t_sourceCommonArgs\n\t\t}\n\t}\n\tif
|
||||
parameter.repoType == \"oss\" {\n\t\tkind: \"Bucket\"\n\t\tspec: {\n\t\t\tendpoint:
|
||||
\ parameter.url\n\t\t\tbucketName: parameter.oss.bucketName\n\t\t\tprovider:
|
||||
\ parameter.oss.provider\n\t\t\tif parameter.oss.region != _|_ {\n\t\t\t\tregion:
|
||||
parameter.oss.region\n\t\t\t}\n\t\t\t_secret\n\t\t\t_sourceCommonArgs\n\t\t}\n\t}\n\tif
|
||||
parameter.repoType == \"helm\" {\n\t\tkind: \"HelmRepository\"\n\t\tspec:
|
||||
{\n\t\t\turl: parameter.url\n\t\t\t_secret\n\t\t\t_sourceCommonArgs\n\t\t}\n\t}\n}\n\noutputs:
|
||||
release: {\n\tapiVersion: \"helm.toolkit.fluxcd.io/v2beta1\"\n\tkind:
|
||||
\ \"HelmRelease\"\n\tmetadata: {\n\t\tname: context.name\n\t}\n\tspec:
|
||||
{\n\t\ttimeout: parameter.installTimeout\n\t\tinterval: parameter.interval\n\t\tchart:
|
||||
{\n\t\t\tspec: {\n\t\t\t\tchart: parameter.chart\n\t\t\t\tversion:
|
||||
parameter.version\n\t\t\t\tsourceRef: {\n\t\t\t\t\tif parameter.repoType
|
||||
== \"git\" {\n\t\t\t\t\t\tkind: \"GitRepository\"\n\t\t\t\t\t}\n\t\t\t\t\tif
|
||||
parameter.repoType == \"helm\" {\n\t\t\t\t\t\tkind: \"HelmRepository\"\n\t\t\t\t\t}\n\t\t\t\t\tif
|
||||
parameter.repoType == \"oss\" {\n\t\t\t\t\t\tkind: \"Bucket\"\n\t\t\t\t\t}\n\t\t\t\t\tname:
|
||||
\ context.name\n\t\t\t\t}\n\t\t\t\tinterval: parameter.interval\n\t\t\t}\n\t\t}\n\t\tif
|
||||
parameter.targetNamespace != _|_ {\n\t\t\ttargetNamespace: parameter.targetNamespace\n\t\t}\n\t\tif
|
||||
parameter.releaseName != _|_ {\n\t\t\treleaseName: parameter.releaseName\n\t\t}\n\t\tif
|
||||
parameter.values != _|_ {\n\t\t\tvalues: parameter.values\n\t\t}\n\t}\n}\n\n_secret:
|
||||
{\n\tif parameter.secretRef != _|_ {\n\t\tsecretRef: {\n\t\t\tname:
|
||||
parameter.secretRef\n\t\t}\n\t}\n}\n\n_sourceCommonArgs: {\n\tinterval:
|
||||
parameter.pullInterval\n\tif parameter.timeout != _|_ {\n\t\ttimeout:
|
||||
parameter.timeout\n\t}\n}\n\nparameter: {\n\trepoType: *\"helm\" |
|
||||
\"git\" | \"oss\"\n\t// +usage=The interval at which to check for
|
||||
repository/bucket and relese updates, default to 5m\n\tpullInterval:
|
||||
*\"5m\" | string\n // +usage=The Interval at which to reconcile
|
||||
the Helm release, default to 30s\n interval: *\"30s\" | string\n\t//
|
||||
+usage=The Git or Helm repository URL, OSS endpoint, accept HTTP/S
|
||||
or SSH address as git url,\n\turl: string\n\t// +usage=The name of
|
||||
the secret containing authentication credentials\n\tsecretRef?: string\n\t//
|
||||
+usage=The timeout for operations like download index/clone repository,
|
||||
optional\n\ttimeout?: string\n\t// +usage=The timeout for operation
|
||||
`helm install`, optional\n\tinstallTimeout: *\"10m\" | string\n\n\tgit?:
|
||||
{\n\t\t// +usage=The Git reference to checkout and monitor for changes,
|
||||
defaults to master branch\n\t\tbranch: string\n\t}\n\toss?: {\n\t\t//
|
||||
+usage=The bucket's name, required if repoType is oss\n\t\tbucketName:
|
||||
string\n\t\t// +usage=\"generic\" for Minio, Amazon S3, Google Cloud
|
||||
Storage, Alibaba Cloud OSS, \"aws\" for retrieve credentials from
|
||||
the EC2 service when credentials not specified, default \"generic\"\n\t\tprovider:
|
||||
*\"generic\" | \"aws\"\n\t\t// +usage=The bucket region, optional\n\t\tregion?:
|
||||
string\n\t}\n\n\t// +usage=1.The relative path to helm chart for git/oss
|
||||
source. 2. chart name for helm resource 3. relative path for chart
|
||||
package(e.g. ./charts/podinfo-1.2.3.tgz)\n\tchart: string\n\t// +usage=Chart
|
||||
version\n\tversion: *\"*\" | string\n\t// +usage=The namespace for
|
||||
helm chart, optional\n\ttargetNamespace?: string\n\t// +usage=The
|
||||
release name\n\treleaseName?: string\n\t// +usage=Chart values\n\tvalues?:
|
||||
#nestedmap\n}\n\n#nestedmap: {\n\t...\n}\n"
|
||||
status:
|
||||
customStatus: "repoMessage: string\nreleaseMessage: string\nif context.output.status
|
||||
== _|_ {\n\trepoMessage: \"Fetching repository\"\n\treleaseMessage:
|
||||
\"Wating repository ready\"\n}\nif context.output.status != _|_ {\n\trepoStatus:
|
||||
context.output.status\n\tif repoStatus.conditions[0][\"type\"] != \"Ready\"
|
||||
{\n\t\trepoMessage: \"Fetch repository fail\"\n\t}\n\tif repoStatus.conditions[0][\"type\"]
|
||||
== \"Ready\" {\n\t\trepoMessage: \"Fetch repository successfully\"\n\t}\n\n\tif
|
||||
context.outputs.release.status == _|_ {\n\t\treleaseMessage: \"Creating
|
||||
helm release\"\n\t}\n\tif context.outputs.release.status != _|_ {\n\t\tif
|
||||
context.outputs.release.status.conditions[0][\"message\"] == \"Release
|
||||
reconciliation succeeded\" {\n\t\t\treleaseMessage: \"Create helm release
|
||||
successfully\"\n\t\t}\n\t\tif context.outputs.release.status.conditions[0][\"message\"]
|
||||
!= \"Release reconciliation succeeded\" {\n\t\t\treleaseBasicMessage:
|
||||
\"Delivery helm release in progress, message: \" + context.outputs.release.status.conditions[0][\"message\"]\n\t\t\tif
|
||||
len(context.outputs.release.status.conditions) == 1 {\n\t\t\t\treleaseMessage:
|
||||
releaseBasicMessage\n\t\t\t}\n\t\t\tif len(context.outputs.release.status.conditions)
|
||||
> 1 {\n\t\t\t\treleaseMessage: releaseBasicMessage + \", \" + context.outputs.release.status.conditions[1][\"message\"]\n\t\t\t}\n\t\t}\n\t}\n\n}\nmessage:
|
||||
repoMessage + \", \" + releaseMessage"
|
||||
healthPolicy: 'isHealth: len(context.outputs.release.status.conditions)
|
||||
!= 0 && context.outputs.release.status.conditions[0]["status"]=="True"'
|
||||
workload:
|
||||
type: autodetects.core.oam.dev
|
||||
- apiVersion: core.oam.dev/v1beta1
|
||||
kind: TraitDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
definition.oam.dev/description: A list of JSON6902 patch to selected target
|
||||
name: kustomize-json-patch
|
||||
namespace: {{.Values.systemDefinitionNamespace}}
|
||||
spec:
|
||||
schematic:
|
||||
cue:
|
||||
template: "patch: {\n\tspec: {\n\t\tpatchesJson6902: parameter.patchesJson\n\t}\n}\n\nparameter:
|
||||
{\n\t// +usage=A list of JSON6902 patch.\n\tpatchesJson: [...#jsonPatchItem]\n}\n\n//
|
||||
+usage=Contains a JSON6902 patch\n#jsonPatchItem: {\n\ttarget: #selector\n\tpatch:
|
||||
[...{\n\t\t// +usage=operation to perform\n\t\top: string | \"add\"
|
||||
| \"remove\" | \"replace\" | \"move\" | \"copy\" | \"test\"\n\t\t//
|
||||
+usage=operate path e.g. /foo/bar\n\t\tpath: string\n\t\t// +usage=specify
|
||||
source path when op is copy/move\n\t\tfrom?: string\n\t\t// +usage=specify
|
||||
opraation value when op is test/add/replace\n\t\tvalue?: string\n\t}]\n}\n\n//
|
||||
+usage=Selector specifies a set of resources\n#selector: {\n\tgroup?:
|
||||
\ string\n\tversion?: string\n\tkind?: string\n\tnamespace?:
|
||||
\ string\n\tname?: string\n\tannotationSelector?:
|
||||
string\n\tlabelSelector?: string\n}\n"
|
||||
- apiVersion: core.oam.dev/v1beta1
|
||||
kind: TraitDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
definition.oam.dev/description: A list of StrategicMerge or JSON6902 patch
|
||||
to selected target
|
||||
name: kustomize-patch
|
||||
namespace: {{.Values.systemDefinitionNamespace}}
|
||||
spec:
|
||||
schematic:
|
||||
cue:
|
||||
template: "patch: {\n\tspec: {\n\t\tpatches: parameter.patches\n\t}\n}\nparameter:
|
||||
{\n\t// +usage=a list of StrategicMerge or JSON6902 patch to selected
|
||||
target\n\tpatches: [...#patchItem]\n}\n\n// +usage=Contains a strategicMerge
|
||||
or JSON6902 patch\n#patchItem: {\n\t// +usage=Inline patch string,
|
||||
in yaml style\n\tpatch: string\n\t// +usage=Specify the target the
|
||||
patch should be applied to\n\ttarget: #selector\n}\n\n// +usage=Selector
|
||||
specifies a set of resources\n#selector: {\n\tgroup?: string\n\tversion?:
|
||||
\ string\n\tkind?: string\n\tnamespace?: string\n\tname?:
|
||||
\ string\n\tannotationSelector?: string\n\tlabelSelector?:
|
||||
\ string\n}\n"
|
||||
- apiVersion: core.oam.dev/v1beta1
|
||||
kind: ComponentDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
definition.oam.dev/description: kustomize can fetching, building, updating
|
||||
and applying Kustomize manifests from git repo.
|
||||
name: kustomize
|
||||
namespace: {{.Values.systemDefinitionNamespace}}
|
||||
spec:
|
||||
schematic:
|
||||
cue:
|
||||
template: "output: {\n\tapiVersion: \"kustomize.toolkit.fluxcd.io/v1beta1\"\n\tkind:
|
||||
\ \"Kustomization\"\n\tmetadata: {\n\t\tname: context.name\n
|
||||
\ namespace: context.namespace\n\t}\n\tspec: {\n\t\tinterval: parameter.pullInterval\n\t\tsourceRef:
|
||||
{\n\t\t\tif parameter.repoType == \"git\" {\n\t\t\t\tkind: \"GitRepository\"\n\t\t\t}\n\t\t\tif
|
||||
parameter.repoType == \"oss\" {\n\t\t\t\tkind: \"Bucket\"\n\t\t\t}\n\t\t\tname:
|
||||
\ context.name\n\t\t\tnamespace: context.namespace\n\t\t}\n\t\tpath:
|
||||
\ parameter.path\n\t\tprune: true\n\t\tvalidation: \"client\"\n\t}\n}\n\noutputs:
|
||||
{\n repo: {\n\t apiVersion: \"source.toolkit.fluxcd.io/v1beta1\"\n\t
|
||||
\ metadata: {\n\t\t name: context.name\n namespace: context.namespace\n\t
|
||||
\ }\n\t if parameter.repoType == \"git\" {\n\t\t kind: \"GitRepository\"\n\t\t
|
||||
\ spec: {\n\t\t\t url: parameter.url\n\t\t\t if parameter.git.branch
|
||||
!= _|_ {\n\t\t\t\t ref: branch: parameter.git.branch\n\t\t\t }\n
|
||||
\ if parameter.git.provider != _|_ {\n if parameter.git.provider
|
||||
== \"GitHub\" {\n gitImplementation: \"go-git\"\n }\n
|
||||
\ if parameter.git.provider == \"AzureDevOps\" {\n gitImplementation:
|
||||
\"libgit2\"\n }\n }\n\t\t\t _secret\n\t\t\t _sourceCommonArgs\n\t\t
|
||||
\ }\n\t }\n\t if parameter.repoType == \"oss\" {\n\t\t kind: \"Bucket\"\n\t\t
|
||||
\ spec: {\n\t\t\t endpoint: parameter.url\n\t\t\t bucketName:
|
||||
parameter.oss.bucketName\n\t\t\t provider: parameter.oss.provider\n\t\t\t
|
||||
\ if parameter.oss.region != _|_ {\n\t\t\t\t region: parameter.oss.region\n\t\t\t
|
||||
\ }\n\t\t\t _secret\n\t\t\t _sourceCommonArgs\n\t\t }\n\t }\n
|
||||
\ }\n\n if parameter.imageRepository != _|_ {\n imageRepo: {\n
|
||||
\ apiVersion: \"image.toolkit.fluxcd.io/v1beta1\"\n kind:
|
||||
\"ImageRepository\"\n\t metadata: {\n\t\t name: context.name\n
|
||||
\ namespace: context.namespace\n\t }\n spec: {\n image:
|
||||
parameter.imageRepository.image\n interval: parameter.pullInterval\n
|
||||
\ if parameter.imageRepository.secretRef != _|_ {\n secretRef:
|
||||
name: parameter.imageRepository.secretRef\n }\n }\n }\n\n
|
||||
\ imagePolicy: {\n apiVersion: \"image.toolkit.fluxcd.io/v1beta1\"\n
|
||||
\ kind: \"ImagePolicy\"\n\t metadata: {\n\t\t name: context.name\n
|
||||
\ namespace: context.namespace\n\t }\n spec: {\n imageRepositoryRef:
|
||||
name: context.name\n policy: parameter.imageRepository.policy\n
|
||||
\ if parameter.imageRepository.filterTags != _|_ {\n filterTags:
|
||||
parameter.imageRepository.filterTags\n }\n }\n }\n\n
|
||||
\ imageUpdate: {\n apiVersion: \"image.toolkit.fluxcd.io/v1beta1\"\n
|
||||
\ kind: \"ImageUpdateAutomation\"\n\t metadata: {\n\t\t name:
|
||||
context.name\n namespace: context.namespace\n\t }\n spec:
|
||||
{\n interval: parameter.pullInterval\n sourceRef: {\n
|
||||
\ kind: \"GitRepository\"\n name: context.name\n
|
||||
\ }\n git: {\n checkout: ref: branch: parameter.git.branch\n
|
||||
\ commit: {\n author: {\n email: \"kubevelabot@users.noreply.github.com\"\n
|
||||
\ name: \"kubevelabot\"\n }\n if
|
||||
parameter.imageRepository.commitMessage != _|_ {\n messageTemplate:
|
||||
\"Update image automatically.\\n\" + parameter.imageRepository.commitMessage\n
|
||||
\ }\n if parameter.imageRepository.commitMessage
|
||||
== _|_ {\n messageTemplate: \"Update image automatically.\"\n
|
||||
\ }\n }\n push: branch: parameter.git.branch\n
|
||||
\ }\n update: {\n path:\tparameter.path\n strategy:
|
||||
\"Setters\"\n }\n }\n }\n }\n}\n\n_secret: {\n\tif
|
||||
parameter.secretRef != _|_ {\n\t\tsecretRef: {\n\t\t\tname: parameter.secretRef\n\t\t}\n\t}\n}\n\n_sourceCommonArgs:
|
||||
{\n\tinterval: parameter.pullInterval\n\tif parameter.timeout != _|_
|
||||
{\n\t\ttimeout: parameter.timeout\n\t}\n}\n\nparameter: {\n\trepoType:
|
||||
*\"git\" | \"oss\"\n // +usage=The image repository for automatically
|
||||
update image to git\n imageRepository?: {\n // +usage=The image
|
||||
url\n image: string\n // +usage=The name of the secret containing
|
||||
authentication credentials\n secretRef?: string\n // +usage=Policy
|
||||
gives the particulars of the policy to be followed in selecting the
|
||||
most recent image.\n policy: {\n // +usage=Alphabetical set
|
||||
of rules to use for alphabetical ordering of the tags.\n alphabetical?:
|
||||
{\n // +usage=Order specifies the sorting order of the tags.\n
|
||||
\ // +usage=Given the letters of the alphabet as tags, ascending
|
||||
order would select Z, and descending order would select A.\n order?:
|
||||
\"asc\" | \"desc\"\n }\n // +usage=Numerical set of rules
|
||||
to use for numerical ordering of the tags.\n numerical?: {\n
|
||||
\ // +usage=Order specifies the sorting order of the tags.\n
|
||||
\ // +usage=Given the integer values from 0 to 9 as tags, ascending
|
||||
order would select 9, and descending order would select 0.\n order:
|
||||
\"asc\" | \"desc\"\n }\n // +usage=SemVer gives a semantic
|
||||
version range to check against the tags available.\n semver?:
|
||||
{\n // +usage=Range gives a semver range for the image tag;
|
||||
the highest version within the range that's a tag yields the latest
|
||||
image.\n range: string\n }\n }\n // +usage=FilterTags
|
||||
enables filtering for only a subset of tags based on a set of rules.
|
||||
If no rules are provided, all the tags from the repository will be
|
||||
ordered and compared.\n filterTags?: {\n // +usage=Extract
|
||||
allows a capture group to be extracted from the specified regular
|
||||
expression pattern, useful before tag evaluation.\n extract?:
|
||||
string\n // +usage=Pattern specifies a regular expression pattern
|
||||
used to filter for image tags.\n pattern?: string\n }\n //
|
||||
+usage=The image url\n commitMessage?: string\n }\n\t// +usage=The
|
||||
interval at which to check for repository/bucket and release updates,
|
||||
default to 5m\n\tpullInterval: *\"5m\" | string\n\t// +usage=The Git
|
||||
or Helm repository URL, OSS endpoint, accept HTTP/S or SSH address
|
||||
as git url,\n\turl: string\n\t// +usage=The name of the secret containing
|
||||
authentication credentials\n\tsecretRef?: string\n\t// +usage=The
|
||||
timeout for operations like download index/clone repository, optional\n\ttimeout?:
|
||||
string\n\tgit?: {\n\t\t// +usage=The Git reference to checkout and
|
||||
monitor for changes, defaults to master branch\n\t\tbranch: string\n
|
||||
\ // +usage=Determines which git client library to use. Defaults
|
||||
to GitHub, it will pick go-git. AzureDevOps will pick libgit2.\n provider?:
|
||||
*\"GitHub\" | \"AzureDevOps\"\n\t}\n\toss?: {\n\t\t// +usage=The bucket's
|
||||
name, required if repoType is oss\n\t\tbucketName: string\n\t\t//
|
||||
+usage=\"generic\" for Minio, Amazon S3, Google Cloud Storage, Alibaba
|
||||
Cloud OSS, \"aws\" for retrieve credentials from the EC2 service when
|
||||
credentials not specified, default \"generic\"\n\t\tprovider: *\"generic\"
|
||||
| \"aws\"\n\t\t// +usage=The bucket region, optional\n\t\tregion?:
|
||||
string\n\t}\n\t//+usage=Path to the directory containing the kustomization.yaml
|
||||
file, or the set of plain YAMLs a kustomization.yaml should be generated
|
||||
for.\n\tpath: string\n}"
|
||||
workload:
|
||||
type: autodetects.core.oam.dev
|
||||
- apiVersion: core.oam.dev/v1beta1
|
||||
kind: TraitDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
definition.oam.dev/description: A list of strategic merge to kustomize
|
||||
config
|
||||
name: kustomize-strategy-merge
|
||||
namespace: {{.Values.systemDefinitionNamespace}}
|
||||
spec:
|
||||
schematic:
|
||||
cue:
|
||||
template: "patch: {\n\tspec: {\n\t\tpatchesStrategicMerge: parameter.patchesStrategicMerge\n\t}\n}\n\nparameter:
|
||||
{\n\t// +usage=a list of strategicmerge, defined as inline yaml objects.\n\tpatchesStrategicMerge:
|
||||
[...#nestedmap]\n}\n\n#nestedmap: {\n\t...\n}\n"
|
||||
type: k8s-objects
|
||||
|
||||
{{- end }}
|
||||
4986
charts/vela-core/templates/addon/fluxcd.yaml
Normal file
4986
charts/vela-core/templates/addon/fluxcd.yaml
Normal file
File diff suppressed because it is too large
Load Diff
@@ -2,7 +2,7 @@ apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: vela-addon-registry
|
||||
namespace: vela-system
|
||||
namespace: {{ .Release.Namespace }}
|
||||
data:
|
||||
registries: '{
|
||||
"KubeVela":{
|
||||
@@ -13,4 +13,4 @@ data:
|
||||
"path": ""
|
||||
}
|
||||
}
|
||||
}'
|
||||
}'
|
||||
|
||||
@@ -5,8 +5,6 @@ kind: TraitDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
definition.oam.dev/description: Rollout the component.
|
||||
labels:
|
||||
custom.definition.oam.dev/ui-hidden: "true"
|
||||
name: rollout
|
||||
namespace: {{.Values.systemDefinitionNamespace}}
|
||||
spec:
|
||||
@@ -22,8 +20,13 @@ spec:
|
||||
namespace: context.namespace
|
||||
}
|
||||
spec: {
|
||||
targetRevisionName: parameter.targetRevision
|
||||
componentName: context.name
|
||||
if parameter.targetRevision != _|_ {
|
||||
targetRevisionName: parameter.targetRevision
|
||||
}
|
||||
if parameter.targetRevision == _|_ {
|
||||
targetRevisionName: context.revision
|
||||
}
|
||||
componentName: context.name
|
||||
rolloutPlan: {
|
||||
rolloutStrategy: "IncreaseFirst"
|
||||
if parameter.rolloutBatches != _|_ {
|
||||
@@ -37,8 +40,8 @@ spec:
|
||||
}
|
||||
}
|
||||
parameter: {
|
||||
targetRevision: *context.revision | string
|
||||
targetSize: int
|
||||
targetRevision?: string
|
||||
targetSize: int
|
||||
rolloutBatches?: [...rolloutBatch]
|
||||
batchPartition?: int
|
||||
}
|
||||
|
||||
@@ -154,7 +154,7 @@ spec:
|
||||
}
|
||||
|
||||
if v.resources.requests.storage == _|_ {
|
||||
resources: requests: storage: "1Gi"
|
||||
resources: requests: storage: "8Gi"
|
||||
}
|
||||
if v.resources.requests.storage != _|_ {
|
||||
resources: requests: storage: v.resources.requests.storage
|
||||
|
||||
@@ -506,23 +506,49 @@ spec:
|
||||
import "strconv"
|
||||
ready: {
|
||||
if context.output.status.readyReplicas == _|_ {
|
||||
replica: "0"
|
||||
readyReplicas: 0
|
||||
}
|
||||
|
||||
if context.output.status.readyReplicas != _|_ {
|
||||
replica: strconv.FormatInt(context.output.status.readyReplicas, 10)
|
||||
readyReplicas: context.output.status.readyReplicas
|
||||
}
|
||||
}
|
||||
message: "Ready:" + ready.replica + "/" + strconv.FormatInt(context.output.spec.replicas, 10)
|
||||
|
||||
message: "Ready:" + strconv.FormatInt(ready.readyReplicas, 10) + "/" + strconv.FormatInt(context.output.spec.replicas, 10)
|
||||
healthPolicy: |-
|
||||
ready: {
|
||||
if context.output.status.readyReplicas == _|_ {
|
||||
replica: 0
|
||||
if context.output.status.updatedReplicas == _|_ {
|
||||
updatedReplicas : 0
|
||||
}
|
||||
|
||||
if context.output.status.updatedReplicas != _|_ {
|
||||
updatedReplicas : context.output.status.updatedReplicas
|
||||
}
|
||||
|
||||
if context.output.status.readyReplicas == _|_ {
|
||||
readyReplicas: 0
|
||||
}
|
||||
|
||||
if context.output.status.readyReplicas != _|_ {
|
||||
replica: context.output.status.readyReplicas
|
||||
readyReplicas: context.output.status.readyReplicas
|
||||
}
|
||||
|
||||
if context.output.status.replicas == _|_ {
|
||||
replicas: 0
|
||||
}
|
||||
if context.output.status.replicas != _|_ {
|
||||
replicas: context.output.status.replicas
|
||||
}
|
||||
|
||||
if context.output.status.observedGeneration != _|_ {
|
||||
observedGeneration: context.output.status.observedGeneration
|
||||
}
|
||||
|
||||
if context.output.status.observedGeneration == _|_ {
|
||||
observedGeneration: 0
|
||||
}
|
||||
}
|
||||
isHealth: context.output.spec.replicas == ready.replica
|
||||
isHealth: (context.output.spec.replicas == ready.readyReplicas) && (context.output.spec.replicas == ready.updatedReplicas) && (context.output.spec.replicas == ready.replicas) && (ready.observedGeneration == context.output.metadata.generation || ready.observedGeneration > context.output.metadata.generation)
|
||||
workload:
|
||||
definition:
|
||||
apiVersion: apps/v1
|
||||
|
||||
@@ -399,23 +399,49 @@ spec:
|
||||
import "strconv"
|
||||
ready: {
|
||||
if context.output.status.readyReplicas == _|_ {
|
||||
replica: "0"
|
||||
readyReplicas: 0
|
||||
}
|
||||
|
||||
if context.output.status.readyReplicas != _|_ {
|
||||
replica: strconv.FormatInt(context.output.status.readyReplicas, 10)
|
||||
readyReplicas: context.output.status.readyReplicas
|
||||
}
|
||||
}
|
||||
message: "Ready:" + ready.replica + "/" + strconv.FormatInt(context.output.spec.replicas, 10)
|
||||
|
||||
message: "Ready:" + strconv.FormatInt(ready.readyReplicas, 10) + "/" + strconv.FormatInt(context.output.spec.replicas, 10)
|
||||
healthPolicy: |-
|
||||
ready: {
|
||||
if context.output.status.readyReplicas == _|_ {
|
||||
replica: 0
|
||||
if context.output.status.updatedReplicas == _|_ {
|
||||
updatedReplicas : 0
|
||||
}
|
||||
|
||||
if context.output.status.updatedReplicas != _|_ {
|
||||
updatedReplicas : context.output.status.updatedReplicas
|
||||
}
|
||||
|
||||
if context.output.status.readyReplicas == _|_ {
|
||||
readyReplicas: 0
|
||||
}
|
||||
|
||||
if context.output.status.readyReplicas != _|_ {
|
||||
replica: context.output.status.readyReplicas
|
||||
readyReplicas: context.output.status.readyReplicas
|
||||
}
|
||||
|
||||
if context.output.status.replicas == _|_ {
|
||||
replicas: 0
|
||||
}
|
||||
if context.output.status.replicas != _|_ {
|
||||
replicas: context.output.status.replicas
|
||||
}
|
||||
|
||||
if context.output.status.observedGeneration != _|_ {
|
||||
observedGeneration: context.output.status.observedGeneration
|
||||
}
|
||||
|
||||
if context.output.status.observedGeneration == _|_ {
|
||||
observedGeneration: 0
|
||||
}
|
||||
}
|
||||
isHealth: context.output.spec.replicas == ready.replica
|
||||
isHealth: (context.output.spec.replicas == ready.readyReplicas) && (context.output.spec.replicas == ready.updatedReplicas) && (context.output.spec.replicas == ready.replicas) && (ready.observedGeneration == context.output.metadata.generation || ready.observedGeneration > context.output.metadata.generation)
|
||||
workload:
|
||||
definition:
|
||||
apiVersion: apps/v1
|
||||
|
||||
@@ -120,10 +120,8 @@ spec:
|
||||
- "--use-webhook=true"
|
||||
- "--webhook-port={{ .Values.webhookService.port }}"
|
||||
- "--webhook-cert-dir={{ .Values.admissionWebhooks.certificate.mountPath }}"
|
||||
- "--autogen-workload-definition={{ .Values.admissionWebhooks.autoGenWorkloadDefinition }}"
|
||||
{{ end }}
|
||||
- "--health-addr=:{{ .Values.healthCheck.port }}"
|
||||
- "--apply-once-only={{ .Values.applyOnceOnly }}"
|
||||
{{ if ne .Values.disableCaps "" }}
|
||||
- "--disable-caps={{ .Values.disableCaps }}"
|
||||
{{ end }}
|
||||
@@ -135,6 +133,9 @@ spec:
|
||||
- "--enable-cluster-gateway"
|
||||
{{ end }}
|
||||
- "--application-re-sync-period={{ .Values.controllerArgs.reSyncPeriod }}"
|
||||
- "--concurrent-reconciles={{ .Values.concurrentReconciles }}"
|
||||
- "--kube-api-qps={{ .Values.kubeClient.qps }}"
|
||||
- "--kube-api-burst={{ .Values.kubeClient.burst }}"
|
||||
image: {{ .Values.imageRegistry }}{{ .Values.image.repository }}:{{ .Values.image.tag }}
|
||||
imagePullPolicy: {{ quote .Values.image.pullPolicy }}
|
||||
resources:
|
||||
|
||||
@@ -5,6 +5,7 @@ metadata:
|
||||
helm.sh/hook: test-success
|
||||
helm.sh/hook-delete-policy: hook-succeeded
|
||||
name: helm-test-vela-app
|
||||
namespace: {{.Values.systemDefinitionNamespace}}
|
||||
spec:
|
||||
components:
|
||||
- name: helm-test-express-server
|
||||
@@ -23,11 +24,12 @@ apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: "{{ .Release.Name }}-application-test"
|
||||
namespace: {{.Values.systemDefinitionNamespace}}
|
||||
annotations:
|
||||
"helm.sh/hook": test
|
||||
helm.sh/hook-delete-policy: hook-succeeded
|
||||
spec:
|
||||
serviceAccountName: kubevela-vela-core
|
||||
serviceAccountName: {{ include "kubevela.serviceAccountName" . }}
|
||||
containers:
|
||||
- name: {{ .Release.Name }}-application-test
|
||||
image: {{ .Values.imageRegistry }}{{ .Values.test.k8s.repository }}:{{ .Values.test.k8s.tag }}
|
||||
@@ -42,16 +44,16 @@ spec:
|
||||
echo "Waiting application is ready..."
|
||||
|
||||
echo "waiting for application being Ready"
|
||||
kubectl -n vela-system wait --for=condition=Ready applications.core.oam.dev helm-test-vela-app --timeout=3m
|
||||
kubectl -n {{.Values.systemDefinitionNamespace}} wait --for=condition=Ready applications.core.oam.dev helm-test-vela-app --timeout=3m
|
||||
echo "application is Ready"
|
||||
|
||||
# wait for deploy being created
|
||||
echo "waiting for deployment being available"
|
||||
kubectl -n vela-system wait --for=condition=available deployments helm-test-express-server --timeout 3m
|
||||
kubectl -n {{.Values.systemDefinitionNamespace}} wait --for=condition=available deployments helm-test-express-server --timeout 3m
|
||||
echo "deployment being available"
|
||||
|
||||
# wait for ingress being created
|
||||
while ! [ `kubectl -n vela-system get ing helm-test-express-server | grep -v NAME | wc -l` = 1 ]; do
|
||||
while ! [ `kubectl -n {{.Values.systemDefinitionNamespace}} get ing helm-test-express-server | grep -v NAME | wc -l` = 1 ]; do
|
||||
echo "waiting for ingress being created"
|
||||
sleep 1
|
||||
done
|
||||
@@ -59,4 +61,4 @@ spec:
|
||||
|
||||
|
||||
echo "Application and its components are created"
|
||||
restartPolicy: Never
|
||||
restartPolicy: Never
|
||||
|
||||
@@ -3,8 +3,6 @@
|
||||
# Declare variables to be passed into your templates.
|
||||
|
||||
replicaCount: 1
|
||||
# Valid applyOnceOnly values: true/false/on/off/force
|
||||
applyOnceOnly: "off"
|
||||
|
||||
disableCaps: ""
|
||||
|
||||
@@ -81,8 +79,6 @@ admissionWebhooks:
|
||||
certManager:
|
||||
enabled: false
|
||||
revisionHistoryLimit: 3
|
||||
# If autoGenWorkloadDefinition is true, webhook will auto generated workloadDefinition which componentDefinition refers to
|
||||
autoGenWorkloadDefinition: true
|
||||
|
||||
#Enable debug logs for development purpose
|
||||
logDebug: false
|
||||
@@ -103,6 +99,12 @@ definitionRevisionLimit: 20
|
||||
# concurrentReconciles is the concurrent reconcile number of the controller
|
||||
concurrentReconciles: 4
|
||||
|
||||
kubeClient:
|
||||
# the qps for reconcile clients, default is 50
|
||||
qps: 50
|
||||
# the burst for reconcile clients, default is 100
|
||||
burst: 100
|
||||
|
||||
# dependCheckWait is the time to wait for ApplicationConfiguration's dependent-resource ready
|
||||
dependCheckWait: 30s
|
||||
|
||||
@@ -117,7 +119,7 @@ multicluster:
|
||||
image:
|
||||
repository: oamdev/cluster-gateway
|
||||
tag: v1.1.7
|
||||
pullPolicy: Always
|
||||
pullPolicy: IfNotPresent
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
@@ -132,4 +134,6 @@ test:
|
||||
tag: v1
|
||||
k8s:
|
||||
repository: oamdev/alpine-k8s
|
||||
tag: 1.18.2
|
||||
tag: 1.18.2
|
||||
|
||||
enableFluxcdAddon: false
|
||||
@@ -5,8 +5,6 @@ kind: TraitDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
definition.oam.dev/description: Rollout the component.
|
||||
labels:
|
||||
custom.definition.oam.dev/ui-hidden: "true"
|
||||
name: rollout
|
||||
namespace: {{.Values.systemDefinitionNamespace}}
|
||||
spec:
|
||||
@@ -22,8 +20,13 @@ spec:
|
||||
namespace: context.namespace
|
||||
}
|
||||
spec: {
|
||||
targetRevisionName: parameter.targetRevision
|
||||
componentName: context.name
|
||||
if parameter.targetRevision != _|_ {
|
||||
targetRevisionName: parameter.targetRevision
|
||||
}
|
||||
if parameter.targetRevision == _|_ {
|
||||
targetRevisionName: context.revision
|
||||
}
|
||||
componentName: context.name
|
||||
rolloutPlan: {
|
||||
rolloutStrategy: "IncreaseFirst"
|
||||
if parameter.rolloutBatches != _|_ {
|
||||
@@ -37,8 +40,8 @@ spec:
|
||||
}
|
||||
}
|
||||
parameter: {
|
||||
targetRevision: *context.revision | string
|
||||
targetSize: int
|
||||
targetRevision?: string
|
||||
targetSize: int
|
||||
rolloutBatches?: [...rolloutBatch]
|
||||
batchPartition?: int
|
||||
}
|
||||
|
||||
@@ -154,7 +154,7 @@ spec:
|
||||
}
|
||||
|
||||
if v.resources.requests.storage == _|_ {
|
||||
resources: requests: storage: "1Gi"
|
||||
resources: requests: storage: "8Gi"
|
||||
}
|
||||
if v.resources.requests.storage != _|_ {
|
||||
resources: requests: storage: v.resources.requests.storage
|
||||
|
||||
@@ -506,23 +506,49 @@ spec:
|
||||
import "strconv"
|
||||
ready: {
|
||||
if context.output.status.readyReplicas == _|_ {
|
||||
replica: "0"
|
||||
readyReplicas: 0
|
||||
}
|
||||
|
||||
if context.output.status.readyReplicas != _|_ {
|
||||
replica: strconv.FormatInt(context.output.status.readyReplicas, 10)
|
||||
readyReplicas: context.output.status.readyReplicas
|
||||
}
|
||||
}
|
||||
message: "Ready:" + ready.replica + "/" + strconv.FormatInt(context.output.spec.replicas, 10)
|
||||
|
||||
message: "Ready:" + strconv.FormatInt(ready.readyReplicas, 10) + "/" + strconv.FormatInt(context.output.spec.replicas, 10)
|
||||
healthPolicy: |-
|
||||
ready: {
|
||||
if context.output.status.readyReplicas == _|_ {
|
||||
replica: 0
|
||||
if context.output.status.updatedReplicas == _|_ {
|
||||
updatedReplicas : 0
|
||||
}
|
||||
|
||||
if context.output.status.updatedReplicas != _|_ {
|
||||
updatedReplicas : context.output.status.updatedReplicas
|
||||
}
|
||||
|
||||
if context.output.status.readyReplicas == _|_ {
|
||||
readyReplicas: 0
|
||||
}
|
||||
|
||||
if context.output.status.readyReplicas != _|_ {
|
||||
replica: context.output.status.readyReplicas
|
||||
readyReplicas: context.output.status.readyReplicas
|
||||
}
|
||||
|
||||
if context.output.status.replicas == _|_ {
|
||||
replicas: 0
|
||||
}
|
||||
if context.output.status.replicas != _|_ {
|
||||
replicas: context.output.status.replicas
|
||||
}
|
||||
|
||||
if context.output.status.observedGeneration != _|_ {
|
||||
observedGeneration: context.output.status.observedGeneration
|
||||
}
|
||||
|
||||
if context.output.status.observedGeneration == _|_ {
|
||||
observedGeneration: 0
|
||||
}
|
||||
}
|
||||
isHealth: context.output.spec.replicas == ready.replica
|
||||
isHealth: (context.output.spec.replicas == ready.readyReplicas) && (context.output.spec.replicas == ready.updatedReplicas) && (context.output.spec.replicas == ready.replicas) && (ready.observedGeneration == context.output.metadata.generation || ready.observedGeneration > context.output.metadata.generation)
|
||||
workload:
|
||||
definition:
|
||||
apiVersion: apps/v1
|
||||
|
||||
@@ -399,23 +399,49 @@ spec:
|
||||
import "strconv"
|
||||
ready: {
|
||||
if context.output.status.readyReplicas == _|_ {
|
||||
replica: "0"
|
||||
readyReplicas: 0
|
||||
}
|
||||
|
||||
if context.output.status.readyReplicas != _|_ {
|
||||
replica: strconv.FormatInt(context.output.status.readyReplicas, 10)
|
||||
readyReplicas: context.output.status.readyReplicas
|
||||
}
|
||||
}
|
||||
message: "Ready:" + ready.replica + "/" + strconv.FormatInt(context.output.spec.replicas, 10)
|
||||
|
||||
message: "Ready:" + strconv.FormatInt(ready.readyReplicas, 10) + "/" + strconv.FormatInt(context.output.spec.replicas, 10)
|
||||
healthPolicy: |-
|
||||
ready: {
|
||||
if context.output.status.readyReplicas == _|_ {
|
||||
replica: 0
|
||||
if context.output.status.updatedReplicas == _|_ {
|
||||
updatedReplicas : 0
|
||||
}
|
||||
|
||||
if context.output.status.updatedReplicas != _|_ {
|
||||
updatedReplicas : context.output.status.updatedReplicas
|
||||
}
|
||||
|
||||
if context.output.status.readyReplicas == _|_ {
|
||||
readyReplicas: 0
|
||||
}
|
||||
|
||||
if context.output.status.readyReplicas != _|_ {
|
||||
replica: context.output.status.readyReplicas
|
||||
readyReplicas: context.output.status.readyReplicas
|
||||
}
|
||||
|
||||
if context.output.status.replicas == _|_ {
|
||||
replicas: 0
|
||||
}
|
||||
if context.output.status.replicas != _|_ {
|
||||
replicas: context.output.status.replicas
|
||||
}
|
||||
|
||||
if context.output.status.observedGeneration != _|_ {
|
||||
observedGeneration: context.output.status.observedGeneration
|
||||
}
|
||||
|
||||
if context.output.status.observedGeneration == _|_ {
|
||||
observedGeneration: 0
|
||||
}
|
||||
}
|
||||
isHealth: context.output.spec.replicas == ready.replica
|
||||
isHealth: (context.output.spec.replicas == ready.readyReplicas) && (context.output.spec.replicas == ready.updatedReplicas) && (context.output.spec.replicas == ready.replicas) && (ready.observedGeneration == context.output.metadata.generation || ready.observedGeneration > context.output.metadata.generation)
|
||||
workload:
|
||||
definition:
|
||||
apiVersion: apps/v1
|
||||
|
||||
@@ -136,6 +136,9 @@ spec:
|
||||
{{ if .Values.multicluster.enabled }}
|
||||
- "--enable-cluster-gateway"
|
||||
{{ end }}
|
||||
- "--concurrent-reconciles={{ .Values.concurrentReconciles }}"
|
||||
- "--kube-api-qps={{ .Values.kubeClient.qps }}"
|
||||
- "--kube-api-burst={{ .Values.kubeClient.burst }}"
|
||||
image: {{ .Values.imageRegistry }}{{ .Values.image.repository }}:{{ .Values.image.tag }}
|
||||
imagePullPolicy: {{ quote .Values.image.pullPolicy }}
|
||||
resources:
|
||||
|
||||
@@ -105,6 +105,12 @@ dependCheckWait: 30s
|
||||
# OAMSpecVer is the oam spec version controller want to setup
|
||||
OAMSpecVer: "minimal"
|
||||
|
||||
kubeClient:
|
||||
# the qps for reconcile clients, default is 50
|
||||
qps: 50
|
||||
# the burst for reconcile clients, default is 100
|
||||
burst: 100
|
||||
|
||||
multicluster:
|
||||
enabled: false
|
||||
clusterGateway:
|
||||
|
||||
@@ -30,6 +30,8 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/oam-dev/kubevela/pkg/utils/util"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/klog/v2/klogr"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
@@ -46,6 +48,7 @@ import (
|
||||
"github.com/oam-dev/kubevela/pkg/multicluster"
|
||||
"github.com/oam-dev/kubevela/pkg/oam"
|
||||
"github.com/oam-dev/kubevela/pkg/oam/discoverymapper"
|
||||
"github.com/oam-dev/kubevela/pkg/resourcekeeper"
|
||||
"github.com/oam-dev/kubevela/pkg/utils/common"
|
||||
"github.com/oam-dev/kubevela/pkg/utils/system"
|
||||
oamwebhook "github.com/oam-dev/kubevela/pkg/webhook/core.oam.dev"
|
||||
@@ -130,7 +133,9 @@ func main() {
|
||||
"The duration the LeaderElector clients should wait between tries of actions")
|
||||
flag.BoolVar(&enableClusterGateway, "enable-cluster-gateway", false, "Enable cluster-gateway to use multicluster, disabled by default.")
|
||||
flag.BoolVar(&controllerArgs.EnableCompatibility, "enable-asi-compatibility", false, "enable compatibility for asi")
|
||||
flag.BoolVar(&controllerArgs.IgnoreAppWithoutControllerRequirement, "ignore-app-without-controller-version", false, "If true, application controller will not process the app without 'app.oam.dev/controller-version-require' annotation")
|
||||
standardcontroller.AddOptimizeFlags()
|
||||
flag.IntVar(&resourcekeeper.MaxDispatchConcurrent, "max-dispatch-concurrent", 10, "Set the max dispatch concurrent number, default is 10")
|
||||
|
||||
flag.Parse()
|
||||
// setup logging
|
||||
@@ -191,17 +196,19 @@ func main() {
|
||||
// wrapper the round tripper by multi cluster rewriter
|
||||
if enableClusterGateway {
|
||||
if _, err := multicluster.Initialize(restConfig, true); err != nil {
|
||||
klog.ErrorS(err, "failed to enable multicluster")
|
||||
klog.ErrorS(err, "failed to enable multi-cluster capability")
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
ctrl.SetLogger(klogr.New())
|
||||
|
||||
leaderElectionID := util.GenerateLeaderElectionID(kubevelaName, controllerArgs.IgnoreAppWithoutControllerRequirement)
|
||||
mgr, err := ctrl.NewManager(restConfig, ctrl.Options{
|
||||
Scheme: scheme,
|
||||
MetricsBindAddress: metricsAddr,
|
||||
LeaderElection: enableLeaderElection,
|
||||
LeaderElectionNamespace: leaderElectionNamespace,
|
||||
LeaderElectionID: kubevelaName,
|
||||
LeaderElectionID: leaderElectionID,
|
||||
Port: webhookPort,
|
||||
CertDir: certDir,
|
||||
HealthProbeBindAddress: healthAddr,
|
||||
|
||||
@@ -4,7 +4,7 @@ This guide helps you get started developing KubeVela.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
1. Golang version 1.16+
|
||||
1. Golang version 1.17+
|
||||
2. Kubernetes version v1.18+ with `~/.kube/config` configured.
|
||||
3. ginkgo 1.14.0+ (just for [E2E test](./developer-guide.md#e2e-test))
|
||||
4. golangci-lint 1.38.0+, it will install automatically if you run `make`, you can [install it manually](https://golangci-lint.run/usage/install/#local-installation) if the installation is too slow.
|
||||
@@ -15,6 +15,7 @@ This guide helps you get started developing KubeVela.
|
||||
<summary>Install Kubebuilder manually</summary>
|
||||
|
||||
linux:
|
||||
|
||||
```
|
||||
wget https://storage.googleapis.com/kubebuilder-tools/kubebuilder-tools-1.21.2-linux-amd64.tar.gz
|
||||
tar -zxvf kubebuilder-tools-1.21.2-linux-amd64.tar.gz
|
||||
@@ -23,6 +24,7 @@ sudo mv kubebuilder/bin/* /usr/local/kubebuilder/bin
|
||||
```
|
||||
|
||||
macOS:
|
||||
|
||||
```
|
||||
wget https://storage.googleapis.com/kubebuilder-tools/kubebuilder-tools-1.21.2-darwin-amd64.tar.gz
|
||||
tar -zxvf kubebuilder-tools-1.21.2-darwin-amd64.tar.gz
|
||||
@@ -30,14 +32,15 @@ mkdir -p /usr/local/kubebuilder/bin
|
||||
sudo mv kubebuilder/bin/* /usr/local/kubebuilder/bin
|
||||
```
|
||||
|
||||
For other OS or system architecture, please refer to https://storage.googleapis.com/kubebuilder-tools/
|
||||
For other OS or system architecture, please refer to https://storage.googleapis.com/kubebuilder-tools/
|
||||
|
||||
</details>
|
||||
|
||||
You may also be interested with KubeVela's [design](https://github.com/oam-dev/kubevela/tree/master/design/vela-core) before diving into its code.
|
||||
|
||||
## Build
|
||||
|
||||
* Clone this project
|
||||
- Clone this project
|
||||
|
||||
```shell script
|
||||
git clone git@github.com:oam-dev/kubevela.git
|
||||
@@ -50,7 +53,7 @@ KubeVela includes two parts, `vela core` and `vela cli`.
|
||||
|
||||
For local development, we probably need to build both of them.
|
||||
|
||||
* Build Vela CLI
|
||||
- Build Vela CLI
|
||||
|
||||
```shell script
|
||||
make
|
||||
@@ -58,7 +61,7 @@ make
|
||||
|
||||
After the vela cli built successfully, `make` command will create `vela` binary to `bin/` under the project.
|
||||
|
||||
* Configure `vela` binary to System PATH
|
||||
- Configure `vela` binary to System PATH
|
||||
|
||||
```shell script
|
||||
export PATH=$PATH:/your/path/to/project/kubevela/bin
|
||||
@@ -66,13 +69,13 @@ export PATH=$PATH:/your/path/to/project/kubevela/bin
|
||||
|
||||
Then you can use `vela` command directly.
|
||||
|
||||
* Build Vela Core
|
||||
- Build Vela Core
|
||||
|
||||
```shell script
|
||||
make manager
|
||||
```
|
||||
|
||||
* Run Vela Core
|
||||
- Run Vela Core
|
||||
|
||||
Firstly make sure your cluster has CRDs, below is the command that can help install all CRDs.
|
||||
|
||||
@@ -82,11 +85,13 @@ make core-install
|
||||
|
||||
To ensure you have created vela-system namespace and install definitions of necessary module.
|
||||
you can run the command:
|
||||
|
||||
```shell script
|
||||
make def-install
|
||||
```
|
||||
|
||||
And then run locally:
|
||||
|
||||
```shell script
|
||||
make core-run
|
||||
```
|
||||
@@ -182,8 +187,7 @@ mv ~/.kube/config.save ~/.kube/config
|
||||
make e2e-apiserver-test
|
||||
```
|
||||
|
||||
|
||||
## Next steps
|
||||
|
||||
* Read our [code conventions](coding-conventions.md)
|
||||
* Learn how to [Create a pull request](create-pull-request.md)
|
||||
- Read our [code conventions](coding-conventions.md)
|
||||
- Learn how to [Create a pull request](create-pull-request.md)
|
||||
|
||||
@@ -80,6 +80,27 @@ var _ = Describe("Addon Test", func() {
|
||||
}, 60*time.Second).Should(Succeed())
|
||||
})
|
||||
|
||||
It("Test Change default namespace can work", func() {
|
||||
output, err := e2e.LongTimeExecWithEnv("vela addon list", 600*time.Second, []string{"DEFAULT_VELA_NS=test-vela"})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(output).To(ContainSubstring("test-addon"))
|
||||
Expect(output).To(ContainSubstring("disabled"))
|
||||
|
||||
output, err = e2e.LongTimeExecWithEnv("vela addon enable test-addon", 600*time.Second, []string{"DEFAULT_VELA_NS=test-vela"})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(output).To(ContainSubstring("enabled Successfully."))
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
g.Expect(k8sClient.Get(context.Background(), types.NamespacedName{Name: "addon-test-addon", Namespace: "test-vela"}, &v1beta1.Application{})).Should(BeNil())
|
||||
}, 60*time.Second).Should(Succeed())
|
||||
|
||||
output, err = e2e.LongTimeExecWithEnv("vela addon disable test-addon", 600*time.Second, []string{"DEFAULT_VELA_NS=test-vela"})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(output).To(ContainSubstring("Successfully disable addon"))
|
||||
Eventually(func(g Gomega) {
|
||||
g.Expect(apierrors.IsNotFound(k8sClient.Get(context.Background(), types.NamespacedName{Name: "addon-test-addon", Namespace: "test-vela"}, &v1beta1.Application{}))).Should(BeTrue())
|
||||
}, 60*time.Second).Should(Succeed())
|
||||
})
|
||||
})
|
||||
|
||||
Context("Addon registry test", func() {
|
||||
|
||||
@@ -21,6 +21,8 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/yaml"
|
||||
@@ -63,12 +65,25 @@ func ApplyMockServerConfig() error {
|
||||
return err
|
||||
}
|
||||
|
||||
otherRegistry := cm.DeepCopy()
|
||||
|
||||
err = k8sClient.Get(ctx, types.NamespacedName{Name: cm.Name, Namespace: cm.Namespace}, &originCm)
|
||||
if err != nil && apierrors.IsNotFound(err) {
|
||||
err = k8sClient.Create(ctx, &cm)
|
||||
if err = k8sClient.Create(ctx, &cm); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
cm.ResourceVersion = originCm.ResourceVersion
|
||||
err = k8sClient.Update(ctx, &cm)
|
||||
if err = k8sClient.Update(ctx, &cm); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return err
|
||||
if err := k8sClient.Create(ctx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-vela"}}); err != nil {
|
||||
return err
|
||||
}
|
||||
otherRegistry.SetNamespace("test-vela")
|
||||
if err := k8sClient.Create(ctx, otherRegistry); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
16
e2e/cli.go
16
e2e/cli.go
@@ -80,6 +80,22 @@ func asyncExec(cli string) (*gexec.Session, error) {
|
||||
return session, err
|
||||
}
|
||||
|
||||
func LongTimeExecWithEnv(cli string, timeout time.Duration, env []string) (string, error) {
|
||||
var output []byte
|
||||
c := strings.Fields(cli)
|
||||
commandName := path.Join(rudrPath, c[0])
|
||||
command := exec.Command(commandName, c[1:]...)
|
||||
command.Env = os.Environ()
|
||||
command.Env = append(command.Env, env...)
|
||||
|
||||
session, err := gexec.Start(command, ginkgo.GinkgoWriter, ginkgo.GinkgoWriter)
|
||||
if err != nil {
|
||||
return string(output), err
|
||||
}
|
||||
s := session.Wait(timeout)
|
||||
return string(s.Out.Contents()) + string(s.Err.Contents()), nil
|
||||
}
|
||||
|
||||
// InteractiveExec executes a command with interactive input
|
||||
func InteractiveExec(cli string, consoleFn func(*expect.Console)) (string, error) {
|
||||
var output []byte
|
||||
|
||||
@@ -130,10 +130,11 @@ var _ = Describe("Test Kubectl Plugin", func() {
|
||||
Expect(output).Should(ContainSubstring(showTdResult))
|
||||
})
|
||||
It("Test show componentDefinition use Helm Charts as Workload", func() {
|
||||
cdName := "test-webapp-chart"
|
||||
output, err := e2e.Exec(fmt.Sprintf("kubectl-vela show %s -n default", cdName))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(output).Should(ContainSubstring("Properties"))
|
||||
Eventually(func() string {
|
||||
cdName := "test-webapp-chart"
|
||||
output, _ := e2e.Exec(fmt.Sprintf("kubectl-vela show %s -n default", cdName))
|
||||
return output
|
||||
}, 20*time.Second).Should(ContainSubstring("Properties"))
|
||||
})
|
||||
It("Test show componentDefinition def with raw Kube mode", func() {
|
||||
cdName := "kube-worker"
|
||||
|
||||
186
go.mod
186
go.mod
@@ -1,6 +1,6 @@
|
||||
module github.com/oam-dev/kubevela
|
||||
|
||||
go 1.16
|
||||
go 1.17
|
||||
|
||||
require (
|
||||
cuelang.org/go v0.2.2
|
||||
@@ -70,6 +70,7 @@ require (
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
|
||||
gotest.tools v2.2.0+incompatible
|
||||
helm.sh/helm/v3 v3.6.1
|
||||
istio.io/api v0.0.0-20210128181506-0c4b8e54850f // indirect
|
||||
istio.io/client-go v0.0.0-20210128182905-ee2edd059e02
|
||||
k8s.io/api v0.22.1
|
||||
k8s.io/apiextensions-apiserver v0.22.1
|
||||
@@ -90,7 +91,190 @@ require (
|
||||
sigs.k8s.io/yaml v1.2.0
|
||||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go v0.81.0 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
|
||||
github.com/Azure/go-autorest/autorest v0.11.18 // indirect
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.13 // indirect
|
||||
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
|
||||
github.com/Azure/go-autorest/logger v0.2.1 // indirect
|
||||
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
|
||||
github.com/BurntSushi/toml v0.3.1 // indirect
|
||||
github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd // indirect
|
||||
github.com/Masterminds/goutils v1.1.1 // indirect
|
||||
github.com/Masterminds/semver v1.5.0 // indirect
|
||||
github.com/Masterminds/semver/v3 v3.1.1 // indirect
|
||||
github.com/Masterminds/sprig v2.22.0+incompatible // indirect
|
||||
github.com/Masterminds/sprig/v3 v3.2.2 // indirect
|
||||
github.com/Masterminds/squirrel v1.5.0 // indirect
|
||||
github.com/Microsoft/go-winio v0.4.16 // indirect
|
||||
github.com/Microsoft/hcsshim v0.8.14 // indirect
|
||||
github.com/PuerkitoBio/purell v1.1.1 // indirect
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
|
||||
github.com/agext/levenshtein v1.2.2 // indirect
|
||||
github.com/alessio/shellescape v1.2.2 // indirect
|
||||
github.com/alibabacloud-go/debug v0.0.0-20190504072949-9472017b5c68 // indirect
|
||||
github.com/alibabacloud-go/endpoint-util v1.1.0 // indirect
|
||||
github.com/alibabacloud-go/openapi-util v0.0.7 // indirect
|
||||
github.com/alibabacloud-go/tea-utils v1.3.9 // indirect
|
||||
github.com/aliyun/credentials-go v1.1.2 // indirect
|
||||
github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 // indirect
|
||||
github.com/aws/aws-sdk-go v1.36.30 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/blang/semver v3.5.1+incompatible // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.1 // indirect
|
||||
github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 // indirect
|
||||
github.com/cockroachdb/apd/v2 v2.0.1 // indirect
|
||||
github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59 // indirect
|
||||
github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect
|
||||
github.com/creack/pty v1.1.11 // indirect
|
||||
github.com/cyphar/filepath-securejoin v0.2.2 // indirect
|
||||
github.com/deislabs/oras v0.11.1 // indirect
|
||||
github.com/docker/cli v20.10.5+incompatible // indirect
|
||||
github.com/docker/distribution v2.8.0-beta.1+incompatible // indirect
|
||||
github.com/docker/docker v17.12.0-ce-rc1.0.20200618181300-9dc6525e6118+incompatible // indirect
|
||||
github.com/docker/docker-credential-helpers v0.6.3 // indirect
|
||||
github.com/docker/go-connections v0.4.0 // indirect
|
||||
github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916 // indirect
|
||||
github.com/docker/go-units v0.4.0 // indirect
|
||||
github.com/emicklei/go-restful v2.9.5+incompatible // indirect
|
||||
github.com/emicklei/proto v1.6.15 // indirect
|
||||
github.com/emirpasic/gods v1.12.0 // indirect
|
||||
github.com/evanphx/json-patch/v5 v5.1.0 // indirect
|
||||
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect
|
||||
github.com/fatih/camelcase v1.0.0 // indirect
|
||||
github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect
|
||||
github.com/ghodss/yaml v1.0.0 // indirect
|
||||
github.com/go-errors/errors v1.0.1 // indirect
|
||||
github.com/go-logr/zapr v0.4.0 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.5 // indirect
|
||||
github.com/go-openapi/jsonreference v0.19.5 // indirect
|
||||
github.com/go-openapi/swag v0.19.14 // indirect
|
||||
github.com/go-playground/locales v0.14.0 // indirect
|
||||
github.com/go-playground/universal-translator v0.18.0 // indirect
|
||||
github.com/go-stack/stack v1.8.0 // indirect
|
||||
github.com/gobuffalo/flect v0.2.3 // indirect
|
||||
github.com/gobwas/glob v0.2.3 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/golang/snappy v0.0.1 // indirect
|
||||
github.com/google/btree v1.0.1 // indirect
|
||||
github.com/google/go-querystring v1.0.0 // indirect
|
||||
github.com/google/gofuzz v1.1.0 // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
github.com/googleapis/gnostic v0.5.5 // indirect
|
||||
github.com/gorilla/mux v1.8.0 // indirect
|
||||
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
|
||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||
github.com/huandu/xstrings v1.3.2 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.0.0 // indirect
|
||||
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
|
||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/jmoiron/sqlx v1.3.1 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.11 // indirect
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
|
||||
github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 // indirect
|
||||
github.com/klauspost/compress v1.11.0 // indirect
|
||||
github.com/kr/pretty v0.3.0 // indirect
|
||||
github.com/kr/pty v1.1.8 // indirect
|
||||
github.com/kr/text v0.2.0 // indirect
|
||||
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
|
||||
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
|
||||
github.com/leodido/go-urn v1.2.1 // indirect
|
||||
github.com/lib/pq v1.10.0 // indirect
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
|
||||
github.com/mailru/easyjson v0.7.6 // indirect
|
||||
github.com/mattn/go-colorable v0.1.8 // indirect
|
||||
github.com/mattn/go-isatty v0.0.12 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.9 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
|
||||
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect
|
||||
github.com/mitchellh/copystructure v1.2.0 // indirect
|
||||
github.com/mitchellh/go-homedir v1.1.0 // indirect
|
||||
github.com/mitchellh/go-wordwrap v1.0.0 // indirect
|
||||
github.com/mitchellh/reflectwalk v1.0.2 // indirect
|
||||
github.com/moby/spdystream v0.2.0 // indirect
|
||||
github.com/moby/term v0.0.0-20210610120745-9d4ed1856297 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.1 // indirect
|
||||
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
|
||||
github.com/morikuni/aec v1.0.0 // indirect
|
||||
github.com/mpvl/unique v0.0.0-20150818121801-cbe035fff7de // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
|
||||
github.com/nxadm/tail v1.4.8 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/pelletier/go-toml v1.9.3 // indirect
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/client_model v0.2.0 // indirect
|
||||
github.com/prometheus/common v0.26.0 // indirect
|
||||
github.com/prometheus/procfs v0.6.0 // indirect
|
||||
github.com/rogpeppe/go-internal v1.8.0 // indirect
|
||||
github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351 // indirect
|
||||
github.com/russross/blackfriday v1.5.2 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.0.1 // indirect
|
||||
github.com/sergi/go-diff v1.1.0 // indirect
|
||||
github.com/shopspring/decimal v1.2.0 // indirect
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect
|
||||
github.com/spf13/afero v1.6.0 // indirect
|
||||
github.com/spf13/cast v1.3.1 // indirect
|
||||
github.com/src-d/gcfg v1.4.0 // indirect
|
||||
github.com/tidwall/match v1.1.1 // indirect
|
||||
github.com/tidwall/pretty v1.2.0 // indirect
|
||||
github.com/tjfoc/gmsm v1.3.2 // indirect
|
||||
github.com/xanzy/ssh-agent v0.3.0 // indirect
|
||||
github.com/xdg-go/pbkdf2 v1.0.0 // indirect
|
||||
github.com/xdg-go/scram v1.0.2 // indirect
|
||||
github.com/xdg-go/stringprep v1.0.2 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
|
||||
github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca // indirect
|
||||
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect
|
||||
github.com/zclconf/go-cty v1.8.0 // indirect
|
||||
go.opencensus.io v0.23.0 // indirect
|
||||
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect
|
||||
go.uber.org/atomic v1.7.0 // indirect
|
||||
go.uber.org/multierr v1.6.0 // indirect
|
||||
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 // indirect
|
||||
golang.org/x/mod v0.4.2 // indirect
|
||||
golang.org/x/net v0.0.0-20211029224645-99673261e6eb // indirect
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
|
||||
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect
|
||||
golang.org/x/text v0.3.6 // indirect
|
||||
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
|
||||
gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c // indirect
|
||||
google.golang.org/grpc v1.38.0 // indirect
|
||||
google.golang.org/protobuf v1.26.0 // indirect
|
||||
gopkg.in/gorp.v1 v1.7.2 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/ini.v1 v1.62.0 // indirect
|
||||
gopkg.in/src-d/go-billy.v4 v4.3.2 // indirect
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
|
||||
gopkg.in/warnings.v0 v0.1.2 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
istio.io/gogo-genproto v0.0.0-20190930162913-45029607206a // indirect
|
||||
k8s.io/apiserver v0.22.1 // indirect
|
||||
k8s.io/component-base v0.22.1 // indirect
|
||||
sigs.k8s.io/apiserver-network-proxy v0.0.24 // indirect
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.24 // indirect
|
||||
sigs.k8s.io/apiserver-runtime v1.0.3-0.20210913073608-0663f60bfee2 // indirect
|
||||
sigs.k8s.io/kustomize/api v0.8.5 // indirect
|
||||
sigs.k8s.io/kustomize/kyaml v0.10.15 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.1.2 // indirect
|
||||
)
|
||||
|
||||
replace (
|
||||
github.com/docker/cli => github.com/docker/cli v20.10.9+incompatible
|
||||
github.com/docker/docker => github.com/moby/moby v17.12.0-ce-rc1.0.20200618181300-9dc6525e6118+incompatible
|
||||
github.com/wercker/stern => github.com/oam-dev/stern v1.13.2
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client => sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.24
|
||||
|
||||
7
go.sum
7
go.sum
@@ -406,11 +406,12 @@ github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMa
|
||||
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||
github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||
github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
|
||||
github.com/docker/cli v20.10.5+incompatible h1:bjflayQbWg+xOkF2WPEAOi4Y7zWhR7ptoPhV/VqLVDE=
|
||||
github.com/docker/cli v20.10.5+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/cli v20.10.9+incompatible h1:OJ7YkwQA+k2Oi51lmCojpjiygKpi76P7bg91b2eJxYU=
|
||||
github.com/docker/cli v20.10.9+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/distribution v0.0.0-20191216044856-a8371794149d/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
|
||||
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
|
||||
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/distribution v2.8.0-beta.1+incompatible h1:9MjVa+OTMHm4C0kKZB68jPlDM9Cg75ta4i46Gxxxn8o=
|
||||
github.com/docker/distribution v2.8.0-beta.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker-credential-helpers v0.6.3 h1:zI2p9+1NQYdnG6sMU26EX4aVGlqbInSQxQXLvzJ4RPQ=
|
||||
github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
|
||||
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
|
||||
|
||||
@@ -18,7 +18,7 @@ FAIL=false
|
||||
|
||||
for file in $(git ls-files | grep "\.go$" | grep -v vendor/); do
|
||||
echo -n "Header check: $file... "
|
||||
if [[ -z $(cat ${file} | grep "Copyright [0-9]\{4\}.\? The KubeVela Authors") && -z $(cat ${file} | grep "Copyright [0-9]\{4\} The Crossplane Authors") ]]; then
|
||||
if [[ -z $(cat ${file} | grep "Copyright [0-9]\{4\}\(-[0-9]\{4\}\)\?.\? The KubeVela Authors") && -z $(cat ${file} | grep "Copyright [0-9]\{4\} The Crossplane Authors") ]]; then
|
||||
ERR=true
|
||||
fi
|
||||
if [ $ERR == true ]; then
|
||||
|
||||
@@ -37,7 +37,7 @@ goimports:
|
||||
ifeq (, $(shell which goimports))
|
||||
@{ \
|
||||
set -e ;\
|
||||
GO111MODULE=off go get -u golang.org/x/tools/cmd/goimports ;\
|
||||
go install golang.org/x/tools/cmd/goimports@latest ;\
|
||||
}
|
||||
GOIMPORTS=$(GOBIN)/goimports
|
||||
else
|
||||
@@ -49,7 +49,7 @@ installcue:
|
||||
ifeq (, $(shell which cue))
|
||||
@{ \
|
||||
set -e ;\
|
||||
GO111MODULE=off go get -u cuelang.org/go/cmd/cue ;\
|
||||
go install cuelang.org/go/cmd/cue@latest ;\
|
||||
}
|
||||
CUE=$(GOBIN)/cue
|
||||
else
|
||||
|
||||
@@ -23,7 +23,6 @@ import (
|
||||
"fmt"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"text/template"
|
||||
@@ -32,8 +31,10 @@ import (
|
||||
"cuelang.org/go/cue"
|
||||
cueyaml "cuelang.org/go/encoding/yaml"
|
||||
"github.com/google/go-github/v32/github"
|
||||
"github.com/hashicorp/go-version"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/oauth2"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@@ -41,6 +42,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
k8syaml "k8s.io/apimachinery/pkg/runtime/serializer/yaml"
|
||||
types2 "k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/discovery"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"k8s.io/klog/v2"
|
||||
@@ -60,6 +62,7 @@ import (
|
||||
"github.com/oam-dev/kubevela/pkg/utils"
|
||||
"github.com/oam-dev/kubevela/pkg/utils/apply"
|
||||
"github.com/oam-dev/kubevela/pkg/utils/common"
|
||||
version2 "github.com/oam-dev/kubevela/version"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -80,10 +83,13 @@ const (
|
||||
|
||||
// DefSchemaName is the addon definition schemas dir name
|
||||
DefSchemaName string = "schemas"
|
||||
|
||||
// AddonParameterDataKey is the key of parameter in addon args secrets
|
||||
AddonParameterDataKey string = "addonParameterDataKey"
|
||||
)
|
||||
|
||||
// ParameterFileName is the addon resources/parameter.cue file name
|
||||
var ParameterFileName = filepath.Join("resources", "parameter.cue")
|
||||
var ParameterFileName = strings.Join([]string{"resources", "parameter.cue"}, "/")
|
||||
|
||||
// ListOptions contains flags mark what files should be read in an addon directory
|
||||
type ListOptions struct {
|
||||
@@ -176,7 +182,7 @@ var Patterns = []Pattern{{Value: ReadmeFileName}, {Value: MetadataFileName}, {Va
|
||||
func GetPatternFromItem(it Item, r AsyncReader, rootPath string) string {
|
||||
relativePath := r.RelativePath(it)
|
||||
for _, p := range Patterns {
|
||||
if strings.HasPrefix(relativePath, filepath.Join(rootPath, p.Value)) {
|
||||
if strings.HasPrefix(relativePath, strings.Join([]string{rootPath, p.Value}, "/")) {
|
||||
return p.Value
|
||||
}
|
||||
}
|
||||
@@ -466,6 +472,10 @@ func RenderApp(ctx context.Context, addon *InstallPackage, config *rest.Config,
|
||||
}
|
||||
}
|
||||
app.Labels = util.MergeMapOverrideWithDst(app.Labels, map[string]string{oam.LabelAddonName: addon.Name})
|
||||
|
||||
// force override the namespace defined vela with DefaultVelaNS,this value can be modified by Env
|
||||
app.SetNamespace(types.DefaultKubeVelaNS)
|
||||
|
||||
for _, namespace := range addon.NeedNamespace {
|
||||
// vela-system must exist before rendering vela addon
|
||||
if namespace == types.DefaultKubeVelaNS {
|
||||
@@ -479,13 +489,14 @@ func RenderApp(ctx context.Context, addon *InstallPackage, config *rest.Config,
|
||||
app.Spec.Components = append(app.Spec.Components, comp)
|
||||
}
|
||||
|
||||
for _, tmpl := range addon.YAMLTemplates {
|
||||
comp, err := renderRawComponent(tmpl)
|
||||
if len(addon.YAMLTemplates) != 0 {
|
||||
comp, err := renderK8sObjectsComponent(addon.YAMLTemplates, addon.Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
app.Spec.Components = append(app.Spec.Components, *comp)
|
||||
}
|
||||
|
||||
for _, tmpl := range addon.CUETemplates {
|
||||
comp, err := renderCUETemplate(tmpl, addon.Parameters, args)
|
||||
if err != nil {
|
||||
@@ -546,19 +557,15 @@ func RenderApp(ctx context.Context, addon *InstallPackage, config *rest.Config,
|
||||
app.Spec.Workflow.Steps = append(app.Spec.Workflow.Steps, workflowSteps...)
|
||||
|
||||
default:
|
||||
for _, def := range addon.Definitions {
|
||||
comp, err := renderRawComponent(def)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
app.Spec.Components = append(app.Spec.Components, *comp)
|
||||
}
|
||||
for _, cueDef := range addon.CUEDefinitions {
|
||||
def := definition.Definition{Unstructured: unstructured.Unstructured{}}
|
||||
err := def.FromCUEString(cueDef.Data, config)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "fail to render definition: %s in cue's format", cueDef.Name)
|
||||
}
|
||||
if def.Unstructured.GetNamespace() == "" {
|
||||
def.Unstructured.SetNamespace(types.DefaultKubeVelaNS)
|
||||
}
|
||||
app.Spec.Components = append(app.Spec.Components, common2.ApplicationComponent{
|
||||
Name: cueDef.Name,
|
||||
Type: "raw",
|
||||
@@ -589,25 +596,28 @@ func RenderApp(ctx context.Context, addon *InstallPackage, config *rest.Config,
|
||||
func RenderDefinitions(addon *InstallPackage, config *rest.Config) ([]*unstructured.Unstructured, error) {
|
||||
defObjs := make([]*unstructured.Unstructured, 0)
|
||||
|
||||
if isDeployToRuntimeOnly(addon) {
|
||||
// Runtime cluster mode needs to deploy definitions to control plane k8s.
|
||||
for _, def := range addon.Definitions {
|
||||
obj, err := renderObject(def)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defObjs = append(defObjs, obj)
|
||||
}
|
||||
|
||||
for _, cueDef := range addon.CUEDefinitions {
|
||||
def := definition.Definition{Unstructured: unstructured.Unstructured{}}
|
||||
err := def.FromCUEString(cueDef.Data, config)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "fail to render definition: %s in cue's format", cueDef.Name)
|
||||
}
|
||||
defObjs = append(defObjs, &def.Unstructured)
|
||||
// No matter runtime mode or control mode , definition only needs to control plane k8s.
|
||||
for _, def := range addon.Definitions {
|
||||
obj, err := renderObject(def)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// we should ignore the namespace defined in definition yaml, override the filed by DefaultKubeVelaNS
|
||||
obj.SetNamespace(types.DefaultKubeVelaNS)
|
||||
defObjs = append(defObjs, obj)
|
||||
}
|
||||
|
||||
for _, cueDef := range addon.CUEDefinitions {
|
||||
def := definition.Definition{Unstructured: unstructured.Unstructured{}}
|
||||
err := def.FromCUEString(cueDef.Data, config)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "fail to render definition: %s in cue's format", cueDef.Name)
|
||||
}
|
||||
// we should ignore the namespace defined in definition yaml, override the filed by DefaultKubeVelaNS
|
||||
def.SetNamespace(types.DefaultKubeVelaNS)
|
||||
defObjs = append(defObjs, &def.Unstructured)
|
||||
}
|
||||
|
||||
return defObjs, nil
|
||||
}
|
||||
|
||||
@@ -735,17 +745,25 @@ func renderNamespace(namespace string) *unstructured.Unstructured {
|
||||
return u
|
||||
}
|
||||
|
||||
// renderRawComponent will return a component in raw type from string
|
||||
func renderRawComponent(elem ElementFile) (*common2.ApplicationComponent, error) {
|
||||
baseRawComponent := common2.ApplicationComponent{
|
||||
Type: "raw",
|
||||
Name: strings.ReplaceAll(elem.Name, ".", "-"),
|
||||
func renderK8sObjectsComponent(elems []ElementFile, addonName string) (*common2.ApplicationComponent, error) {
|
||||
var objects []*unstructured.Unstructured
|
||||
for _, elem := range elems {
|
||||
obj, err := renderObject(elem)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
objects = append(objects, obj)
|
||||
}
|
||||
obj, err := renderObject(elem)
|
||||
properties := map[string]interface{}{"objects": objects}
|
||||
propJSON, err := json.Marshal(properties)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
baseRawComponent.Properties = util.Object2RawExtension(obj)
|
||||
baseRawComponent := common2.ApplicationComponent{
|
||||
Type: "k8s-objects",
|
||||
Name: addonName + "-resources",
|
||||
Properties: &runtime.RawExtension{Raw: propJSON},
|
||||
}
|
||||
return &baseRawComponent, nil
|
||||
}
|
||||
|
||||
@@ -812,14 +830,9 @@ func Convert2AppName(name string) string {
|
||||
|
||||
// RenderArgsSecret render addon enable argument to secret
|
||||
func RenderArgsSecret(addon *InstallPackage, args map[string]interface{}) *unstructured.Unstructured {
|
||||
data := make(map[string]string)
|
||||
for k, v := range args {
|
||||
switch v := v.(type) {
|
||||
case bool:
|
||||
data[k] = strconv.FormatBool(v)
|
||||
default:
|
||||
data[k] = fmt.Sprintf("%v", v)
|
||||
}
|
||||
argsByte, err := json.Marshal(args)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
sec := v1.Secret{
|
||||
TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "Secret"},
|
||||
@@ -827,8 +840,10 @@ func RenderArgsSecret(addon *InstallPackage, args map[string]interface{}) *unstr
|
||||
Name: Convert2SecName(addon.Name),
|
||||
Namespace: types.DefaultKubeVelaNS,
|
||||
},
|
||||
StringData: data,
|
||||
Type: v1.SecretTypeOpaque,
|
||||
Data: map[string][]byte{
|
||||
AddonParameterDataKey: argsByte,
|
||||
},
|
||||
Type: v1.SecretTypeOpaque,
|
||||
}
|
||||
u, err := util.Object2Unstructured(sec)
|
||||
if err != nil {
|
||||
@@ -837,6 +852,25 @@ func RenderArgsSecret(addon *InstallPackage, args map[string]interface{}) *unstr
|
||||
return u
|
||||
}
|
||||
|
||||
// FetchArgsFromSecret fetch addon args from secrets
|
||||
func FetchArgsFromSecret(sec *v1.Secret) (map[string]interface{}, error) {
|
||||
res := map[string]interface{}{}
|
||||
if args, ok := sec.Data[AddonParameterDataKey]; ok {
|
||||
err := json.Unmarshal(args, &res)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// this is backward compatibility code for old way to storage parameter
|
||||
res = make(map[string]interface{}, len(sec.Data))
|
||||
for k, v := range sec.Data {
|
||||
res[k] = string(v)
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// Convert2SecName generate addon argument secret name
|
||||
func Convert2SecName(name string) string {
|
||||
return addonSecPrefix + name
|
||||
@@ -853,10 +887,11 @@ type Installer struct {
|
||||
registryMeta map[string]SourceMeta
|
||||
args map[string]interface{}
|
||||
cache *Cache
|
||||
dc *discovery.DiscoveryClient
|
||||
}
|
||||
|
||||
// NewAddonInstaller will create an installer for addon
|
||||
func NewAddonInstaller(ctx context.Context, cli client.Client, apply apply.Applicator, config *rest.Config, r *Registry, args map[string]interface{}, cache *Cache) Installer {
|
||||
func NewAddonInstaller(ctx context.Context, cli client.Client, discoveryClient *discovery.DiscoveryClient, apply apply.Applicator, config *rest.Config, r *Registry, args map[string]interface{}, cache *Cache) Installer {
|
||||
return Installer{
|
||||
ctx: ctx,
|
||||
config: config,
|
||||
@@ -865,12 +900,18 @@ func NewAddonInstaller(ctx context.Context, cli client.Client, apply apply.Appli
|
||||
r: r,
|
||||
args: args,
|
||||
cache: cache,
|
||||
dc: discoveryClient,
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Installer) enableAddon(addon *InstallPackage) error {
|
||||
var err error
|
||||
h.addon = addon
|
||||
err = checkAddonVersionMeetRequired(h.ctx, addon.SystemRequirements, h.cli, h.dc)
|
||||
if err != nil {
|
||||
return ErrVersionMismatch
|
||||
}
|
||||
|
||||
if err = h.installDependency(addon); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -945,6 +986,26 @@ func (h *Installer) installDependency(addon *InstallPackage) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkDependency checks if addon's dependency
|
||||
func (h *Installer) checkDependency(addon *InstallPackage) ([]string, error) {
|
||||
var app v1beta1.Application
|
||||
var needEnable []string
|
||||
for _, dep := range addon.Dependencies {
|
||||
err := h.cli.Get(h.ctx, client.ObjectKey{
|
||||
Namespace: types.DefaultKubeVelaNS,
|
||||
Name: Convert2AppName(dep.Name),
|
||||
}, &app)
|
||||
if err == nil {
|
||||
continue
|
||||
}
|
||||
if !apierrors.IsNotFound(err) {
|
||||
return nil, err
|
||||
}
|
||||
needEnable = append(needEnable, dep.Name)
|
||||
}
|
||||
return needEnable, nil
|
||||
}
|
||||
|
||||
func (h *Installer) dispatchAddonResource(addon *InstallPackage) error {
|
||||
app, err := RenderApp(h.ctx, addon, h.config, h.cli, h.args)
|
||||
if err != nil {
|
||||
@@ -959,7 +1020,7 @@ func (h *Installer) dispatchAddonResource(addon *InstallPackage) error {
|
||||
|
||||
app.SetLabels(util.MergeMapOverrideWithDst(app.GetLabels(), map[string]string{oam.LabelAddonRegistry: h.r.Name}))
|
||||
|
||||
defs, err := RenderDefinitions(h.addon, h.config)
|
||||
defs, err := RenderDefinitions(addon, h.config)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "render addon definitions fail")
|
||||
}
|
||||
@@ -969,7 +1030,7 @@ func (h *Installer) dispatchAddonResource(addon *InstallPackage) error {
|
||||
return errors.Wrap(err, "render addon definitions' schema fail")
|
||||
}
|
||||
|
||||
err = h.apply.Apply(h.ctx, app)
|
||||
err = h.apply.Apply(h.ctx, app, apply.DisableUpdateAnnotation())
|
||||
if err != nil {
|
||||
klog.Errorf("fail to create application: %v", err)
|
||||
return errors.Wrap(err, "fail to create application")
|
||||
@@ -977,7 +1038,7 @@ func (h *Installer) dispatchAddonResource(addon *InstallPackage) error {
|
||||
|
||||
for _, def := range defs {
|
||||
addOwner(def, app)
|
||||
err = h.apply.Apply(h.ctx, def)
|
||||
err = h.apply.Apply(h.ctx, def, apply.DisableUpdateAnnotation())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -985,7 +1046,7 @@ func (h *Installer) dispatchAddonResource(addon *InstallPackage) error {
|
||||
|
||||
for _, schema := range schemas {
|
||||
addOwner(schema, app)
|
||||
err = h.apply.Apply(h.ctx, schema)
|
||||
err = h.apply.Apply(h.ctx, schema, apply.DisableUpdateAnnotation())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -994,7 +1055,7 @@ func (h *Installer) dispatchAddonResource(addon *InstallPackage) error {
|
||||
if h.args != nil && len(h.args) > 0 {
|
||||
sec := RenderArgsSecret(addon, h.args)
|
||||
addOwner(sec, app)
|
||||
err = h.apply.Apply(h.ctx, sec)
|
||||
err = h.apply.Apply(h.ctx, sec, apply.DisableUpdateAnnotation())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1072,3 +1133,99 @@ func FetchAddonRelatedApp(ctx context.Context, cli client.Client, addonName stri
|
||||
}
|
||||
return app, nil
|
||||
}
|
||||
|
||||
// checkAddonVersionMeetRequired will check the version of cli/ux and kubevela-core-controller whether meet the addon requirement, if not will return an error
|
||||
// please notice that this func is for check production environment which vela cli/ux or vela core is officalVersion
|
||||
// if version is for test or debug eg: latest/commit-id/branch-name this func will return nil error
|
||||
func checkAddonVersionMeetRequired(ctx context.Context, require *SystemRequirements, k8sClient client.Client, dc *discovery.DiscoveryClient) error {
|
||||
if require == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// if not semver version, bypass check cli/ux. eg: {branch name/git commit id/UNKNOWN}
|
||||
if version2.IsOfficialKubeVelaVersion(version2.VelaVersion) {
|
||||
res, err := checkSemVer(version2.VelaVersion, require.VelaVersion)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !res {
|
||||
return fmt.Errorf("vela cli/ux version: %s cannot meet requirement", version2.VelaVersion)
|
||||
}
|
||||
}
|
||||
|
||||
// check vela core controller version
|
||||
imageVersion, err := fetchVelaCoreImageTag(ctx, k8sClient)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// if not semver version, bypass check vela-core.
|
||||
if version2.IsOfficialKubeVelaVersion(imageVersion) {
|
||||
res, err := checkSemVer(imageVersion, require.VelaVersion)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !res {
|
||||
return fmt.Errorf("the vela core controller: %s cannot meet requirement ", imageVersion)
|
||||
}
|
||||
}
|
||||
|
||||
// discovery client is nil so bypass check kubernetes version
|
||||
if dc == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
k8sVersion, err := dc.ServerVersion()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// if not semver version, bypass check kubernetes version.
|
||||
if version2.IsOfficialKubeVelaVersion(k8sVersion.GitVersion) {
|
||||
res, err := checkSemVer(k8sVersion.GitVersion, require.KubernetesVersion)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !res {
|
||||
return fmt.Errorf("the kubernetes version %s cannot meet requirement", k8sVersion.GitVersion)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkSemVer(actual string, require string) (bool, error) {
|
||||
if len(require) == 0 {
|
||||
return true, nil
|
||||
}
|
||||
smeVer := strings.TrimPrefix(actual, "v")
|
||||
l := strings.ReplaceAll(require, "v", " ")
|
||||
constraint, err := version.NewConstraint(l)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
v, err := version.NewVersion(smeVer)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return constraint.Check(v), nil
|
||||
}
|
||||
|
||||
func fetchVelaCoreImageTag(ctx context.Context, k8sClient client.Client) (string, error) {
|
||||
deploy := &appsv1.Deployment{}
|
||||
if err := k8sClient.Get(ctx, types2.NamespacedName{Namespace: types.DefaultKubeVelaNS, Name: types.KubeVelaControllerDeployment}, deploy); err != nil {
|
||||
return "", err
|
||||
}
|
||||
var tag string
|
||||
for _, c := range deploy.Spec.Template.Spec.Containers {
|
||||
if c.Name == types.DefaultKubeVelaReleaseName {
|
||||
l := strings.Split(c.Image, ":")
|
||||
if len(l) == 1 {
|
||||
// if tag is empty mean use latest image
|
||||
return "latest", nil
|
||||
}
|
||||
tag = l[1]
|
||||
}
|
||||
}
|
||||
return tag, nil
|
||||
}
|
||||
|
||||
@@ -21,6 +21,10 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
types2 "k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
@@ -30,6 +34,8 @@ import (
|
||||
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
|
||||
"github.com/oam-dev/kubevela/apis/types"
|
||||
"github.com/oam-dev/kubevela/pkg/oam/util"
|
||||
)
|
||||
|
||||
var _ = Describe("Addon test", func() {
|
||||
@@ -176,6 +182,94 @@ var _ = Describe("Addon test", func() {
|
||||
})
|
||||
})
|
||||
|
||||
var _ = Describe("Addon func test", func() {
|
||||
var deploy appsv1.Deployment
|
||||
|
||||
AfterEach(func() {
|
||||
Expect(k8sClient.Delete(ctx, &deploy))
|
||||
})
|
||||
|
||||
It("fetchVelaCoreImageTag func test", func() {
|
||||
deploy = appsv1.Deployment{}
|
||||
tag, err := fetchVelaCoreImageTag(ctx, k8sClient)
|
||||
Expect(err).Should(util.NotFoundMatcher{})
|
||||
Expect(tag).Should(BeEquivalentTo(""))
|
||||
|
||||
Expect(yaml.Unmarshal([]byte(deployYaml), &deploy)).Should(BeNil())
|
||||
deploy.SetNamespace(types.DefaultKubeVelaNS)
|
||||
Expect(k8sClient.Create(ctx, &deploy)).Should(BeNil())
|
||||
|
||||
Eventually(func() error {
|
||||
tag, err := fetchVelaCoreImageTag(ctx, k8sClient)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if tag != "v1.2.3" {
|
||||
return fmt.Errorf("tag missmatch want %s actual %s", "v1.2.3", tag)
|
||||
}
|
||||
return err
|
||||
}, 30*time.Second, 300*time.Millisecond).Should(BeNil())
|
||||
})
|
||||
|
||||
It("checkAddonVersionMeetRequired func test", func() {
|
||||
deploy = appsv1.Deployment{}
|
||||
Expect(checkAddonVersionMeetRequired(ctx, &SystemRequirements{VelaVersion: ">=v1.2.1"}, k8sClient, dc)).Should(util.NotFoundMatcher{})
|
||||
Expect(yaml.Unmarshal([]byte(deployYaml), &deploy)).Should(BeNil())
|
||||
deploy.SetNamespace(types.DefaultKubeVelaNS)
|
||||
Expect(k8sClient.Create(ctx, &deploy)).Should(BeNil())
|
||||
|
||||
Expect(checkAddonVersionMeetRequired(ctx, &SystemRequirements{VelaVersion: ">=v1.2.1"}, k8sClient, dc)).Should(BeNil())
|
||||
Expect(checkAddonVersionMeetRequired(ctx, &SystemRequirements{VelaVersion: ">=v1.2.4"}, k8sClient, dc)).ShouldNot(BeNil())
|
||||
})
|
||||
})
|
||||
|
||||
var _ = Describe("Test addon util func", func() {
|
||||
|
||||
It("test render and fetch args", func() {
|
||||
i := InstallPackage{Meta: Meta{Name: "test-addon"}}
|
||||
args := map[string]interface{}{
|
||||
"imagePullSecrets": []string{
|
||||
"myreg", "myreg1",
|
||||
},
|
||||
}
|
||||
u := RenderArgsSecret(&i, args)
|
||||
secName := u.GetName()
|
||||
secNs := u.GetNamespace()
|
||||
Expect(k8sClient.Create(ctx, u)).Should(BeNil())
|
||||
|
||||
sec := v1.Secret{}
|
||||
Expect(k8sClient.Get(ctx, types2.NamespacedName{Namespace: secNs, Name: secName}, &sec)).Should(BeNil())
|
||||
res, err := FetchArgsFromSecret(&sec)
|
||||
Expect(err).Should(BeNil())
|
||||
Expect(res).Should(BeEquivalentTo(map[string]interface{}{"imagePullSecrets": []interface{}{"myreg", "myreg1"}}))
|
||||
})
|
||||
|
||||
It("test render and fetch args backward compatibility", func() {
|
||||
secArgs := v1.Secret{
|
||||
TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "Secret"},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: Convert2SecName("test-addon-old-args"),
|
||||
Namespace: types.DefaultKubeVelaNS,
|
||||
},
|
||||
StringData: map[string]string{
|
||||
"repo": "www.test.com",
|
||||
"tag": "v1.3.1",
|
||||
},
|
||||
Type: v1.SecretTypeOpaque,
|
||||
}
|
||||
secName := secArgs.GetName()
|
||||
secNs := secArgs.GetNamespace()
|
||||
Expect(k8sClient.Create(ctx, &secArgs)).Should(BeNil())
|
||||
|
||||
sec := v1.Secret{}
|
||||
Expect(k8sClient.Get(ctx, types2.NamespacedName{Namespace: secNs, Name: secName}, &sec)).Should(BeNil())
|
||||
res, err := FetchArgsFromSecret(&sec)
|
||||
Expect(err).Should(BeNil())
|
||||
Expect(res).Should(BeEquivalentTo(map[string]interface{}{"repo": "www.test.com", "tag": "v1.3.1"}))
|
||||
})
|
||||
|
||||
})
|
||||
|
||||
const (
|
||||
appYaml = `apiVersion: core.oam.dev/v1beta1
|
||||
kind: Application
|
||||
@@ -201,4 +295,56 @@ spec:
|
||||
image: crccheck/hello-world
|
||||
port: 8000
|
||||
`
|
||||
deployYaml = `apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: kubevela-vela-core
|
||||
namespace: vela-system
|
||||
spec:
|
||||
progressDeadlineSeconds: 600
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 10
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/instance: kubevela
|
||||
app.kubernetes.io/name: vela-core
|
||||
strategy:
|
||||
rollingUpdate:
|
||||
maxSurge: 25%
|
||||
maxUnavailable: 25%
|
||||
type: RollingUpdate
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
prometheus.io/path: /metrics
|
||||
prometheus.io/port: "8080"
|
||||
prometheus.io/scrape: "true"
|
||||
labels:
|
||||
app.kubernetes.io/instance: kubevela
|
||||
app.kubernetes.io/name: vela-core
|
||||
spec:
|
||||
containers:
|
||||
- args:
|
||||
image: oamdev/vela-core:v1.2.3
|
||||
imagePullPolicy: Always
|
||||
name: kubevela
|
||||
ports:
|
||||
- containerPort: 9443
|
||||
name: webhook-server
|
||||
protocol: TCP
|
||||
- containerPort: 9440
|
||||
name: healthz
|
||||
protocol: TCP
|
||||
resources:
|
||||
limits:
|
||||
cpu: 500m
|
||||
memory: 1Gi
|
||||
requests:
|
||||
cpu: 50m
|
||||
memory: 20Mi
|
||||
dnsPolicy: ClusterFirst
|
||||
restartPolicy: Always
|
||||
schedulerName: default-scheduler
|
||||
securityContext: {}
|
||||
terminationGracePeriodSeconds: 30`
|
||||
)
|
||||
|
||||
@@ -20,6 +20,7 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
@@ -27,6 +28,8 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
version2 "github.com/oam-dev/kubevela/version"
|
||||
|
||||
"github.com/crossplane/crossplane-runtime/pkg/test"
|
||||
"github.com/google/go-github/v32/github"
|
||||
v1alpha12 "github.com/oam-dev/cluster-gateway/pkg/apis/cluster/v1alpha1"
|
||||
@@ -258,6 +261,39 @@ func TestRenderDeploy2RuntimeAddon(t *testing.T) {
|
||||
assert.Equal(t, steps[len(steps)-1].Type, "deploy2runtime")
|
||||
}
|
||||
|
||||
func TestRenderDefinitions(t *testing.T) {
|
||||
addonDeployToRuntime := baseAddon
|
||||
addonDeployToRuntime.Meta.DeployTo = &DeployTo{
|
||||
DisableControlPlane: false,
|
||||
RuntimeCluster: false,
|
||||
}
|
||||
defs, err := RenderDefinitions(&addonDeployToRuntime, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, len(defs), 1)
|
||||
def := defs[0]
|
||||
assert.Equal(t, def.GetAPIVersion(), "core.oam.dev/v1beta1")
|
||||
assert.Equal(t, def.GetKind(), "TraitDefinition")
|
||||
|
||||
app, err := RenderApp(ctx, &addonDeployToRuntime, nil, nil, map[string]interface{}{})
|
||||
assert.NoError(t, err)
|
||||
// addon which app work on no-runtime-cluster mode workflow is nil
|
||||
assert.Nil(t, app.Spec.Workflow)
|
||||
}
|
||||
|
||||
func TestRenderK8sObjects(t *testing.T) {
|
||||
addonMultiYaml := multiYamlAddon
|
||||
addonMultiYaml.Meta.DeployTo = &DeployTo{
|
||||
DisableControlPlane: false,
|
||||
RuntimeCluster: false,
|
||||
}
|
||||
|
||||
app, err := RenderApp(ctx, &addonMultiYaml, nil, nil, map[string]interface{}{})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, len(app.Spec.Components), 1)
|
||||
comp := app.Spec.Components[0]
|
||||
assert.Equal(t, comp.Type, "k8s-objects")
|
||||
}
|
||||
|
||||
func TestGetAddonStatus(t *testing.T) {
|
||||
getFunc := test.MockGetFn(func(ctx context.Context, key client.ObjectKey, obj client.Object) error {
|
||||
switch key.Name {
|
||||
@@ -402,6 +438,22 @@ var baseAddon = InstallPackage{
|
||||
},
|
||||
}
|
||||
|
||||
var multiYamlAddon = InstallPackage{
|
||||
Meta: Meta{
|
||||
Name: "test-render-multi-yaml-addon",
|
||||
},
|
||||
YAMLTemplates: []ElementFile{
|
||||
{
|
||||
Data: testYamlObject1,
|
||||
Name: "test-object-1",
|
||||
},
|
||||
{
|
||||
Data: testYamlObject2,
|
||||
Name: "test-object-2",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
var testCueDef = `annotations: {
|
||||
type: "trait"
|
||||
annotations: {}
|
||||
@@ -433,6 +485,53 @@ template: {
|
||||
}
|
||||
`
|
||||
|
||||
var testYamlObject1 = `
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: nginx-deployment
|
||||
labels:
|
||||
app: nginx
|
||||
spec:
|
||||
replicas: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: nginx
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: nginx
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx:1.14.2
|
||||
ports:
|
||||
- containerPort: 80
|
||||
`
|
||||
var testYamlObject2 = `
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: nginx-deployment-2
|
||||
labels:
|
||||
app: nginx
|
||||
spec:
|
||||
replicas: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: nginx
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: nginx
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx:1.14.2
|
||||
ports:
|
||||
- containerPort: 80
|
||||
`
|
||||
|
||||
func TestRenderApp4Observability(t *testing.T) {
|
||||
k8sClient := fake.NewClientBuilder().Build()
|
||||
testcases := []struct {
|
||||
@@ -555,3 +654,96 @@ func TestGitLabReaderNotPanic(t *testing.T) {
|
||||
_, err := NewAsyncReader("https://gitlab.com/test/catalog", "", "addons", "", gitType)
|
||||
assert.EqualError(t, err, "git type repository only support github for now")
|
||||
}
|
||||
|
||||
func TestCheckSemVer(t *testing.T) {
|
||||
testCases := []struct {
|
||||
actual string
|
||||
require string
|
||||
nilError bool
|
||||
res bool
|
||||
}{
|
||||
{
|
||||
actual: "v1.2.1",
|
||||
require: "<=v1.2.1",
|
||||
res: true,
|
||||
},
|
||||
{
|
||||
actual: "v1.2.1",
|
||||
require: ">v1.2.1",
|
||||
res: false,
|
||||
},
|
||||
{
|
||||
actual: "v1.2.1",
|
||||
require: "<=v1.2.3",
|
||||
res: true,
|
||||
},
|
||||
{
|
||||
actual: "v1.2",
|
||||
require: "<=v1.2.3",
|
||||
res: true,
|
||||
},
|
||||
{
|
||||
actual: "v1.2.1",
|
||||
require: ">v1.2.3",
|
||||
res: false,
|
||||
},
|
||||
{
|
||||
actual: "v1.2.1",
|
||||
require: "=v1.2.1",
|
||||
res: true,
|
||||
},
|
||||
{
|
||||
actual: "1.2.1",
|
||||
require: "=v1.2.1",
|
||||
res: true,
|
||||
},
|
||||
{
|
||||
actual: "1.2.1",
|
||||
require: "",
|
||||
res: true,
|
||||
},
|
||||
{
|
||||
actual: "v1.2.2",
|
||||
require: "<=v1.2.3, >=v1.2.1",
|
||||
res: true,
|
||||
},
|
||||
{
|
||||
actual: "v1.2.0",
|
||||
require: "v1.2.0, <=v1.2.3",
|
||||
res: true,
|
||||
},
|
||||
{
|
||||
actual: "1.2.2",
|
||||
require: "v1.2.2",
|
||||
res: true,
|
||||
},
|
||||
{
|
||||
actual: "1.2.02",
|
||||
require: "v1.2.2",
|
||||
res: true,
|
||||
},
|
||||
}
|
||||
for _, testCase := range testCases {
|
||||
result, err := checkSemVer(testCase.actual, testCase.require)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, result, testCase.res)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckAddonVersionMeetRequired(t *testing.T) {
|
||||
k8sClient := &test.MockClient{
|
||||
MockGet: test.NewMockGetFn(nil, func(obj client.Object) error {
|
||||
return nil
|
||||
}),
|
||||
}
|
||||
ctx := context.Background()
|
||||
assert.NoError(t, checkAddonVersionMeetRequired(ctx, &SystemRequirements{VelaVersion: ">=1.2.4"}, k8sClient, nil))
|
||||
|
||||
version2.VelaVersion = "v1.2.3"
|
||||
if err := checkAddonVersionMeetRequired(ctx, &SystemRequirements{VelaVersion: ">=1.2.4"}, k8sClient, nil); err == nil {
|
||||
assert.Error(t, fmt.Errorf("should meet error"))
|
||||
}
|
||||
|
||||
version2.VelaVersion = "v1.2.4"
|
||||
assert.NoError(t, checkAddonVersionMeetRequired(ctx, &SystemRequirements{VelaVersion: ">=1.2.4"}, k8sClient, nil))
|
||||
}
|
||||
|
||||
@@ -35,6 +35,9 @@ var (
|
||||
|
||||
// ErrNotExist means addon not exists
|
||||
ErrNotExist = NewAddonError("addon not exist")
|
||||
|
||||
// ErrVersionMismatch means addon version requirement mismatch
|
||||
ErrVersionMismatch = NewAddonError("addon version requirements mismatch")
|
||||
)
|
||||
|
||||
// WrapErrRateLimit return ErrRateLimit if is the situation, or return error directly
|
||||
|
||||
@@ -21,6 +21,8 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"k8s.io/client-go/discovery"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
@@ -49,8 +51,8 @@ const (
|
||||
)
|
||||
|
||||
// EnableAddon will enable addon with dependency check, source is where addon from.
|
||||
func EnableAddon(ctx context.Context, name string, cli client.Client, apply apply.Applicator, config *rest.Config, r Registry, args map[string]interface{}, cache *Cache) error {
|
||||
h := NewAddonInstaller(ctx, cli, apply, config, &r, args, cache)
|
||||
func EnableAddon(ctx context.Context, name string, cli client.Client, discoveryClient *discovery.DiscoveryClient, apply apply.Applicator, config *rest.Config, r Registry, args map[string]interface{}, cache *Cache) error {
|
||||
h := NewAddonInstaller(ctx, cli, discoveryClient, apply, config, &r, args, cache)
|
||||
pkg, err := h.loadInstallPackage(name)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -76,7 +78,7 @@ func DisableAddon(ctx context.Context, cli client.Client, name string) error {
|
||||
}
|
||||
|
||||
// EnableAddonByLocalDir enable an addon from local dir
|
||||
func EnableAddonByLocalDir(ctx context.Context, name string, dir string, cli client.Client, applicator apply.Applicator, config *rest.Config, args map[string]interface{}) error {
|
||||
func EnableAddonByLocalDir(ctx context.Context, name string, dir string, cli client.Client, dc *discovery.DiscoveryClient, applicator apply.Applicator, config *rest.Config, args map[string]interface{}) error {
|
||||
r := localReader{dir: dir, name: name}
|
||||
metas, err := r.ListAddonMeta()
|
||||
if err != nil {
|
||||
@@ -91,7 +93,15 @@ func EnableAddonByLocalDir(ctx context.Context, name string, dir string, cli cli
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
h := NewAddonInstaller(ctx, cli, applicator, config, &Registry{Name: LocalAddonRegistryName}, args, nil)
|
||||
h := NewAddonInstaller(ctx, cli, dc, applicator, config, &Registry{Name: LocalAddonRegistryName}, args, nil)
|
||||
needEnableAddonNames, err := h.checkDependency(pkg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(needEnableAddonNames) > 0 {
|
||||
return fmt.Errorf("you must first enable dependencies: %v", needEnableAddonNames)
|
||||
}
|
||||
|
||||
err = h.enableAddon(pkg)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@@ -169,7 +169,6 @@ func (r *Registry) BuildReader() (AsyncReader, error) {
|
||||
return NewAsyncReader(g.URL, "", g.Path, g.Token, gitType)
|
||||
}
|
||||
return nil, errors.New("registry don't have enough info to build a reader")
|
||||
|
||||
}
|
||||
|
||||
// GetUIData get UIData of an addon
|
||||
|
||||
@@ -21,6 +21,8 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"k8s.io/client-go/discovery"
|
||||
|
||||
v12 "k8s.io/api/core/v1"
|
||||
crdv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@@ -49,6 +51,7 @@ var testEnv *envtest.Environment
|
||||
var dm discoverymapper.DiscoveryMapper
|
||||
var pd *packages.PackageDiscover
|
||||
var testns string
|
||||
var dc *discovery.DiscoveryClient
|
||||
|
||||
func TestAddon(t *testing.T) {
|
||||
RegisterFailHandler(Fail)
|
||||
@@ -79,6 +82,11 @@ var _ = BeforeSuite(func(done Done) {
|
||||
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(k8sClient).ToNot(BeNil())
|
||||
|
||||
dc, err = discovery.NewDiscoveryClientForConfig(cfg)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(dc).ShouldNot(BeNil())
|
||||
|
||||
dm, err = discoverymapper.New(cfg)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(dm).ToNot(BeNil())
|
||||
|
||||
@@ -54,16 +54,17 @@ type InstallPackage struct {
|
||||
|
||||
// Meta defines the format for a single addon
|
||||
type Meta struct {
|
||||
Name string `json:"name" validate:"required"`
|
||||
Version string `json:"version"`
|
||||
Description string `json:"description"`
|
||||
Icon string `json:"icon"`
|
||||
URL string `json:"url,omitempty"`
|
||||
Tags []string `json:"tags,omitempty"`
|
||||
DeployTo *DeployTo `json:"deployTo,omitempty"`
|
||||
Dependencies []*Dependency `json:"dependencies,omitempty"`
|
||||
NeedNamespace []string `json:"needNamespace,omitempty"`
|
||||
Invisible bool `json:"invisible"`
|
||||
Name string `json:"name" validate:"required"`
|
||||
Version string `json:"version"`
|
||||
Description string `json:"description"`
|
||||
Icon string `json:"icon"`
|
||||
URL string `json:"url,omitempty"`
|
||||
Tags []string `json:"tags,omitempty"`
|
||||
DeployTo *DeployTo `json:"deployTo,omitempty"`
|
||||
Dependencies []*Dependency `json:"dependencies,omitempty"`
|
||||
NeedNamespace []string `json:"needNamespace,omitempty"`
|
||||
Invisible bool `json:"invisible"`
|
||||
SystemRequirements *SystemRequirements `json:"system,omitempty"`
|
||||
}
|
||||
|
||||
// DeployTo defines where the addon to deploy to
|
||||
@@ -84,3 +85,9 @@ type ElementFile struct {
|
||||
Data string
|
||||
Name string
|
||||
}
|
||||
|
||||
// SystemRequirements is this addon need version
|
||||
type SystemRequirements struct {
|
||||
VelaVersion string `json:"vela,omitempty"`
|
||||
KubernetesVersion string `json:"kubernetes,omitempty"`
|
||||
}
|
||||
|
||||
@@ -17,6 +17,7 @@ limitations under the License.
|
||||
package clients
|
||||
|
||||
import (
|
||||
"k8s.io/client-go/discovery"
|
||||
"k8s.io/client-go/rest"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/config"
|
||||
@@ -84,3 +85,16 @@ func GetPackageDiscover() (*packages.PackageDiscover, error) {
|
||||
}
|
||||
return pd, nil
|
||||
}
|
||||
|
||||
// GetDiscoveryClient return a discovery client
|
||||
func GetDiscoveryClient() (*discovery.DiscoveryClient, error) {
|
||||
conf, err := GetKubeConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dc, err := discovery.NewDiscoveryClientForConfig(conf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dc, nil
|
||||
}
|
||||
|
||||
@@ -127,8 +127,8 @@ type AddonBaseStatus struct {
|
||||
type DetailAddonResponse struct {
|
||||
addon.Meta
|
||||
|
||||
APISchema *openapi3.Schema `json:"schema"`
|
||||
UISchema []*utils.UIParameter `json:"uiSchema"`
|
||||
APISchema *openapi3.Schema `json:"schema"`
|
||||
UISchema utils.UISchema `json:"uiSchema"`
|
||||
|
||||
// More details about the addon, e.g. README
|
||||
Detail string `json:"detail,omitempty"`
|
||||
@@ -147,9 +147,9 @@ type AddonDefinition struct {
|
||||
// AddonStatusResponse defines the format of addon status response
|
||||
type AddonStatusResponse struct {
|
||||
AddonBaseStatus
|
||||
Args map[string]string `json:"args"`
|
||||
EnablingProgress *EnablingProgress `json:"enabling_progress,omitempty"`
|
||||
AppStatus common.AppStatus `json:"appStatus,omitempty"`
|
||||
Args map[string]interface{} `json:"args"`
|
||||
EnablingProgress *EnablingProgress `json:"enabling_progress,omitempty"`
|
||||
AppStatus common.AppStatus `json:"appStatus,omitempty"`
|
||||
// the status of multiple clusters
|
||||
Clusters map[string]map[string]interface{} `json:"clusters,omitempty"`
|
||||
}
|
||||
@@ -315,7 +315,7 @@ type ApplicationStatusResponse struct {
|
||||
type ApplicationStatisticsResponse struct {
|
||||
EnvCount int64 `json:"envCount"`
|
||||
TargetCount int64 `json:"targetCount"`
|
||||
RevisonCount int64 `json:"revisonCount"`
|
||||
RevisionCount int64 `json:"revisionCount"`
|
||||
WorkflowCount int64 `json:"workflowCount"`
|
||||
}
|
||||
|
||||
|
||||
@@ -26,6 +26,8 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/client-go/discovery"
|
||||
|
||||
k8stypes "k8s.io/apimachinery/pkg/types"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
@@ -101,6 +103,10 @@ func NewAddonUsecase(cacheTime time.Duration) AddonHandler {
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
dc, err := clients.GetDiscoveryClient()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
ds := pkgaddon.NewRegistryDataStore(kubecli)
|
||||
cache := pkgaddon.NewCache(ds)
|
||||
|
||||
@@ -114,6 +120,7 @@ func NewAddonUsecase(cacheTime time.Duration) AddonHandler {
|
||||
config: config,
|
||||
apply: apply.NewAPIApplicator(kubecli),
|
||||
mutex: new(sync.RWMutex),
|
||||
discoveryClient: dc,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -123,6 +130,7 @@ type defaultAddonHandler struct {
|
||||
kubeClient client.Client
|
||||
config *rest.Config
|
||||
apply apply.Applicator
|
||||
discoveryClient *discovery.DiscoveryClient
|
||||
|
||||
mutex *sync.RWMutex
|
||||
}
|
||||
@@ -204,10 +212,12 @@ func (u *defaultAddonHandler) StatusAddon(ctx context.Context, name string) (*ap
|
||||
if err != nil && !errors2.IsNotFound(err) {
|
||||
return nil, bcode.ErrAddonSecretGet
|
||||
} else if errors2.IsNotFound(err) {
|
||||
res.Args = make(map[string]string, len(sec.Data))
|
||||
for k, v := range sec.Data {
|
||||
res.Args[k] = string(v)
|
||||
}
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
res.Args, err = pkgaddon.FetchArgsFromSecret(&sec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &res, nil
|
||||
@@ -351,15 +361,23 @@ func (u *defaultAddonHandler) EnableAddon(ctx context.Context, name string, args
|
||||
return err
|
||||
}
|
||||
for _, r := range registries {
|
||||
err = pkgaddon.EnableAddon(ctx, name, u.kubeClient, u.apply, u.config, r, args.Args, u.addonRegistryCache)
|
||||
err = pkgaddon.EnableAddon(ctx, name, u.kubeClient, u.discoveryClient, u.apply, u.config, r, args.Args, u.addonRegistryCache)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err != nil && errors.As(err, &pkgaddon.ErrNotExist) {
|
||||
// if reach this line error must is not nil
|
||||
if errors.Is(err, pkgaddon.ErrNotExist) {
|
||||
// one registry return addon not exist error, should not break other registry func
|
||||
continue
|
||||
}
|
||||
|
||||
// wrap this error with special bcode
|
||||
if errors.Is(err, pkgaddon.ErrVersionMismatch) {
|
||||
return bcode.ErrAddonSystemVersionMismatch
|
||||
}
|
||||
// except `addon not found`, other errors should return directly
|
||||
return err
|
||||
}
|
||||
return bcode.ErrAddonNotExist
|
||||
}
|
||||
@@ -411,13 +429,21 @@ func (u *defaultAddonHandler) UpdateAddon(ctx context.Context, name string, args
|
||||
}
|
||||
|
||||
for _, r := range registries {
|
||||
err = pkgaddon.EnableAddon(ctx, name, u.kubeClient, u.apply, u.config, r, args.Args, u.addonRegistryCache)
|
||||
err = pkgaddon.EnableAddon(ctx, name, u.kubeClient, u.discoveryClient, u.apply, u.config, r, args.Args, u.addonRegistryCache)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
if err != nil && !errors.Is(err, pkgaddon.ErrNotExist) {
|
||||
return bcode.WrapGithubRateLimitErr(err)
|
||||
|
||||
if errors.Is(err, pkgaddon.ErrNotExist) {
|
||||
continue
|
||||
}
|
||||
|
||||
// wrap this error with special bcode
|
||||
if errors.Is(err, pkgaddon.ErrVersionMismatch) {
|
||||
return bcode.ErrAddonSystemVersionMismatch
|
||||
}
|
||||
// except `addon not found`, other errors should return directly
|
||||
return err
|
||||
}
|
||||
return bcode.ErrAddonNotExist
|
||||
}
|
||||
|
||||
@@ -1343,7 +1343,7 @@ func (c *applicationUsecaseImpl) Statistics(ctx context.Context, app *model.Appl
|
||||
return &apisv1.ApplicationStatisticsResponse{
|
||||
EnvCount: int64(len(envbinding)),
|
||||
TargetCount: int64(len(targetMap)),
|
||||
RevisonCount: count,
|
||||
RevisionCount: count,
|
||||
WorkflowCount: c.workflowUsecase.CountWorkflow(ctx, app),
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -33,6 +33,7 @@ import (
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
velatypes "github.com/oam-dev/kubevela/apis/types"
|
||||
"github.com/oam-dev/kubevela/pkg/apiserver/clients"
|
||||
"github.com/oam-dev/kubevela/pkg/apiserver/datastore"
|
||||
"github.com/oam-dev/kubevela/pkg/apiserver/log"
|
||||
@@ -198,14 +199,14 @@ func joinClusterByKubeConfigString(ctx context.Context, k8sClient client.Client,
|
||||
defer func() {
|
||||
_ = os.Remove(tmpFileName)
|
||||
}()
|
||||
cluster, err := multicluster.JoinClusterByKubeConfig(ctx, k8sClient, tmpFileName, clusterName)
|
||||
clusterConfig, err := multicluster.JoinClusterByKubeConfig(ctx, k8sClient, tmpFileName, clusterName, multicluster.JoinClusterCreateNamespaceOption(velatypes.DefaultKubeVelaNS))
|
||||
if err != nil {
|
||||
if errors.Is(err, multicluster.ErrClusterExists) {
|
||||
return "", bcode.ErrClusterExistsInKubernetes
|
||||
}
|
||||
return "", errors.Wrapf(err, "failed to join cluster")
|
||||
}
|
||||
return cluster.Server, nil
|
||||
return clusterConfig.Cluster.Server, nil
|
||||
}
|
||||
|
||||
func createClusterModelFromRequest(req apis.CreateClusterRequest, oldCluster *model.Cluster) (newCluster *model.Cluster) {
|
||||
|
||||
@@ -296,6 +296,9 @@ func patchSchema(defaultSchema, customSchema []*utils.UIParameter) []*utils.UIPa
|
||||
if cusSchema.Additional != nil {
|
||||
dSchema.Additional = cusSchema.Additional
|
||||
}
|
||||
if cusSchema.Style != nil {
|
||||
dSchema.Style = cusSchema.Style
|
||||
}
|
||||
}
|
||||
}
|
||||
sort.Slice(defaultSchema, func(i, j int) bool {
|
||||
|
||||
@@ -61,6 +61,9 @@ var (
|
||||
|
||||
// ErrAddonDependencyNotSatisfy means addon's dependencies is not enabled
|
||||
ErrAddonDependencyNotSatisfy = NewBcode(500, 50017, "addon's dependencies is not enabled")
|
||||
|
||||
// ErrAddonSystemVersionMismatch means addon's version required mismatch
|
||||
ErrAddonSystemVersionMismatch = NewBcode(400, 50018, "addon's system version requirement mismatch")
|
||||
)
|
||||
|
||||
// isGithubRateLimit check if error is github rate limit
|
||||
|
||||
@@ -21,6 +21,9 @@ import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
// UISchema ui schema
|
||||
type UISchema []*UIParameter
|
||||
|
||||
// UIParameter Structured import table simple UI model
|
||||
type UIParameter struct {
|
||||
Sort uint `json:"sort"`
|
||||
@@ -29,6 +32,7 @@ type UIParameter struct {
|
||||
Validate *Validate `json:"validate,omitempty"`
|
||||
JSONKey string `json:"jsonKey"`
|
||||
UIType string `json:"uiType"`
|
||||
Style *Style `json:"style,omitempty"`
|
||||
// means disable parameter in ui
|
||||
Disable *bool `json:"disable,omitempty"`
|
||||
SubParameterGroupOption []GroupOption `json:"subParameterGroupOption,omitempty"`
|
||||
@@ -37,6 +41,12 @@ type UIParameter struct {
|
||||
Additional *bool `json:"additional,omitempty"`
|
||||
}
|
||||
|
||||
// Style ui style
|
||||
type Style struct {
|
||||
// ColSpan the width of a responsive layout
|
||||
ColSpan int `json:"colSpan"`
|
||||
}
|
||||
|
||||
// GroupOption define multiple data structure composition options.
|
||||
type GroupOption struct {
|
||||
Label string `json:"label"`
|
||||
@@ -53,6 +63,8 @@ type Validate struct {
|
||||
Pattern string `json:"pattern,omitempty"`
|
||||
Options []Option `json:"options,omitempty"`
|
||||
DefaultValue interface{} `json:"defaultValue,omitempty"`
|
||||
// the parameter cannot be changed twice.
|
||||
Immutable bool `json:"immutable"`
|
||||
}
|
||||
|
||||
// Option select option
|
||||
@@ -61,13 +73,6 @@ type Option struct {
|
||||
Value interface{} `json:"value"`
|
||||
}
|
||||
|
||||
// ParseUIParameterFromDefinition cue of parameter in Definitions was analyzed to obtain the form description model.
|
||||
func ParseUIParameterFromDefinition(definition []byte) ([]*UIParameter, error) {
|
||||
var params []*UIParameter
|
||||
|
||||
return params, nil
|
||||
}
|
||||
|
||||
// FirstUpper Sets the first letter of the string to upper.
|
||||
func FirstUpper(s string) string {
|
||||
if s == "" {
|
||||
@@ -113,16 +113,6 @@ func (c *applicationWebService) GetWebService() *restful.WebService {
|
||||
Returns(400, "", bcode.Bcode{}).
|
||||
Writes(apis.ApplicationStatisticsResponse{}))
|
||||
|
||||
ws.Route(ws.PUT("/{name}").To(c.updateApplication).
|
||||
Doc("update one application ").
|
||||
Metadata(restfulspec.KeyOpenAPITags, tags).
|
||||
Filter(c.appCheckFilter).
|
||||
Param(ws.PathParameter("name", "identifier of the application ").DataType("string")).
|
||||
Reads(apis.UpdateApplicationRequest{}).
|
||||
Returns(200, "", apis.ApplicationBase{}).
|
||||
Returns(400, "", bcode.Bcode{}).
|
||||
Writes(apis.ApplicationBase{}))
|
||||
|
||||
ws.Route(ws.POST("/{name}/triggers").To(c.createApplicationTrigger).
|
||||
Doc("create one application trigger").
|
||||
Metadata(restfulspec.KeyOpenAPITags, tags).
|
||||
|
||||
@@ -224,7 +224,8 @@ func (af *Appfile) PrepareWorkflowAndPolicy(ctx context.Context) ([]*unstructure
|
||||
}
|
||||
|
||||
func (af *Appfile) generateUnstructured(workload *Workload) (*unstructured.Unstructured, error) {
|
||||
un, err := generateUnstructuredFromCUEModule(workload, af.Name, af.AppRevisionName, af.Namespace, af.Components, af.Artifacts)
|
||||
ctxData := GenerateContextDataFromAppFile(af, workload.Name)
|
||||
un, err := generateUnstructuredFromCUEModule(workload, af.Artifacts, ctxData)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -235,13 +236,13 @@ func (af *Appfile) generateUnstructured(workload *Workload) (*unstructured.Unstr
|
||||
return un, nil
|
||||
}
|
||||
|
||||
func generateUnstructuredFromCUEModule(wl *Workload, appName, revision, ns string, components []common.ApplicationComponent, artifacts []*types.ComponentManifest) (*unstructured.Unstructured, error) {
|
||||
pCtx := process.NewPolicyContext(ns, wl.Name, appName, revision, components)
|
||||
func generateUnstructuredFromCUEModule(wl *Workload, artifacts []*types.ComponentManifest, ctxData process.ContextData) (*unstructured.Unstructured, error) {
|
||||
pCtx := process.NewContext(ctxData)
|
||||
pCtx.PushData(model.ContextDataArtifacts, prepareArtifactsData(artifacts))
|
||||
if err := wl.EvalContext(pCtx); err != nil {
|
||||
return nil, errors.Wrapf(err, "evaluate base template app=%s in namespace=%s", appName, ns)
|
||||
return nil, errors.Wrapf(err, "evaluate base template app=%s in namespace=%s", ctxData.AppName, ctxData.Namespace)
|
||||
}
|
||||
return makeWorkloadWithContext(pCtx, wl, ns, appName)
|
||||
return makeWorkloadWithContext(pCtx, wl, ctxData.Namespace, ctxData.AppName)
|
||||
}
|
||||
|
||||
// artifacts contains resources in unstructured shape of all components
|
||||
@@ -292,17 +293,18 @@ func (af *Appfile) GenerateComponentManifest(wl *Workload) (*types.ComponentMani
|
||||
if af.Namespace == "" {
|
||||
af.Namespace = corev1.NamespaceDefault
|
||||
}
|
||||
ctxData := GenerateContextDataFromAppFile(af, wl.Name)
|
||||
// generate context here to avoid nil pointer panic
|
||||
wl.Ctx = NewBasicContext(af.Name, wl.Name, af.AppRevisionName, af.Namespace, wl.Params)
|
||||
wl.Ctx = NewBasicContext(GenerateContextDataFromAppFile(af, wl.Name), wl.Params)
|
||||
switch wl.CapabilityCategory {
|
||||
case types.HelmCategory:
|
||||
return generateComponentFromHelmModule(wl, af.Name, af.AppRevisionName, af.Namespace)
|
||||
return generateComponentFromHelmModule(wl, ctxData)
|
||||
case types.KubeCategory:
|
||||
return generateComponentFromKubeModule(wl, af.Name, af.AppRevisionName, af.Namespace)
|
||||
return generateComponentFromKubeModule(wl, ctxData)
|
||||
case types.TerraformCategory:
|
||||
return generateComponentFromTerraformModule(wl, af.Name, af.Namespace)
|
||||
default:
|
||||
return generateComponentFromCUEModule(wl, af.Name, af.AppRevisionName, af.Namespace)
|
||||
return generateComponentFromCUEModule(wl, ctxData)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -471,31 +473,31 @@ func (af *Appfile) setWorkloadRefToTrait(wlRef corev1.ObjectReference, trait *un
|
||||
}
|
||||
|
||||
// PrepareProcessContext prepares a DSL process Context
|
||||
func PrepareProcessContext(wl *Workload, applicationName, revision, namespace string) (process.Context, error) {
|
||||
func PrepareProcessContext(wl *Workload, ctxData process.ContextData) (process.Context, error) {
|
||||
if wl.Ctx == nil {
|
||||
wl.Ctx = NewBasicContext(applicationName, wl.Name, revision, namespace, wl.Params)
|
||||
wl.Ctx = NewBasicContext(ctxData, wl.Params)
|
||||
}
|
||||
if err := wl.EvalContext(wl.Ctx); err != nil {
|
||||
return nil, errors.Wrapf(err, "evaluate base template app=%s in namespace=%s", applicationName, namespace)
|
||||
return nil, errors.Wrapf(err, "evaluate base template app=%s in namespace=%s", ctxData.AppName, ctxData.Namespace)
|
||||
}
|
||||
return wl.Ctx, nil
|
||||
}
|
||||
|
||||
// NewBasicContext prepares a basic DSL process Context
|
||||
func NewBasicContext(applicationName, workloadName, revision, namespace string, params map[string]interface{}) process.Context {
|
||||
pCtx := process.NewContext(namespace, workloadName, applicationName, revision)
|
||||
func NewBasicContext(contextData process.ContextData, params map[string]interface{}) process.Context {
|
||||
pCtx := process.NewContext(contextData)
|
||||
if params != nil {
|
||||
pCtx.SetParameters(params)
|
||||
}
|
||||
return pCtx
|
||||
}
|
||||
|
||||
func generateComponentFromCUEModule(wl *Workload, appName, revision, ns string) (*types.ComponentManifest, error) {
|
||||
pCtx, err := PrepareProcessContext(wl, appName, revision, ns)
|
||||
func generateComponentFromCUEModule(wl *Workload, ctxData process.ContextData) (*types.ComponentManifest, error) {
|
||||
pCtx, err := PrepareProcessContext(wl, ctxData)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return baseGenerateComponent(pCtx, wl, appName, ns)
|
||||
return baseGenerateComponent(pCtx, wl, ctxData.AppName, ctxData.Namespace)
|
||||
}
|
||||
|
||||
func generateComponentFromTerraformModule(wl *Workload, appName, ns string) (*types.ComponentManifest, error) {
|
||||
@@ -504,6 +506,7 @@ func generateComponentFromTerraformModule(wl *Workload, appName, ns string) (*ty
|
||||
|
||||
func baseGenerateComponent(pCtx process.Context, wl *Workload, appName, ns string) (*types.ComponentManifest, error) {
|
||||
var err error
|
||||
pCtx.PushData(model.ContextComponentType, wl.Type)
|
||||
for _, tr := range wl.Traits {
|
||||
if err := tr.EvalContext(pCtx); err != nil {
|
||||
return nil, errors.Wrapf(err, "evaluate template trait=%s app=%s", tr.Name, wl.Name)
|
||||
@@ -663,7 +666,7 @@ output: {
|
||||
return templateStr, nil
|
||||
}
|
||||
|
||||
func generateComponentFromKubeModule(wl *Workload, appName, revision, ns string) (*types.ComponentManifest, error) {
|
||||
func generateComponentFromKubeModule(wl *Workload, ctxData process.ContextData) (*types.ComponentManifest, error) {
|
||||
templateStr, err := GenerateCUETemplate(wl)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -671,7 +674,7 @@ func generateComponentFromKubeModule(wl *Workload, appName, revision, ns string)
|
||||
wl.FullTemplate.TemplateStr = templateStr
|
||||
|
||||
// re-use the way CUE module generates comp & acComp
|
||||
compManifest, err := generateComponentFromCUEModule(wl, appName, revision, ns)
|
||||
compManifest, err := generateComponentFromCUEModule(wl, ctxData)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -838,7 +841,7 @@ func setParameterValuesToKubeObj(obj *unstructured.Unstructured, values paramVal
|
||||
return nil
|
||||
}
|
||||
|
||||
func generateComponentFromHelmModule(wl *Workload, appName, revision, ns string) (*types.ComponentManifest, error) {
|
||||
func generateComponentFromHelmModule(wl *Workload, ctxData process.ContextData) (*types.ComponentManifest, error) {
|
||||
templateStr, err := GenerateCUETemplate(wl)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -848,22 +851,38 @@ func generateComponentFromHelmModule(wl *Workload, appName, revision, ns string)
|
||||
// re-use the way CUE module generates comp & acComp
|
||||
compManifest := &types.ComponentManifest{
|
||||
Name: wl.Name,
|
||||
Namespace: ns,
|
||||
Namespace: ctxData.Namespace,
|
||||
ExternalRevision: wl.ExternalRevision,
|
||||
StandardWorkload: &unstructured.Unstructured{},
|
||||
}
|
||||
|
||||
if wl.FullTemplate.Reference.Type != types.AutoDetectWorkloadDefinition {
|
||||
compManifest, err = generateComponentFromCUEModule(wl, appName, revision, ns)
|
||||
compManifest, err = generateComponentFromCUEModule(wl, ctxData)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
rls, repo, err := helm.RenderHelmReleaseAndHelmRepo(wl.FullTemplate.Helm, wl.Name, appName, ns, wl.Params)
|
||||
rls, repo, err := helm.RenderHelmReleaseAndHelmRepo(wl.FullTemplate.Helm, wl.Name, ctxData.AppName, ctxData.Namespace, wl.Params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
compManifest.PackagedWorkloadResources = []*unstructured.Unstructured{rls, repo}
|
||||
return compManifest, nil
|
||||
}
|
||||
|
||||
// GenerateContextDataFromAppFile generates process context data from app file
|
||||
func GenerateContextDataFromAppFile(appfile *Appfile, wlName string) process.ContextData {
|
||||
data := process.ContextData{
|
||||
Namespace: appfile.Namespace,
|
||||
AppName: appfile.Name,
|
||||
CompName: wlName,
|
||||
AppRevisionName: appfile.AppRevisionName,
|
||||
Components: appfile.Components,
|
||||
}
|
||||
if appfile.AppAnnotations != nil {
|
||||
data.WorkflowName = appfile.AppAnnotations[oam.AnnotationWorkflowName]
|
||||
data.PublishVersion = appfile.AppAnnotations[oam.AnnotationPublishVersion]
|
||||
}
|
||||
return data
|
||||
}
|
||||
|
||||
@@ -25,6 +25,7 @@ import (
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"cuelang.org/go/cue"
|
||||
"github.com/crossplane/crossplane-runtime/pkg/test"
|
||||
"github.com/google/go-cmp/cmp"
|
||||
terraformtypes "github.com/oam-dev/terraform-controller/api/types/crossplane-runtime"
|
||||
@@ -42,6 +43,7 @@ import (
|
||||
oamtypes "github.com/oam-dev/kubevela/apis/types"
|
||||
"github.com/oam-dev/kubevela/pkg/cue/definition"
|
||||
"github.com/oam-dev/kubevela/pkg/cue/model"
|
||||
"github.com/oam-dev/kubevela/pkg/oam"
|
||||
"github.com/oam-dev/kubevela/pkg/oam/util"
|
||||
)
|
||||
|
||||
@@ -872,7 +874,12 @@ variable "password" {
|
||||
revision: "v1",
|
||||
}
|
||||
|
||||
pCtx := NewBasicContext(args.appName, args.wl.Name, args.revision, ns, args.wl.Params)
|
||||
ctxData := GenerateContextDataFromAppFile(&Appfile{
|
||||
Name: args.appName,
|
||||
Namespace: ns,
|
||||
AppRevisionName: args.revision,
|
||||
}, args.wl.Name)
|
||||
pCtx := NewBasicContext(ctxData, args.wl.Params)
|
||||
comp, err := evalWorkloadWithContext(pCtx, args.wl, ns, args.appName, compName)
|
||||
Expect(comp.StandardWorkload).ShouldNot(BeNil())
|
||||
Expect(comp.Name).Should(Equal(""))
|
||||
@@ -1322,3 +1329,60 @@ spec:
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestBaseGenerateComponent(t *testing.T) {
|
||||
var appName = "test-app"
|
||||
var ns = "test-ns"
|
||||
var traitName = "mytrait"
|
||||
var wlName = "my-wl-1"
|
||||
var workflowName = "my-wf"
|
||||
var publishVersion = "123"
|
||||
ctxData := GenerateContextDataFromAppFile(&Appfile{
|
||||
Name: appName,
|
||||
Namespace: ns,
|
||||
AppAnnotations: map[string]string{
|
||||
oam.AnnotationWorkflowName: workflowName,
|
||||
oam.AnnotationPublishVersion: publishVersion,
|
||||
},
|
||||
}, wlName)
|
||||
pContext := NewBasicContext(ctxData, nil)
|
||||
base := `
|
||||
apiVersion: "apps/v1"
|
||||
kind: "Deployment"
|
||||
spec: {
|
||||
template: {
|
||||
spec: containers: [{
|
||||
image: "nginx"
|
||||
}]
|
||||
}
|
||||
}
|
||||
`
|
||||
var r cue.Runtime
|
||||
inst, err := r.Compile("-", base)
|
||||
assert.NilError(t, err)
|
||||
bs, _ := model.NewBase(inst.Value())
|
||||
err = pContext.SetBase(bs)
|
||||
assert.NilError(t, err)
|
||||
tr := &Trait{
|
||||
Name: traitName,
|
||||
engine: definition.NewTraitAbstractEngine(traitName, nil),
|
||||
Template: `outputs:mytrait:{
|
||||
if context.componentType == "stateless" {
|
||||
kind: "Deployment"
|
||||
}
|
||||
if context.componentType == "stateful" {
|
||||
kind: "StatefulSet"
|
||||
}
|
||||
name: context.name
|
||||
envSourceContainerName: context.name
|
||||
workflowName: context.workflowName
|
||||
publishVersion: context.publishVersion
|
||||
}`,
|
||||
}
|
||||
wl := &Workload{Type: "stateful", Traits: []*Trait{tr}}
|
||||
cm, err := baseGenerateComponent(pContext, wl, appName, ns)
|
||||
assert.NilError(t, err)
|
||||
assert.Equal(t, cm.Traits[0].Object["kind"], "StatefulSet")
|
||||
assert.Equal(t, cm.Traits[0].Object["workflowName"], workflowName)
|
||||
assert.Equal(t, cm.Traits[0].Object["publishVersion"], publishVersion)
|
||||
}
|
||||
|
||||
@@ -33,7 +33,8 @@ func (p *Parser) ValidateCUESchematicAppfile(a *Appfile) error {
|
||||
if wl.CapabilityCategory != types.CUECategory {
|
||||
continue
|
||||
}
|
||||
pCtx, err := newValidationProcessContext(wl, a.Name, a.AppRevisionName, a.Namespace)
|
||||
ctxData := GenerateContextDataFromAppFile(a, wl.Name)
|
||||
pCtx, err := newValidationProcessContext(wl, ctxData)
|
||||
if err != nil {
|
||||
return errors.WithMessagef(err, "cannot create the validation process context of app=%s in namespace=%s", a.Name, a.Namespace)
|
||||
}
|
||||
@@ -49,7 +50,7 @@ func (p *Parser) ValidateCUESchematicAppfile(a *Appfile) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func newValidationProcessContext(wl *Workload, appName, revisionName, ns string) (process.Context, error) {
|
||||
func newValidationProcessContext(wl *Workload, ctxData process.ContextData) (process.Context, error) {
|
||||
baseHooks := []process.BaseHook{
|
||||
// add more hook funcs here to validate CUE base
|
||||
}
|
||||
@@ -58,9 +59,11 @@ func newValidationProcessContext(wl *Workload, appName, revisionName, ns string)
|
||||
validateAuxiliaryNameUnique(),
|
||||
}
|
||||
|
||||
pCtx := process.NewContextWithHooks(ns, wl.Name, appName, revisionName, baseHooks, auxiliaryHooks)
|
||||
ctxData.BaseHooks = baseHooks
|
||||
ctxData.AuxiliaryHooks = auxiliaryHooks
|
||||
pCtx := process.NewContext(ctxData)
|
||||
if err := wl.EvalContext(pCtx); err != nil {
|
||||
return nil, errors.Wrapf(err, "evaluate base template app=%s in namespace=%s", appName, ns)
|
||||
return nil, errors.Wrapf(err, "evaluate base template app=%s in namespace=%s", ctxData.AppName, ctxData.Namespace)
|
||||
}
|
||||
return pCtx, nil
|
||||
}
|
||||
|
||||
@@ -58,7 +58,13 @@ var _ = Describe("Test validate CUE schematic Appfile", func() {
|
||||
},
|
||||
engine: definition.NewWorkloadAbstractEngine("myweb", pd),
|
||||
}
|
||||
pCtx, err := newValidationProcessContext(wl, "myapp", "myapp-v1", "test-ns")
|
||||
|
||||
ctxData := GenerateContextDataFromAppFile(&Appfile{
|
||||
Name: "myapp",
|
||||
Namespace: "test-ns",
|
||||
AppRevisionName: "myapp-v1",
|
||||
}, wl.Name)
|
||||
pCtx, err := newValidationProcessContext(wl, ctxData)
|
||||
Expect(err).Should(BeNil())
|
||||
Eventually(func() string {
|
||||
for _, tr := range wl.Traits {
|
||||
|
||||
@@ -51,7 +51,7 @@ func (c *HTTPCmd) Run(meta *registry.Meta) (res interface{}, err error) {
|
||||
var (
|
||||
r io.Reader
|
||||
client = &http.Client{
|
||||
Transport: &http.Transport{},
|
||||
Transport: http.DefaultTransport,
|
||||
Timeout: time.Second * 3,
|
||||
}
|
||||
)
|
||||
|
||||
@@ -21,7 +21,6 @@ import (
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/oam-dev/kubevela/pkg/oam"
|
||||
"github.com/oam-dev/kubevela/pkg/resourcetracker"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
@@ -87,15 +86,13 @@ func DefaultNewControllerClient(cache cache.Cache, config *rest.Config, options
|
||||
AddFunc: func(obj interface{}) {
|
||||
lock.Lock()
|
||||
rtCount++
|
||||
metrics.ResourceTrackerNumberGauge.WithLabelValues(
|
||||
metrics.ExtractMetricValuesFromObjectLabel(obj, oam.LabelAppName, oam.LabelAppNamespace)...).Set(float64(rtCount))
|
||||
metrics.ResourceTrackerNumberGauge.WithLabelValues("application").Set(float64(rtCount))
|
||||
lock.Unlock()
|
||||
},
|
||||
DeleteFunc: func(obj interface{}) {
|
||||
lock.Lock()
|
||||
rtCount--
|
||||
metrics.ResourceTrackerNumberGauge.WithLabelValues(
|
||||
metrics.ExtractMetricValuesFromObjectLabel(obj, oam.LabelAppName, oam.LabelAppNamespace)...).Set(float64(rtCount))
|
||||
metrics.ResourceTrackerNumberGauge.WithLabelValues("application").Set(float64(rtCount))
|
||||
lock.Unlock()
|
||||
},
|
||||
})
|
||||
|
||||
@@ -1,153 +0,0 @@
|
||||
/*
|
||||
Copyright 2021 The KubeVela Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package clustermanager
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/oam-dev/cluster-gateway/pkg/apis/cluster/v1alpha1"
|
||||
"github.com/pkg/errors"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
crdv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
|
||||
apierror "k8s.io/apimachinery/pkg/api/errors"
|
||||
k8stypes "k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
clusterv1 "open-cluster-management.io/api/cluster/v1"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/oam-dev/kubevela/apis/types"
|
||||
"github.com/oam-dev/kubevela/pkg/multicluster"
|
||||
"github.com/oam-dev/kubevela/pkg/utils/common"
|
||||
)
|
||||
|
||||
// GetClient returns a kube client for given kubeConfigData
|
||||
func GetClient(kubeConfigData []byte) (client.Client, error) {
|
||||
clientConfig, err := clientcmd.NewClientConfigFromBytes(kubeConfigData)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
restConfig, err := clientConfig.ClientConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return client.New(restConfig, client.Options{Scheme: common.Scheme})
|
||||
}
|
||||
|
||||
// GetRegisteredClusters will get all registered clusters in control plane
|
||||
func GetRegisteredClusters(c client.Client) ([]types.Cluster, error) {
|
||||
var clusters []types.Cluster
|
||||
secrets := corev1.SecretList{}
|
||||
if err := c.List(context.Background(), &secrets, client.HasLabels{v1alpha1.LabelKeyClusterCredentialType}, client.InNamespace(multicluster.ClusterGatewaySecretNamespace)); err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get clusterSecret secrets")
|
||||
}
|
||||
for _, clusterSecret := range secrets.Items {
|
||||
endpoint := string(clusterSecret.Data["endpoint"])
|
||||
if endp, ok := clusterSecret.GetLabels()[v1alpha1.LabelKeyClusterEndpointType]; ok {
|
||||
endpoint = endp
|
||||
}
|
||||
clusters = append(clusters, types.Cluster{
|
||||
Name: clusterSecret.Name,
|
||||
Type: clusterSecret.GetLabels()[v1alpha1.LabelKeyClusterCredentialType],
|
||||
EndPoint: endpoint,
|
||||
Accepted: true,
|
||||
})
|
||||
}
|
||||
|
||||
crdName := k8stypes.NamespacedName{Name: "managedclusters." + clusterv1.GroupName}
|
||||
if err := c.Get(context.Background(), crdName, &crdv1.CustomResourceDefinition{}); err != nil {
|
||||
if apierror.IsNotFound(err) {
|
||||
return clusters, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
managedClusters := clusterv1.ManagedClusterList{}
|
||||
if err := c.List(context.Background(), &managedClusters); err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get managed clusters")
|
||||
}
|
||||
for _, cluster := range managedClusters.Items {
|
||||
if len(cluster.Spec.ManagedClusterClientConfigs) != 0 {
|
||||
clusters = append(clusters, types.Cluster{
|
||||
Name: cluster.Name,
|
||||
Type: "OCM ManagedServiceAccount",
|
||||
EndPoint: "-",
|
||||
Accepted: cluster.Spec.HubAcceptsClient,
|
||||
})
|
||||
}
|
||||
}
|
||||
return clusters, nil
|
||||
}
|
||||
|
||||
// EnsureClusterNotExists will check the cluster is not existed in control plane
|
||||
func EnsureClusterNotExists(c client.Client, clusterName string) error {
|
||||
exist, err := clusterExists(c, clusterName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if exist {
|
||||
return fmt.Errorf("cluster %s already exists", clusterName)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// EnsureClusterExists will check the cluster is existed in control plane
|
||||
func EnsureClusterExists(c client.Client, clusterName string) error {
|
||||
exist, err := clusterExists(c, clusterName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !exist {
|
||||
return fmt.Errorf("cluster %s not exists", clusterName)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// clusterExists will check whether the cluster exist or not
|
||||
func clusterExists(c client.Client, clusterName string) (bool, error) {
|
||||
err := c.Get(context.Background(),
|
||||
k8stypes.NamespacedName{
|
||||
Name: clusterName,
|
||||
Namespace: multicluster.ClusterGatewaySecretNamespace,
|
||||
},
|
||||
&corev1.Secret{})
|
||||
if err == nil {
|
||||
return true, nil
|
||||
}
|
||||
if !apierror.IsNotFound(err) {
|
||||
return false, errors.Wrapf(err, "failed to check duplicate cluster")
|
||||
}
|
||||
|
||||
crdName := k8stypes.NamespacedName{Name: "managedclusters." + clusterv1.GroupName}
|
||||
if err = c.Get(context.Background(), crdName, &crdv1.CustomResourceDefinition{}); err != nil {
|
||||
if apierror.IsNotFound(err) {
|
||||
return false, nil
|
||||
}
|
||||
return false, errors.Wrapf(err, "failed to get managedcluster CRD to check duplicate cluster")
|
||||
}
|
||||
err = c.Get(context.Background(), k8stypes.NamespacedName{
|
||||
Name: clusterName,
|
||||
Namespace: multicluster.ClusterGatewaySecretNamespace,
|
||||
}, &clusterv1.ManagedCluster{})
|
||||
if err == nil {
|
||||
return true, nil
|
||||
}
|
||||
if !apierror.IsNotFound(err) {
|
||||
return false, errors.Wrapf(err, "failed to check duplicate cluster")
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
@@ -83,4 +83,7 @@ type Args struct {
|
||||
|
||||
// EnableCompatibility indicates that will change some functions of controller to adapt to multiple platforms, such as asi.
|
||||
EnableCompatibility bool
|
||||
|
||||
// IgnoreAppWithoutControllerRequirement indicates that application controller will not process the app without 'app.oam.dev/controller-version-require' annotation.
|
||||
IgnoreAppWithoutControllerRequirement bool
|
||||
}
|
||||
|
||||
@@ -92,6 +92,8 @@ type options struct {
|
||||
appRevisionLimit int
|
||||
concurrentReconciles int
|
||||
disableStatusUpdate bool
|
||||
ignoreAppNoCtrlReq bool
|
||||
controllerVersion string
|
||||
}
|
||||
|
||||
// +kubebuilder:rbac:groups=core.oam.dev,resources=applications,verbs=get;list;watch;create;update;patch;delete
|
||||
@@ -100,6 +102,7 @@ type options struct {
|
||||
// Reconcile process app event
|
||||
// nolint:gocyclo
|
||||
func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, common2.ReconcileTimeout)
|
||||
defer cancel()
|
||||
|
||||
@@ -117,6 +120,11 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
|
||||
return r.result(client.IgnoreNotFound(err)).ret()
|
||||
}
|
||||
|
||||
if !r.matchControllerRequirement(app) {
|
||||
logCtx.Info("skip app: not match the controller requirement of app")
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
timeReporter := timeReconcile(app)
|
||||
defer timeReporter()
|
||||
|
||||
@@ -590,5 +598,19 @@ func parseOptions(args core.Args) options {
|
||||
disableStatusUpdate: args.EnableCompatibility,
|
||||
appRevisionLimit: args.AppRevisionLimit,
|
||||
concurrentReconciles: args.ConcurrentReconciles,
|
||||
ignoreAppNoCtrlReq: args.IgnoreAppWithoutControllerRequirement,
|
||||
controllerVersion: version.VelaVersion,
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Reconciler) matchControllerRequirement(app *v1beta1.Application) bool {
|
||||
if app.Annotations != nil {
|
||||
if requireVersion, ok := app.Annotations[oam.AnnotationControllerRequirement]; ok {
|
||||
return requireVersion == r.controllerVersion
|
||||
}
|
||||
}
|
||||
if r.ignoreAppNoCtrlReq {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -2275,7 +2275,119 @@ var _ = Describe("Test Application Controller", func() {
|
||||
Namespace: app.Namespace,
|
||||
}, checkWeb)).Should(BeNil())
|
||||
Expect(*(checkWeb.Spec.Replicas)).Should(BeEquivalentTo(int32(0)))
|
||||
})
|
||||
|
||||
It("app apply resource in parallel", func() {
|
||||
wfDef := &v1beta1.WorkflowStepDefinition{}
|
||||
wfDefJson, _ := yaml.YAMLToJSON([]byte(applyInParallelWorkflowDefinitionYaml))
|
||||
Expect(json.Unmarshal(wfDefJson, wfDef)).Should(BeNil())
|
||||
Expect(k8sClient.Create(ctx, wfDef.DeepCopy())).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
|
||||
|
||||
ns := &corev1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "vela-test-apply-in-parallel",
|
||||
},
|
||||
}
|
||||
app := appwithNoTrait.DeepCopy()
|
||||
app.Name = "vela-test-app"
|
||||
app.SetNamespace(ns.Name)
|
||||
app.Spec.Workflow = &v1beta1.Workflow{
|
||||
Steps: []v1beta1.WorkflowStep{{
|
||||
Name: "apply-in-parallel",
|
||||
Type: "apply-test",
|
||||
Properties: &runtime.RawExtension{Raw: []byte(`{"parallelism": 20}`)},
|
||||
}},
|
||||
}
|
||||
Expect(k8sClient.Create(ctx, ns)).Should(BeNil())
|
||||
Expect(k8sClient.Create(ctx, app)).Should(BeNil())
|
||||
appKey := client.ObjectKey{
|
||||
Name: app.Name,
|
||||
Namespace: app.Namespace,
|
||||
}
|
||||
_, err := testutil.ReconcileOnceAfterFinalizer(reconciler, reconcile.Request{NamespacedName: appKey})
|
||||
Expect(err).Should(BeNil())
|
||||
|
||||
deployList := new(v1.DeploymentList)
|
||||
Expect(k8sClient.List(ctx, deployList, client.InNamespace(app.Namespace))).Should(BeNil())
|
||||
Expect(len(deployList.Items)).Should(Equal(20))
|
||||
|
||||
checkApp := new(v1beta1.Application)
|
||||
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(Succeed())
|
||||
rt := new(v1beta1.ResourceTracker)
|
||||
expectRTName := fmt.Sprintf("%s-%s", checkApp.Status.LatestRevision.Name, checkApp.GetNamespace())
|
||||
Eventually(func() error {
|
||||
return k8sClient.Get(ctx, client.ObjectKey{Name: expectRTName}, rt)
|
||||
}, 10*time.Second, 500*time.Millisecond).Should(Succeed())
|
||||
|
||||
Expect(len(rt.Spec.ManagedResources)).Should(Equal(20))
|
||||
})
|
||||
|
||||
It("test controller requirement", func() {
|
||||
|
||||
ns := corev1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-controller-requirement",
|
||||
},
|
||||
}
|
||||
Expect(k8sClient.Create(context.Background(), &ns)).Should(BeNil())
|
||||
|
||||
appWithoutCtrlReq := appwithNoTrait.DeepCopy()
|
||||
appWithoutCtrlReq.SetNamespace(ns.Name)
|
||||
appWithoutCtrlReq.SetName("app-no-ctrl-req")
|
||||
Expect(k8sClient.Create(context.Background(), appWithoutCtrlReq)).Should(BeNil())
|
||||
|
||||
appWithCtrlReqV1 := appwithNoTrait.DeepCopy()
|
||||
appWithCtrlReqV1.SetNamespace(ns.Name)
|
||||
appWithCtrlReqV1.SetName("app-with-ctrl-v1")
|
||||
appWithCtrlReqV1.Annotations = map[string]string{
|
||||
oam.AnnotationControllerRequirement: "v1",
|
||||
}
|
||||
Expect(k8sClient.Create(context.Background(), appWithCtrlReqV1)).Should(BeNil())
|
||||
|
||||
appWithCtrlReqV2 := appwithNoTrait.DeepCopy()
|
||||
appWithCtrlReqV2.SetNamespace(ns.Name)
|
||||
appWithCtrlReqV2.SetName("app-with-ctrl-v2")
|
||||
appWithCtrlReqV2.Annotations = map[string]string{
|
||||
oam.AnnotationControllerRequirement: "v2",
|
||||
}
|
||||
Expect(k8sClient.Create(context.Background(), appWithCtrlReqV2)).Should(BeNil())
|
||||
|
||||
v1OREmptyReconciler := *reconciler
|
||||
v1OREmptyReconciler.ignoreAppNoCtrlReq = false
|
||||
v1OREmptyReconciler.controllerVersion = "v1"
|
||||
|
||||
v2OnlyReconciler := *reconciler
|
||||
v2OnlyReconciler.ignoreAppNoCtrlReq = true
|
||||
v2OnlyReconciler.controllerVersion = "v2"
|
||||
|
||||
check := func(r reconcile.Reconciler, app *v1beta1.Application, do bool) {
|
||||
testutil.ReconcileOnceAfterFinalizer(r, reconcile.Request{NamespacedName: client.ObjectKey{
|
||||
Name: app.Name,
|
||||
Namespace: app.Namespace,
|
||||
}})
|
||||
checkApp := &v1beta1.Application{}
|
||||
Expect(k8sClient.Get(context.Background(), client.ObjectKey{
|
||||
Name: app.Name,
|
||||
Namespace: app.Namespace,
|
||||
}, checkApp)).Should(BeNil())
|
||||
|
||||
if do {
|
||||
Expect(checkApp.Annotations[oam.AnnotationKubeVelaVersion]).ShouldNot(BeEmpty())
|
||||
} else {
|
||||
if checkApp.Annotations == nil {
|
||||
return
|
||||
}
|
||||
Expect(checkApp.Annotations[oam.AnnotationKubeVelaVersion]).Should(BeEmpty())
|
||||
}
|
||||
}
|
||||
|
||||
check(&v2OnlyReconciler, appWithoutCtrlReq, false)
|
||||
check(&v2OnlyReconciler, appWithCtrlReqV1, false)
|
||||
check(&v1OREmptyReconciler, appWithCtrlReqV2, false)
|
||||
|
||||
check(&v1OREmptyReconciler, appWithoutCtrlReq, true)
|
||||
check(&v1OREmptyReconciler, appWithCtrlReqV1, true)
|
||||
check(&v2OnlyReconciler, appWithCtrlReqV2, true)
|
||||
})
|
||||
})
|
||||
|
||||
@@ -3184,6 +3296,46 @@ spec:
|
||||
}
|
||||
}
|
||||
parameter: objects: [...{}]
|
||||
`
|
||||
applyInParallelWorkflowDefinitionYaml = `
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: WorkflowStepDefinition
|
||||
metadata:
|
||||
name: apply-test
|
||||
namespace: vela-system
|
||||
spec:
|
||||
schematic:
|
||||
cue:
|
||||
template: |
|
||||
import (
|
||||
"vela/op"
|
||||
"list"
|
||||
)
|
||||
|
||||
components: op.#LoadInOrder & {}
|
||||
targetComponent: components.value[0]
|
||||
resources: op.#RenderComponent & {
|
||||
value: targetComponent
|
||||
}
|
||||
workload: resources.output
|
||||
arr: list.Range(0, parameter.parallelism, 1)
|
||||
patchWorkloads: op.#Steps & {
|
||||
for idx in arr {
|
||||
"\(idx)": op.#PatchK8sObject & {
|
||||
value: workload
|
||||
patch: {
|
||||
// +patchStrategy=retainKeys
|
||||
metadata: name: "\(targetComponent.name)-\(idx)"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
workloads: [ for patchResult in patchWorkloads {patchResult.result}]
|
||||
apply: op.#ApplyInParallel & {
|
||||
value: workloads
|
||||
}
|
||||
parameter: parallelism: int
|
||||
|
||||
`
|
||||
)
|
||||
|
||||
|
||||
@@ -32,6 +32,7 @@ import (
|
||||
"github.com/oam-dev/kubevela/pkg/appfile"
|
||||
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/application/assemble"
|
||||
"github.com/oam-dev/kubevela/pkg/cue/model/value"
|
||||
"github.com/oam-dev/kubevela/pkg/cue/process"
|
||||
"github.com/oam-dev/kubevela/pkg/monitor/metrics"
|
||||
"github.com/oam-dev/kubevela/pkg/multicluster"
|
||||
"github.com/oam-dev/kubevela/pkg/oam"
|
||||
@@ -60,16 +61,19 @@ func (h *AppHandler) GenerateApplicationSteps(ctx context.Context,
|
||||
appParser *appfile.Parser,
|
||||
af *appfile.Appfile,
|
||||
appRev *v1beta1.ApplicationRevision) ([]wfTypes.TaskRunner, error) {
|
||||
|
||||
handlerProviders := providers.NewProviders()
|
||||
kube.Install(handlerProviders, h.r.Client, h.Dispatch, h.Delete)
|
||||
oamProvider.Install(handlerProviders, app, h.applyComponentFunc(
|
||||
appParser, appRev, af), h.renderComponentFunc(appParser, appRev, af))
|
||||
http.Install(handlerProviders, h.r.Client, app.Namespace)
|
||||
taskDiscover := tasks.NewTaskDiscover(handlerProviders, h.r.pd, h.r.Client, h.r.dm)
|
||||
pCtx := process.NewContext(generateContextDataFromApp(app, appRev.Name))
|
||||
taskDiscover := tasks.NewTaskDiscover(handlerProviders, h.r.pd, h.r.Client, h.r.dm, pCtx)
|
||||
multiclusterProvider.Install(handlerProviders, h.r.Client, app)
|
||||
terraformProvider.Install(handlerProviders, app, func(comp common.ApplicationComponent) (*appfile.Workload, error) {
|
||||
return appParser.ParseWorkloadFromRevision(comp, appRev)
|
||||
})
|
||||
|
||||
var tasks []wfTypes.TaskRunner
|
||||
for _, step := range af.WorkflowSteps {
|
||||
options := &wfTypes.GeneratorOptions{
|
||||
@@ -290,3 +294,17 @@ func generateStepID(stepName string, wfStatus *common.WorkflowStatus) string {
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
func generateContextDataFromApp(app *v1beta1.Application, appRev string) process.ContextData {
|
||||
data := process.ContextData{
|
||||
Namespace: app.Namespace,
|
||||
AppName: app.Name,
|
||||
CompName: app.Name,
|
||||
AppRevisionName: appRev,
|
||||
}
|
||||
if app.Annotations != nil {
|
||||
data.WorkflowName = app.Annotations[oam.AnnotationWorkflowName]
|
||||
data.PublishVersion = app.Annotations[oam.AnnotationPublishVersion]
|
||||
}
|
||||
return data
|
||||
}
|
||||
|
||||
@@ -370,15 +370,21 @@ func ComputeAppRevisionHash(appRevision *v1beta1.ApplicationRevision) (string, e
|
||||
// currentAppRevIsNew check application revision already exist or not
|
||||
func (h *AppHandler) currentAppRevIsNew(ctx context.Context) (bool, bool, error) {
|
||||
// the last revision doesn't exist.
|
||||
if h.app.Status.LatestRevision == nil {
|
||||
if h.app.Status.LatestRevision == nil || DisableAllApplicationRevision {
|
||||
return true, true, nil
|
||||
}
|
||||
|
||||
isLatestRev := deepEqualAppInRevision(h.latestAppRev, h.currentAppRev)
|
||||
if metav1.HasAnnotation(h.app.ObjectMeta, oam.AnnotationAutoUpdate) {
|
||||
isLatestRev = h.app.Status.LatestRevision.RevisionHash == h.currentRevHash && DeepEqualRevision(h.latestAppRev, h.currentAppRev)
|
||||
}
|
||||
|
||||
// diff the latest revision first
|
||||
if h.app.Status.LatestRevision.RevisionHash == h.currentRevHash && DeepEqualRevision(h.latestAppRev, h.currentAppRev) {
|
||||
if isLatestRev {
|
||||
appSpec := h.currentAppRev.Spec.Application.Spec
|
||||
traitDef := h.currentAppRev.Spec.TraitDefinitions
|
||||
h.currentAppRev = h.latestAppRev.DeepCopy()
|
||||
h.currentRevHash = h.app.Status.LatestRevision.RevisionHash
|
||||
h.currentAppRev.Spec.Application.Spec = appSpec
|
||||
h.currentAppRev.Spec.TraitDefinitions = traitDef
|
||||
return false, false, nil
|
||||
@@ -444,6 +450,10 @@ func DeepEqualRevision(old, new *v1beta1.ApplicationRevision) bool {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return deepEqualAppInRevision(old, new)
|
||||
}
|
||||
|
||||
func deepEqualAppInRevision(old, new *v1beta1.ApplicationRevision) bool {
|
||||
return apiequality.Semantic.DeepEqual(filterSkipAffectAppRevTrait(old.Spec.Application.Spec, old.Spec.TraitDefinitions),
|
||||
filterSkipAffectAppRevTrait(new.Spec.Application.Spec, new.Spec.TraitDefinitions))
|
||||
}
|
||||
|
||||
@@ -245,6 +245,28 @@ var _ = Describe("test generate revision ", func() {
|
||||
verifyDeepEqualRevision()
|
||||
})
|
||||
|
||||
It("Test application revision compare", func() {
|
||||
By("Apply the application")
|
||||
appParser := appfile.NewApplicationParser(reconciler.Client, reconciler.dm, reconciler.pd)
|
||||
ctx = util.SetNamespaceInCtx(ctx, app.Namespace)
|
||||
generatedAppfile, err := appParser.GenerateAppFile(ctx, &app)
|
||||
Expect(err).Should(Succeed())
|
||||
comps, err = generatedAppfile.GenerateComponentManifests()
|
||||
Expect(err).Should(Succeed())
|
||||
Expect(handler.PrepareCurrentAppRevision(ctx, generatedAppfile)).Should(Succeed())
|
||||
Expect(handler.FinalizeAndApplyAppRevision(ctx)).Should(Succeed())
|
||||
prevHash := generatedAppfile.AppRevisionHash
|
||||
handler.app.Status.LatestRevision = &common.Revision{Name: generatedAppfile.AppRevisionName, Revision: 1, RevisionHash: generatedAppfile.AppRevisionHash}
|
||||
generatedAppfile.Workloads[0].FullTemplate.ComponentDefinition = nil
|
||||
Expect(handler.PrepareCurrentAppRevision(ctx, generatedAppfile)).Should(Succeed())
|
||||
nonChangeHash := generatedAppfile.AppRevisionHash
|
||||
handler.app.Annotations = map[string]string{oam.AnnotationAutoUpdate: "true"}
|
||||
Expect(handler.PrepareCurrentAppRevision(ctx, generatedAppfile)).Should(Succeed())
|
||||
changedHash := generatedAppfile.AppRevisionHash
|
||||
Expect(nonChangeHash).Should(Equal(prevHash))
|
||||
Expect(changedHash).ShouldNot(Equal(prevHash))
|
||||
})
|
||||
|
||||
It("Test apply success for none rollout case", func() {
|
||||
By("Apply the application")
|
||||
appParser := appfile.NewApplicationParser(reconciler.Client, reconciler.dm, reconciler.pd)
|
||||
|
||||
@@ -439,8 +439,8 @@ func CUEBasedHealthCheck(ctx context.Context, c client.Client, wlRef WorkloadRef
|
||||
|
||||
switch wl.CapabilityCategory {
|
||||
case oamtypes.TerraformCategory:
|
||||
pCtx = af.NewBasicContext(appfile.Name, wl.Name, appfile.AppRevisionName, appfile.Namespace, wl.Params)
|
||||
ctx := context.Background()
|
||||
pCtx = af.NewBasicContext(af.GenerateContextDataFromAppFile(appfile, wl.Name), wl.Params)
|
||||
var configuration terraformapi.Configuration
|
||||
if err := c.Get(ctx, client.ObjectKey{Name: wl.Name, Namespace: ns}, &configuration); err != nil {
|
||||
wlHealth.HealthStatus = StatusUnhealthy
|
||||
@@ -454,7 +454,8 @@ func CUEBasedHealthCheck(ctx context.Context, c client.Client, wlRef WorkloadRef
|
||||
wlHealth.Diagnosis = configuration.Status.Apply.Message
|
||||
okToCheckTrait = true
|
||||
default:
|
||||
pCtx = process.NewProcessContextWithCtx(ctx, ns, wl.Name, appfile.Name, appfile.AppRevisionName)
|
||||
pCtx = process.NewContext(af.GenerateContextDataFromAppFile(appfile, wl.Name))
|
||||
pCtx.SetCtx(ctx)
|
||||
if wl.CapabilityCategory != oamtypes.CUECategory {
|
||||
templateStr, err := af.GenerateCUETemplate(wl)
|
||||
if err != nil {
|
||||
|
||||
@@ -61,11 +61,13 @@ const (
|
||||
TerraformVariableMap string = "map"
|
||||
TerraformVariableObject string = "object"
|
||||
TerraformVariableNull string = ""
|
||||
TerraformVariableAny string = "any"
|
||||
|
||||
TerraformListTypePrefix string = "list("
|
||||
TerraformTupleTypePrefix string = "tuple("
|
||||
TerraformMapTypePrefix string = "map("
|
||||
TerraformObjectTypePrefix string = "object("
|
||||
TerraformSetTypePrefix string = "set("
|
||||
|
||||
typeTraitDefinition = "trait"
|
||||
typeComponentDefinition = "component"
|
||||
@@ -157,21 +159,44 @@ func GetOpenAPISchemaFromTerraformComponentDefinition(configuration string) ([]b
|
||||
schema = openapi3.NewArraySchema()
|
||||
case TerraformVariableMap, TerraformVariableObject:
|
||||
schema = openapi3.NewObjectSchema()
|
||||
case TerraformVariableAny:
|
||||
switch v.Default.(type) {
|
||||
case []interface{}:
|
||||
schema = openapi3.NewArraySchema()
|
||||
case map[string]interface{}:
|
||||
schema = openapi3.NewObjectSchema()
|
||||
}
|
||||
case TerraformVariableNull:
|
||||
return nil, fmt.Errorf("null type variable is NOT supported, please specify a type for the variable: %s", v.Name)
|
||||
switch v.Default.(type) {
|
||||
case nil, string:
|
||||
schema = openapi3.NewStringSchema()
|
||||
case []interface{}:
|
||||
schema = openapi3.NewArraySchema()
|
||||
case map[string]interface{}:
|
||||
schema = openapi3.NewObjectSchema()
|
||||
case int, float64:
|
||||
schema = openapi3.NewFloat64Schema()
|
||||
default:
|
||||
return nil, fmt.Errorf("null type variable is NOT supported, please specify a type for the variable: %s", v.Name)
|
||||
}
|
||||
}
|
||||
|
||||
// To identify unusual list type
|
||||
if schema == nil {
|
||||
switch {
|
||||
case strings.HasPrefix(v.Type, TerraformListTypePrefix) || strings.HasPrefix(v.Type, TerraformTupleTypePrefix):
|
||||
case strings.HasPrefix(v.Type, TerraformListTypePrefix) || strings.HasPrefix(v.Type, TerraformTupleTypePrefix) ||
|
||||
strings.HasPrefix(v.Type, TerraformSetTypePrefix):
|
||||
schema = openapi3.NewArraySchema()
|
||||
case strings.HasPrefix(v.Type, TerraformMapTypePrefix) || strings.HasPrefix(v.Type, TerraformObjectTypePrefix):
|
||||
schema = openapi3.NewObjectSchema()
|
||||
default:
|
||||
return nil, fmt.Errorf("the type `%s` of variable %s is NOT supported", v.Type, v.Name)
|
||||
}
|
||||
}
|
||||
schema.Title = k
|
||||
required = append(required, k)
|
||||
if v.Required {
|
||||
required = append(required, k)
|
||||
}
|
||||
if v.Default != nil {
|
||||
schema.Default = v.Default
|
||||
}
|
||||
|
||||
@@ -20,6 +20,7 @@ package utils
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@@ -302,21 +303,9 @@ variable "password" {
|
||||
|
||||
variable "intVar" {
|
||||
type = "number"
|
||||
}
|
||||
|
||||
variable "boolVar" {
|
||||
type = "bool"
|
||||
}
|
||||
|
||||
variable "listVar" {
|
||||
type = "list"
|
||||
}
|
||||
|
||||
variable "mapVar" {
|
||||
type = "map"
|
||||
}`,
|
||||
want: want{
|
||||
subStr: "account_name",
|
||||
subStr: `"required":["intVar"]`,
|
||||
err: nil,
|
||||
},
|
||||
},
|
||||
@@ -326,8 +315,38 @@ variable "name" {
|
||||
default = "abc"
|
||||
}`,
|
||||
want: want{
|
||||
subStr: "",
|
||||
err: errors.New("null type variable is NOT supported, please specify a type for the variable: name"),
|
||||
subStr: "abc",
|
||||
err: nil,
|
||||
},
|
||||
},
|
||||
"null type variable, while default value is a slice": {
|
||||
configuration: `
|
||||
variable "name" {
|
||||
default = [123]
|
||||
}`,
|
||||
want: want{
|
||||
subStr: "123",
|
||||
err: nil,
|
||||
},
|
||||
},
|
||||
"null type variable, while default value is a map": {
|
||||
configuration: `
|
||||
variable "name" {
|
||||
default = {a = 1}
|
||||
}`,
|
||||
want: want{
|
||||
subStr: "a",
|
||||
err: nil,
|
||||
},
|
||||
},
|
||||
"null type variable, while default value is number": {
|
||||
configuration: `
|
||||
variable "name" {
|
||||
default = 123
|
||||
}`,
|
||||
want: want{
|
||||
subStr: "123",
|
||||
err: nil,
|
||||
},
|
||||
},
|
||||
"complicated list variable": {
|
||||
@@ -354,6 +373,38 @@ variable "bbb" {
|
||||
config = string
|
||||
})
|
||||
default = []
|
||||
}`,
|
||||
want: want{
|
||||
subStr: "bbb",
|
||||
err: nil,
|
||||
},
|
||||
},
|
||||
"not supported complicated variable": {
|
||||
configuration: `
|
||||
variable "bbb" {
|
||||
type = xxxxx(string)
|
||||
}`,
|
||||
want: want{
|
||||
subStr: "",
|
||||
err: fmt.Errorf("the type `%s` of variable %s is NOT supported", "xxxxx(string)", "bbb"),
|
||||
},
|
||||
},
|
||||
"any type, slice default": {
|
||||
configuration: `
|
||||
variable "bbb" {
|
||||
type = any
|
||||
default = []
|
||||
}`,
|
||||
want: want{
|
||||
subStr: "bbb",
|
||||
err: nil,
|
||||
},
|
||||
},
|
||||
"any type, map default": {
|
||||
configuration: `
|
||||
variable "bbb" {
|
||||
type = any
|
||||
default = {}
|
||||
}`,
|
||||
want: want{
|
||||
subStr: "bbb",
|
||||
@@ -423,6 +474,34 @@ variable "aaa" {
|
||||
config = string
|
||||
}))
|
||||
default = []
|
||||
}`,
|
||||
},
|
||||
},
|
||||
"configuration is remote with path": {
|
||||
args: args{
|
||||
name: "aws-subnet",
|
||||
url: "https://github.com/kubevela-contrib/terraform-modules.git",
|
||||
path: "aws/subnet",
|
||||
data: []byte(`
|
||||
variable "aaa" {
|
||||
type = list(object({
|
||||
type = string
|
||||
sourceArn = string
|
||||
config = string
|
||||
}))
|
||||
default = []
|
||||
}`),
|
||||
variableFile: "variables.tf",
|
||||
},
|
||||
want: want{
|
||||
config: `
|
||||
variable "aaa" {
|
||||
type = list(object({
|
||||
type = string
|
||||
sourceArn = string
|
||||
config = string
|
||||
}))
|
||||
default = []
|
||||
}`,
|
||||
},
|
||||
},
|
||||
@@ -454,7 +533,12 @@ variable "aaa" {
|
||||
}
|
||||
|
||||
patch := ApplyFunc(git.PlainCloneContext, func(ctx context.Context, path string, isBare bool, o *git.CloneOptions) (*git.Repository, error) {
|
||||
tmpPath := filepath.Join("./tmp/terraform", tc.args.name)
|
||||
var tmpPath string
|
||||
if tc.args.path != "" {
|
||||
tmpPath = filepath.Join("./tmp/terraform", tc.args.name, tc.args.path)
|
||||
} else {
|
||||
tmpPath = filepath.Join("./tmp/terraform", tc.args.name)
|
||||
}
|
||||
err := os.MkdirAll(tmpPath, os.ModePerm)
|
||||
assert.NilError(t, err)
|
||||
err = ioutil.WriteFile(filepath.Clean(filepath.Join(tmpPath, tc.args.variableFile)), tc.args.data, 0644)
|
||||
|
||||
@@ -217,7 +217,12 @@ parameter: {
|
||||
}
|
||||
|
||||
for _, v := range testCases {
|
||||
ctx := process.NewContext("default", "test", "myapp", "myapp-v1")
|
||||
ctx := process.NewContext(process.ContextData{
|
||||
AppName: "myapp",
|
||||
CompName: "test",
|
||||
Namespace: "default",
|
||||
AppRevisionName: "myapp-v1",
|
||||
})
|
||||
wt := NewWorkloadAbstractEngine("testWorkload", &packages.PackageDiscover{})
|
||||
err := wt.Complete(ctx, v.workloadTemplate, v.params)
|
||||
hasError := err != nil
|
||||
@@ -918,7 +923,12 @@ parameter: [string]: string`,
|
||||
}
|
||||
|
||||
`
|
||||
ctx := process.NewContext("default", "test", "myapp", "myapp-v1")
|
||||
ctx := process.NewContext(process.ContextData{
|
||||
AppName: "myapp",
|
||||
CompName: "test",
|
||||
Namespace: "default",
|
||||
AppRevisionName: "myapp-v1",
|
||||
})
|
||||
wt := NewWorkloadAbstractEngine("-", &packages.PackageDiscover{})
|
||||
if err := wt.Complete(ctx, baseTemplate, map[string]interface{}{
|
||||
"replicas": 2,
|
||||
@@ -1017,7 +1027,12 @@ outputs: service :{
|
||||
}
|
||||
for k, v := range testcases {
|
||||
wd := NewWorkloadAbstractEngine(k, &packages.PackageDiscover{})
|
||||
ctx := process.NewContext("default", k, "myapp", "myapp-v1")
|
||||
ctx := process.NewContext(process.ContextData{
|
||||
AppName: "myapp",
|
||||
CompName: k,
|
||||
Namespace: "default",
|
||||
AppRevisionName: "myapp-v1",
|
||||
})
|
||||
err := wd.Complete(ctx, v.template, map[string]interface{}{})
|
||||
assert.NoError(t, err)
|
||||
_, assists := ctx.Output()
|
||||
@@ -1095,7 +1110,12 @@ outputs: abc :{
|
||||
}
|
||||
for k, v := range testcases {
|
||||
td := NewTraitAbstractEngine(k, &packages.PackageDiscover{})
|
||||
ctx := process.NewContext("default", k, "myapp", "myapp-v1")
|
||||
ctx := process.NewContext(process.ContextData{
|
||||
AppName: "myapp",
|
||||
CompName: k,
|
||||
Namespace: "default",
|
||||
AppRevisionName: "myapp-v1",
|
||||
})
|
||||
err := td.Complete(ctx, v.template, map[string]interface{}{})
|
||||
assert.NoError(t, err)
|
||||
_, assists := ctx.Output()
|
||||
|
||||
@@ -35,12 +35,18 @@ const (
|
||||
ContextAppRevisionNum = "appRevisionNum"
|
||||
// ContextNamespace is the namespace of the app
|
||||
ContextNamespace = "namespace"
|
||||
// ContextPublishVersion is the publish version of the app
|
||||
ContextPublishVersion = "publishVersion"
|
||||
// ContextWorkflowName is the name of the workflow
|
||||
ContextWorkflowName = "workflowName"
|
||||
// OutputSecretName is used to store all secret names which are generated by cloud resource components
|
||||
OutputSecretName = "outputSecretName"
|
||||
// ContextCompRevisionName is the component revision name of context
|
||||
ContextCompRevisionName = "revision"
|
||||
// ContextComponents is the components of app
|
||||
ContextComponents = "components"
|
||||
// ContextComponentType is the component type of current trait binding with
|
||||
ContextComponentType = "componentType"
|
||||
// ComponentRevisionPlaceHolder is the component revision name placeHolder, this field will be replace with real value
|
||||
// after component be created
|
||||
ComponentRevisionPlaceHolder = "KUBEVELA_COMPONENT_REVISION_PLACEHOLDER"
|
||||
|
||||
@@ -317,7 +317,7 @@ func (val *Value) LookupValue(paths ...string) (*Value, error) {
|
||||
func (val *Value) LookupByScript(script string) (*Value, error) {
|
||||
var outputKey = "zz_output__"
|
||||
script = strings.TrimSpace(script)
|
||||
scriptFile, err := parser.ParseFile("-", script)
|
||||
scriptFile, err := parser.ParseFile("-", script, parser.ParseComments)
|
||||
if err != nil {
|
||||
return nil, errors.WithMessage(err, "parse script")
|
||||
}
|
||||
@@ -327,7 +327,7 @@ func (val *Value) LookupByScript(script string) (*Value, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rawFile, err := parser.ParseFile("-", raw)
|
||||
rawFile, err := parser.ParseFile("-", raw, parser.ParseComments)
|
||||
if err != nil {
|
||||
return nil, errors.WithMessage(err, "parse script")
|
||||
}
|
||||
|
||||
@@ -597,6 +597,23 @@ func TestLookupByScript(t *testing.T) {
|
||||
}{
|
||||
{
|
||||
src: `
|
||||
traits: {
|
||||
ingress: {
|
||||
// +patchKey=name
|
||||
test: [{name: "main", image: "busybox"}]
|
||||
}
|
||||
}
|
||||
`,
|
||||
script: `traits["ingress"]`,
|
||||
expect: `// +patchKey=name
|
||||
test: [{
|
||||
name: "main"
|
||||
image: "busybox"
|
||||
}]
|
||||
`,
|
||||
},
|
||||
{
|
||||
src: `
|
||||
apply: containers: [{name: "main", image: "busybox"}]
|
||||
`,
|
||||
script: `apply.containers[0].image`,
|
||||
|
||||
@@ -107,18 +107,17 @@ func (pd *PackageDiscover) ImportBuiltinPackagesFor(bi *build.Instance) {
|
||||
|
||||
// ImportPackagesAndBuildInstance Combine import built-in packages and build cue template together to avoid data race
|
||||
func (pd *PackageDiscover) ImportPackagesAndBuildInstance(bi *build.Instance) (inst *cue.Instance, err error) {
|
||||
var r cue.Runtime
|
||||
if pd == nil {
|
||||
return r.Build(bi)
|
||||
}
|
||||
pd.ImportBuiltinPackagesFor(bi)
|
||||
if err := stdlib.AddImportsFor(bi, ""); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var r cue.Runtime
|
||||
pd.mutex.Lock()
|
||||
defer pd.mutex.Unlock()
|
||||
cueInst, err := r.Build(bi)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cueInst, err
|
||||
return r.Build(bi)
|
||||
}
|
||||
|
||||
// ListPackageKinds list packages and their kinds
|
||||
|
||||
@@ -62,10 +62,12 @@ type templateContext struct {
|
||||
// appName is the name of Application
|
||||
appName string
|
||||
// appRevision is the revision name of Application
|
||||
appRevision string
|
||||
configs []map[string]string
|
||||
base model.Instance
|
||||
auxiliaries []Auxiliary
|
||||
appRevision string
|
||||
workflowName string
|
||||
publishVersion string
|
||||
configs []map[string]string
|
||||
base model.Instance
|
||||
auxiliaries []Auxiliary
|
||||
// namespace is the namespace of Application which is used to set the namespace for Crossplane connection secret,
|
||||
// ComponentDefinition/TratiDefinition OpenAPI v3 schema
|
||||
namespace string
|
||||
@@ -94,57 +96,41 @@ type RequiredSecrets struct {
|
||||
Data map[string]interface{}
|
||||
}
|
||||
|
||||
// ContextData is the core data of process context
|
||||
type ContextData struct {
|
||||
Namespace string
|
||||
AppName string
|
||||
CompName string
|
||||
AppRevisionName string
|
||||
WorkflowName string
|
||||
PublishVersion string
|
||||
|
||||
Ctx context.Context
|
||||
BaseHooks []BaseHook
|
||||
AuxiliaryHooks []AuxiliaryHook
|
||||
Components []common.ApplicationComponent
|
||||
}
|
||||
|
||||
// NewContext create render templateContext
|
||||
func NewContext(namespace, name, appName, appRevision string) Context {
|
||||
return &templateContext{
|
||||
name: name,
|
||||
appName: appName,
|
||||
appRevision: appRevision,
|
||||
func NewContext(data ContextData) Context {
|
||||
ctx := &templateContext{
|
||||
namespace: data.Namespace,
|
||||
name: data.CompName,
|
||||
appName: data.AppName,
|
||||
appRevision: data.AppRevisionName,
|
||||
workflowName: data.WorkflowName,
|
||||
publishVersion: data.PublishVersion,
|
||||
|
||||
configs: []map[string]string{},
|
||||
auxiliaries: []Auxiliary{},
|
||||
namespace: namespace,
|
||||
parameters: map[string]interface{}{},
|
||||
}
|
||||
}
|
||||
|
||||
// NewProcessContextWithCtx create render templateContext with ctx
|
||||
func NewProcessContextWithCtx(ctx context.Context, namespace, name, appName, appRevision string) Context {
|
||||
return &templateContext{
|
||||
name: name,
|
||||
appName: appName,
|
||||
appRevision: appRevision,
|
||||
configs: []map[string]string{},
|
||||
auxiliaries: []Auxiliary{},
|
||||
namespace: namespace,
|
||||
parameters: map[string]interface{}{},
|
||||
ctx: ctx,
|
||||
}
|
||||
}
|
||||
|
||||
// NewContextWithHooks create render templateContext with hooks for validation
|
||||
func NewContextWithHooks(namespace, name, appName, appRevision string, baseHooks []BaseHook, auxHooks []AuxiliaryHook) Context {
|
||||
return &templateContext{
|
||||
name: name,
|
||||
appName: appName,
|
||||
appRevision: appRevision,
|
||||
configs: []map[string]string{},
|
||||
auxiliaries: []Auxiliary{},
|
||||
namespace: namespace,
|
||||
parameters: map[string]interface{}{},
|
||||
baseHooks: baseHooks,
|
||||
auxiliaryHooks: auxHooks,
|
||||
}
|
||||
}
|
||||
|
||||
// NewPolicyContext create Application Scope templateContext for Policy
|
||||
func NewPolicyContext(namespace, name, appName, appRevision string, components []common.ApplicationComponent) Context {
|
||||
return &templateContext{
|
||||
name: name,
|
||||
appName: appName,
|
||||
appRevision: appRevision,
|
||||
namespace: namespace,
|
||||
components: components,
|
||||
ctx: data.Ctx,
|
||||
baseHooks: data.BaseHooks,
|
||||
auxiliaryHooks: data.AuxiliaryHooks,
|
||||
components: data.Components,
|
||||
}
|
||||
return ctx
|
||||
}
|
||||
|
||||
// SetParameters sets templateContext parameters
|
||||
@@ -185,6 +171,8 @@ func (ctx *templateContext) BaseContextFile() string {
|
||||
buff += fmt.Sprintf(model.ContextAppRevisionNum+": %d\n", revNum)
|
||||
buff += fmt.Sprintf(model.ContextNamespace+": \"%s\"\n", ctx.namespace)
|
||||
buff += fmt.Sprintf(model.ContextCompRevisionName+": \"%s\"\n", model.ComponentRevisionPlaceHolder)
|
||||
buff += fmt.Sprintf(model.ContextWorkflowName+": \"%s\"\n", ctx.workflowName)
|
||||
buff += fmt.Sprintf(model.ContextPublishVersion+": \"%s\"\n", ctx.publishVersion)
|
||||
|
||||
if ctx.base != nil {
|
||||
buff += fmt.Sprintf(model.OutputFieldName+": %s\n", structMarshal(ctx.base.String()))
|
||||
|
||||
@@ -100,7 +100,14 @@ image: "myserver"
|
||||
},
|
||||
}
|
||||
|
||||
ctx := NewContext("myns", "mycomp", "myapp", "myapp-v1")
|
||||
ctx := NewContext(ContextData{
|
||||
AppName: "myapp",
|
||||
CompName: "mycomp",
|
||||
Namespace: "myns",
|
||||
AppRevisionName: "myapp-v1",
|
||||
WorkflowName: "myworkflow",
|
||||
PublishVersion: "mypublishversion",
|
||||
})
|
||||
ctx.SetBase(base)
|
||||
ctx.AppendAuxiliaries(svcAux)
|
||||
ctx.AppendAuxiliaries(svcAuxWithAbnormalName)
|
||||
@@ -130,6 +137,14 @@ image: "myserver"
|
||||
assert.Equal(t, nil, err)
|
||||
assert.Equal(t, int64(1), myAppRevisionNum)
|
||||
|
||||
myWorkflowName, err := ctxInst.Lookup("context", model.ContextWorkflowName).String()
|
||||
assert.Equal(t, nil, err)
|
||||
assert.Equal(t, "myworkflow", myWorkflowName)
|
||||
|
||||
myPublishVersion, err := ctxInst.Lookup("context", model.ContextPublishVersion).String()
|
||||
assert.Equal(t, nil, err)
|
||||
assert.Equal(t, "mypublishversion", myPublishVersion)
|
||||
|
||||
inputJs, err := ctxInst.Lookup("context", model.OutputFieldName).MarshalJSON()
|
||||
assert.Equal(t, nil, err)
|
||||
assert.Equal(t, `{"image":"myserver"}`, string(inputJs))
|
||||
|
||||
@@ -18,7 +18,6 @@ package metrics
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -116,17 +115,5 @@ var (
|
||||
ResourceTrackerNumberGauge = prometheus.NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "resourcetracker_number",
|
||||
Help: "resourceTracker number.",
|
||||
}, []string{"application", "namespace"})
|
||||
}, []string{"controller"})
|
||||
)
|
||||
|
||||
// ExtractMetricValuesFromObjectLabel extract metric values from k8s object's labels
|
||||
func ExtractMetricValuesFromObjectLabel(obj interface{}, labelKeys ...string) (values []string) {
|
||||
if resource, ok := obj.(client.Object); ok {
|
||||
for _, labelKey := range labelKeys {
|
||||
values = append(values, resource.GetLabels()[labelKey])
|
||||
}
|
||||
} else {
|
||||
values = make([]string, len(labelKeys))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -17,69 +17,479 @@ limitations under the License.
|
||||
package multicluster
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
v1alpha12 "github.com/oam-dev/cluster-gateway/pkg/apis/cluster/v1alpha1"
|
||||
"github.com/briandowns/spinner"
|
||||
"github.com/oam-dev/cluster-register/pkg/hub"
|
||||
"github.com/oam-dev/cluster-register/pkg/spoke"
|
||||
"github.com/pkg/errors"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
v14 "k8s.io/api/storage/v1"
|
||||
errors2 "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
v12 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
types2 "k8s.io/apimachinery/pkg/types"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
apitypes "k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
"k8s.io/client-go/tools/clientcmd/api"
|
||||
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
||||
ocmclusterv1 "open-cluster-management.io/api/cluster/v1"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
clusterv1alpha1 "github.com/oam-dev/cluster-gateway/pkg/apis/cluster/v1alpha1"
|
||||
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
|
||||
"github.com/oam-dev/kubevela/apis/types"
|
||||
"github.com/oam-dev/kubevela/pkg/policy/envbinding"
|
||||
errors3 "github.com/oam-dev/kubevela/pkg/utils/errors"
|
||||
"github.com/oam-dev/kubevela/pkg/utils"
|
||||
velaerrors "github.com/oam-dev/kubevela/pkg/utils/errors"
|
||||
cmdutil "github.com/oam-dev/kubevela/pkg/utils/util"
|
||||
)
|
||||
|
||||
// ensureVelaSystemNamespaceInstalled ensures vela namespace to be installed in child cluster
|
||||
func ensureVelaSystemNamespaceInstalled(ctx context.Context, c client.Client, clusterName string, createNamespace string) error {
|
||||
remoteCtx := ContextWithClusterName(ctx, clusterName)
|
||||
if err := c.Get(remoteCtx, types2.NamespacedName{Name: createNamespace}, &v1.Namespace{}); err != nil {
|
||||
if !errors2.IsNotFound(err) {
|
||||
return errors.Wrapf(err, "failed to check vela-system ")
|
||||
}
|
||||
if err = c.Create(remoteCtx, &v1.Namespace{ObjectMeta: v12.ObjectMeta{Name: createNamespace}}); err != nil {
|
||||
return errors.Wrapf(err, "failed to create vela-system namespace")
|
||||
// KubeClusterConfig info for cluster management
|
||||
type KubeClusterConfig struct {
|
||||
ClusterName string
|
||||
CreateNamespace string
|
||||
*clientcmdapi.Config
|
||||
*clientcmdapi.Cluster
|
||||
*clientcmdapi.AuthInfo
|
||||
|
||||
// Logs records intermediate logs (which do not return error) during running
|
||||
Logs bytes.Buffer
|
||||
}
|
||||
|
||||
// SetClusterName set cluster name if not empty
|
||||
func (clusterConfig *KubeClusterConfig) SetClusterName(clusterName string) *KubeClusterConfig {
|
||||
if clusterName != "" {
|
||||
clusterConfig.ClusterName = clusterName
|
||||
}
|
||||
return clusterConfig
|
||||
}
|
||||
|
||||
// SetCreateNamespace set create namespace, if empty, no namespace will be created
|
||||
func (clusterConfig *KubeClusterConfig) SetCreateNamespace(createNamespace string) *KubeClusterConfig {
|
||||
clusterConfig.CreateNamespace = createNamespace
|
||||
return clusterConfig
|
||||
}
|
||||
|
||||
// Validate check if config is valid for join
|
||||
func (clusterConfig *KubeClusterConfig) Validate() error {
|
||||
switch clusterConfig.ClusterName {
|
||||
case "":
|
||||
return errors.Errorf("ClusterName cannot be empty")
|
||||
case ClusterLocalName:
|
||||
return errors.Errorf("ClusterName cannot be `%s`, it is reserved as the local cluster", ClusterLocalName)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// RegisterByVelaSecret create cluster secrets for KubeVela to use
|
||||
func (clusterConfig *KubeClusterConfig) RegisterByVelaSecret(ctx context.Context, cli client.Client) error {
|
||||
if err := ensureClusterNotExists(ctx, cli, clusterConfig.ClusterName); err != nil {
|
||||
return errors.Wrapf(err, "cannot use cluster name %s", clusterConfig.ClusterName)
|
||||
}
|
||||
var credentialType clusterv1alpha1.CredentialType
|
||||
data := map[string][]byte{
|
||||
"endpoint": []byte(clusterConfig.Cluster.Server),
|
||||
"ca.crt": clusterConfig.Cluster.CertificateAuthorityData,
|
||||
}
|
||||
if len(clusterConfig.AuthInfo.Token) > 0 {
|
||||
credentialType = clusterv1alpha1.CredentialTypeServiceAccountToken
|
||||
data["token"] = []byte(clusterConfig.AuthInfo.Token)
|
||||
} else {
|
||||
credentialType = clusterv1alpha1.CredentialTypeX509Certificate
|
||||
data["tls.crt"] = clusterConfig.AuthInfo.ClientCertificateData
|
||||
data["tls.key"] = clusterConfig.AuthInfo.ClientKeyData
|
||||
}
|
||||
secret := &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: clusterConfig.ClusterName,
|
||||
Namespace: ClusterGatewaySecretNamespace,
|
||||
Labels: map[string]string{
|
||||
clusterv1alpha1.LabelKeyClusterCredentialType: string(credentialType),
|
||||
},
|
||||
},
|
||||
Type: corev1.SecretTypeOpaque,
|
||||
Data: data,
|
||||
}
|
||||
if err := cli.Create(ctx, secret); err != nil {
|
||||
return errors.Wrapf(err, "failed to add cluster to kubernetes")
|
||||
}
|
||||
// TODO(somefive): create namespace now only work for cluster secret
|
||||
if clusterConfig.CreateNamespace != "" {
|
||||
if err := ensureNamespaceExists(ctx, cli, clusterConfig.ClusterName, clusterConfig.CreateNamespace); err != nil {
|
||||
_ = cli.Delete(ctx, secret)
|
||||
return errors.Wrapf(err, "failed to ensure %s namespace installed in cluster %s", clusterConfig.CreateNamespace, clusterConfig.ClusterName)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ensureClusterNotExists checks if child cluster has already been joined, if joined, error is returned
|
||||
// RegisterClusterManagedByOCM create ocm managed cluster for use
|
||||
// TODO(somefive): OCM ManagedCluster only support cli join now
|
||||
func (clusterConfig *KubeClusterConfig) RegisterClusterManagedByOCM(ctx context.Context, args *JoinClusterArgs) error {
|
||||
newTrackingSpinner := args.trackingSpinnerFactory
|
||||
hubCluster, err := hub.NewHubCluster(args.hubConfig)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "fail to create client connect to hub cluster")
|
||||
}
|
||||
|
||||
hubTracker := newTrackingSpinner("Checking the environment of hub cluster..")
|
||||
hubTracker.FinalMSG = "Hub cluster all set, continue registration.\n"
|
||||
hubTracker.Start()
|
||||
crdName := apitypes.NamespacedName{Name: "managedclusters." + ocmclusterv1.GroupName}
|
||||
if err := hubCluster.Client.Get(context.Background(), crdName, &apiextensionsv1.CustomResourceDefinition{}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
clusters, err := ListVirtualClusters(context.Background(), hubCluster.Client)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, cluster := range clusters {
|
||||
if cluster.Name == clusterConfig.ClusterName && cluster.Accepted {
|
||||
return errors.Errorf("you have register a cluster named %s", clusterConfig.ClusterName)
|
||||
}
|
||||
}
|
||||
hubTracker.Stop()
|
||||
|
||||
spokeRestConf, err := clientcmd.BuildConfigFromKubeconfigGetter("", func() (*clientcmdapi.Config, error) {
|
||||
return clusterConfig.Config, nil
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "fail to convert spoke-cluster kubeconfig")
|
||||
}
|
||||
|
||||
spokeTracker := newTrackingSpinner("Building registration config for the managed cluster")
|
||||
spokeTracker.FinalMSG = "Successfully prepared registration config.\n"
|
||||
spokeTracker.Start()
|
||||
overridingRegistrationEndpoint := ""
|
||||
if !*args.inClusterBootstrap {
|
||||
args.ioStreams.Infof("Using the api endpoint from hub kubeconfig %q as registration entry.\n", args.hubConfig.Host)
|
||||
overridingRegistrationEndpoint = args.hubConfig.Host
|
||||
}
|
||||
hubKubeToken, err := hubCluster.GenerateHubClusterKubeConfig(ctx, overridingRegistrationEndpoint)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "fail to generate the token for spoke-cluster")
|
||||
}
|
||||
|
||||
spokeCluster, err := spoke.NewSpokeCluster(clusterConfig.ClusterName, spokeRestConf, hubKubeToken)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "fail to connect spoke cluster")
|
||||
}
|
||||
|
||||
err = spokeCluster.InitSpokeClusterEnv(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "fail to prepare the env for spoke-cluster")
|
||||
}
|
||||
spokeTracker.Stop()
|
||||
|
||||
registrationOperatorTracker := newTrackingSpinner("Waiting for registration operators running: (`kubectl -n open-cluster-management get pod -l app=klusterlet`)")
|
||||
registrationOperatorTracker.FinalMSG = "Registration operator successfully deployed.\n"
|
||||
registrationOperatorTracker.Start()
|
||||
if err := spokeCluster.WaitForRegistrationOperatorReady(ctx); err != nil {
|
||||
return errors.Wrap(err, "fail to setup registration operator for spoke-cluster")
|
||||
}
|
||||
registrationOperatorTracker.Stop()
|
||||
|
||||
registrationAgentTracker := newTrackingSpinner("Waiting for registration agent running: (`kubectl -n open-cluster-management-agent get pod -l app=klusterlet-registration-agent`)")
|
||||
registrationAgentTracker.FinalMSG = "Registration agent successfully deployed.\n"
|
||||
registrationAgentTracker.Start()
|
||||
if err := spokeCluster.WaitForRegistrationAgentReady(ctx); err != nil {
|
||||
return errors.Wrap(err, "fail to setup registration agent for spoke-cluster")
|
||||
}
|
||||
registrationAgentTracker.Stop()
|
||||
|
||||
csrCreationTracker := newTrackingSpinner("Waiting for CSRs created (`kubectl get csr -l open-cluster-management.io/cluster-name=" + spokeCluster.Name + "`)")
|
||||
csrCreationTracker.FinalMSG = "Successfully found corresponding CSR from the agent.\n"
|
||||
csrCreationTracker.Start()
|
||||
if err := hubCluster.WaitForCSRCreated(ctx, spokeCluster.Name); err != nil {
|
||||
return errors.Wrap(err, "failed found CSR created by registration agent")
|
||||
}
|
||||
csrCreationTracker.Stop()
|
||||
|
||||
args.ioStreams.Infof("Approving the CSR for cluster %q.\n", spokeCluster.Name)
|
||||
if err := hubCluster.ApproveCSR(ctx, spokeCluster.Name); err != nil {
|
||||
return errors.Wrap(err, "failed found CSR created by registration agent")
|
||||
}
|
||||
|
||||
ready, err := hubCluster.WaitForSpokeClusterReady(ctx, clusterConfig.ClusterName)
|
||||
if err != nil || !ready {
|
||||
return errors.Errorf("fail to waiting for register request")
|
||||
}
|
||||
|
||||
if err = hubCluster.RegisterSpokeCluster(ctx, spokeCluster.Name); err != nil {
|
||||
return errors.Wrap(err, "fail to approve spoke cluster")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// LoadKubeClusterConfigFromFile create KubeClusterConfig from kubeconfig file
|
||||
func LoadKubeClusterConfigFromFile(filepath string) (*KubeClusterConfig, error) {
|
||||
clusterConfig := &KubeClusterConfig{}
|
||||
var err error
|
||||
clusterConfig.Config, err = clientcmd.LoadFromFile(filepath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get kubeconfig")
|
||||
}
|
||||
if len(clusterConfig.Config.CurrentContext) == 0 {
|
||||
return nil, fmt.Errorf("current-context is not set")
|
||||
}
|
||||
var ok bool
|
||||
ctx, ok := clusterConfig.Config.Contexts[clusterConfig.Config.CurrentContext]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("current-context %s not found", clusterConfig.Config.CurrentContext)
|
||||
}
|
||||
clusterConfig.Cluster, ok = clusterConfig.Config.Clusters[ctx.Cluster]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("cluster %s not found", ctx.Cluster)
|
||||
}
|
||||
clusterConfig.AuthInfo, ok = clusterConfig.Config.AuthInfos[ctx.AuthInfo]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("authInfo %s not found", ctx.AuthInfo)
|
||||
}
|
||||
clusterConfig.ClusterName = ctx.Cluster
|
||||
if endpoint, err := utils.ParseAPIServerEndpoint(clusterConfig.Cluster.Server); err == nil {
|
||||
clusterConfig.Cluster.Server = endpoint
|
||||
} else {
|
||||
_, _ = fmt.Fprintf(&clusterConfig.Logs, "failed to parse server endpoint: %v", err)
|
||||
}
|
||||
return clusterConfig, nil
|
||||
}
|
||||
|
||||
const (
|
||||
// ClusterGateWayEngine cluster-gateway cluster management solution
|
||||
ClusterGateWayEngine = "cluster-gateway"
|
||||
// OCMEngine ocm cluster management solution
|
||||
OCMEngine = "ocm"
|
||||
)
|
||||
|
||||
// JoinClusterArgs args for join cluster
|
||||
type JoinClusterArgs struct {
|
||||
engine string
|
||||
createNamespace string
|
||||
ioStreams cmdutil.IOStreams
|
||||
hubConfig *rest.Config
|
||||
inClusterBootstrap *bool
|
||||
trackingSpinnerFactory func(string) *spinner.Spinner
|
||||
}
|
||||
|
||||
func newJoinClusterArgs(options ...JoinClusterOption) *JoinClusterArgs {
|
||||
args := &JoinClusterArgs{
|
||||
engine: ClusterGateWayEngine,
|
||||
}
|
||||
for _, op := range options {
|
||||
op.ApplyToArgs(args)
|
||||
}
|
||||
return args
|
||||
}
|
||||
|
||||
// JoinClusterOption option for join cluster
|
||||
type JoinClusterOption interface {
|
||||
ApplyToArgs(args *JoinClusterArgs)
|
||||
}
|
||||
|
||||
// JoinClusterCreateNamespaceOption create namespace when join cluster, if empty, no creation
|
||||
type JoinClusterCreateNamespaceOption string
|
||||
|
||||
// ApplyToArgs apply to args
|
||||
func (op JoinClusterCreateNamespaceOption) ApplyToArgs(args *JoinClusterArgs) {
|
||||
args.createNamespace = string(op)
|
||||
}
|
||||
|
||||
// JoinClusterEngineOption configure engine for join cluster, either cluster-gateway or ocm
|
||||
type JoinClusterEngineOption string
|
||||
|
||||
// ApplyToArgs apply to args
|
||||
func (op JoinClusterEngineOption) ApplyToArgs(args *JoinClusterArgs) {
|
||||
args.engine = string(op)
|
||||
}
|
||||
|
||||
// JoinClusterOCMOptions options used when joining clusters by ocm, only support cli for now
|
||||
type JoinClusterOCMOptions struct {
|
||||
IoStreams cmdutil.IOStreams
|
||||
HubConfig *rest.Config
|
||||
InClusterBootstrap *bool
|
||||
TrackingSpinnerFactory func(string) *spinner.Spinner
|
||||
}
|
||||
|
||||
// ApplyToArgs apply to args
|
||||
func (op JoinClusterOCMOptions) ApplyToArgs(args *JoinClusterArgs) {
|
||||
args.ioStreams = op.IoStreams
|
||||
args.hubConfig = op.HubConfig
|
||||
args.inClusterBootstrap = op.InClusterBootstrap
|
||||
args.trackingSpinnerFactory = op.TrackingSpinnerFactory
|
||||
}
|
||||
|
||||
// JoinClusterByKubeConfig add child cluster by kubeconfig path, return cluster info and error
|
||||
func JoinClusterByKubeConfig(ctx context.Context, cli client.Client, kubeconfigPath string, clusterName string, options ...JoinClusterOption) (*KubeClusterConfig, error) {
|
||||
args := newJoinClusterArgs(options...)
|
||||
clusterConfig, err := LoadKubeClusterConfigFromFile(kubeconfigPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := clusterConfig.SetClusterName(clusterName).SetCreateNamespace(args.createNamespace).Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch args.engine {
|
||||
case ClusterGateWayEngine:
|
||||
if err = clusterConfig.RegisterByVelaSecret(ctx, cli); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case OCMEngine:
|
||||
if args.inClusterBootstrap == nil {
|
||||
return nil, errors.Wrapf(err, "failed to determine the registration endpoint for the hub cluster "+
|
||||
"when parsing --in-cluster-bootstrap flag")
|
||||
}
|
||||
if err = clusterConfig.RegisterClusterManagedByOCM(ctx, args); err != nil {
|
||||
return clusterConfig, err
|
||||
}
|
||||
}
|
||||
return clusterConfig, nil
|
||||
}
|
||||
|
||||
// DetachClusterArgs args for detaching cluster
|
||||
type DetachClusterArgs struct {
|
||||
managedClusterKubeConfigPath string
|
||||
}
|
||||
|
||||
func newDetachClusterArgs(options ...DetachClusterOption) *DetachClusterArgs {
|
||||
args := &DetachClusterArgs{}
|
||||
for _, op := range options {
|
||||
op.ApplyToArgs(args)
|
||||
}
|
||||
return args
|
||||
}
|
||||
|
||||
// DetachClusterOption option for detach cluster
|
||||
type DetachClusterOption interface {
|
||||
ApplyToArgs(args *DetachClusterArgs)
|
||||
}
|
||||
|
||||
// DetachClusterManagedClusterKubeConfigPathOption configure the managed cluster kubeconfig path while detach ocm cluster
|
||||
type DetachClusterManagedClusterKubeConfigPathOption string
|
||||
|
||||
// ApplyToArgs apply to args
|
||||
func (op DetachClusterManagedClusterKubeConfigPathOption) ApplyToArgs(args *DetachClusterArgs) {
|
||||
args.managedClusterKubeConfigPath = string(op)
|
||||
}
|
||||
|
||||
// DetachCluster detach cluster by name, if cluster is using by application, it will return error
|
||||
func DetachCluster(ctx context.Context, cli client.Client, clusterName string, options ...DetachClusterOption) error {
|
||||
args := newDetachClusterArgs(options...)
|
||||
if clusterName == ClusterLocalName {
|
||||
return ErrReservedLocalClusterName
|
||||
}
|
||||
vc, err := GetVirtualCluster(ctx, cli, clusterName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch vc.Type {
|
||||
case clusterv1alpha1.CredentialTypeX509Certificate, clusterv1alpha1.CredentialTypeServiceAccountToken:
|
||||
clusterSecret, err := getMutableClusterSecret(ctx, cli, clusterName)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "cluster %s is not mutable now", clusterName)
|
||||
}
|
||||
if err := cli.Delete(ctx, clusterSecret); err != nil {
|
||||
return errors.Wrapf(err, "failed to detach cluster %s", clusterName)
|
||||
}
|
||||
case CredentialTypeOCMManagedCluster:
|
||||
if args.managedClusterKubeConfigPath == "" {
|
||||
return errors.New("kubeconfig-path must be set to detach ocm managed cluster")
|
||||
}
|
||||
config, err := clientcmd.LoadFromFile(args.managedClusterKubeConfigPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
restConfig, err := clientcmd.BuildConfigFromKubeconfigGetter("", func() (*clientcmdapi.Config, error) {
|
||||
return config, nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = spoke.CleanSpokeClusterEnv(restConfig); err != nil {
|
||||
return err
|
||||
}
|
||||
managedCluster := ocmclusterv1.ManagedCluster{ObjectMeta: metav1.ObjectMeta{Name: clusterName}}
|
||||
if err = cli.Delete(context.Background(), &managedCluster); err != nil {
|
||||
if !apierrors.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// RenameCluster rename cluster
|
||||
func RenameCluster(ctx context.Context, k8sClient client.Client, oldClusterName string, newClusterName string) error {
|
||||
if newClusterName == ClusterLocalName {
|
||||
return ErrReservedLocalClusterName
|
||||
}
|
||||
clusterSecret, err := getMutableClusterSecret(ctx, k8sClient, oldClusterName)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "cluster %s is not mutable now", oldClusterName)
|
||||
}
|
||||
if err := ensureClusterNotExists(ctx, k8sClient, newClusterName); err != nil {
|
||||
return errors.Wrapf(err, "cannot set cluster name to %s", newClusterName)
|
||||
}
|
||||
if err := k8sClient.Delete(ctx, clusterSecret); err != nil {
|
||||
return errors.Wrapf(err, "failed to rename cluster from %s to %s", oldClusterName, newClusterName)
|
||||
}
|
||||
clusterSecret.ObjectMeta = metav1.ObjectMeta{
|
||||
Name: newClusterName,
|
||||
Namespace: ClusterGatewaySecretNamespace,
|
||||
Labels: clusterSecret.Labels,
|
||||
Annotations: clusterSecret.Annotations,
|
||||
}
|
||||
if err := k8sClient.Create(ctx, clusterSecret); err != nil {
|
||||
return errors.Wrapf(err, "failed to rename cluster from %s to %s", oldClusterName, newClusterName)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ensureClusterNotExists will check the cluster is not existed in control plane
|
||||
func ensureClusterNotExists(ctx context.Context, c client.Client, clusterName string) error {
|
||||
secret := &v1.Secret{}
|
||||
err := c.Get(ctx, types2.NamespacedName{Name: clusterName, Namespace: ClusterGatewaySecretNamespace}, secret)
|
||||
if err == nil {
|
||||
return ErrClusterExists
|
||||
_, err := GetVirtualCluster(ctx, c, clusterName)
|
||||
if err != nil {
|
||||
if IsClusterNotExists(err) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
if !errors2.IsNotFound(err) {
|
||||
return errors.Wrapf(err, "failed to check duplicate cluster secret")
|
||||
return ErrClusterExists
|
||||
}
|
||||
|
||||
// ensureNamespaceExists ensures vela namespace to be installed in child cluster
|
||||
func ensureNamespaceExists(ctx context.Context, c client.Client, clusterName string, createNamespace string) error {
|
||||
remoteCtx := ContextWithClusterName(ctx, clusterName)
|
||||
if err := c.Get(remoteCtx, apitypes.NamespacedName{Name: createNamespace}, &corev1.Namespace{}); err != nil {
|
||||
if !apierrors.IsNotFound(err) {
|
||||
return errors.Wrapf(err, "failed to check if namespace %s exists", createNamespace)
|
||||
}
|
||||
if err = c.Create(remoteCtx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: createNamespace}}); err != nil {
|
||||
return errors.Wrapf(err, "failed to create namespace %s", createNamespace)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetMutableClusterSecret retrieves the cluster secret and check if any application is using the cluster
|
||||
func GetMutableClusterSecret(ctx context.Context, c client.Client, clusterName string) (*v1.Secret, error) {
|
||||
clusterSecret := &v1.Secret{}
|
||||
if err := c.Get(ctx, types2.NamespacedName{Namespace: ClusterGatewaySecretNamespace, Name: clusterName}, clusterSecret); err != nil {
|
||||
// getMutableClusterSecret retrieves the cluster secret and check if any application is using the cluster
|
||||
// TODO(somefive): should rework the logic of checking application cluster usage
|
||||
func getMutableClusterSecret(ctx context.Context, c client.Client, clusterName string) (*corev1.Secret, error) {
|
||||
clusterSecret := &corev1.Secret{}
|
||||
if err := c.Get(ctx, apitypes.NamespacedName{Namespace: ClusterGatewaySecretNamespace, Name: clusterName}, clusterSecret); err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to find target cluster secret %s", clusterName)
|
||||
}
|
||||
labels := clusterSecret.GetLabels()
|
||||
if labels == nil || labels[v1alpha12.LabelKeyClusterCredentialType] == "" {
|
||||
return nil, fmt.Errorf("invalid cluster secret %s: cluster credential type label %s is not set", clusterName, v1alpha12.LabelKeyClusterCredentialType)
|
||||
if labels == nil || labels[clusterv1alpha1.LabelKeyClusterCredentialType] == "" {
|
||||
return nil, fmt.Errorf("invalid cluster secret %s: cluster credential type label %s is not set", clusterName, clusterv1alpha1.LabelKeyClusterCredentialType)
|
||||
}
|
||||
apps := &v1beta1.ApplicationList{}
|
||||
if err := c.List(ctx, apps); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to find applications to check clusters")
|
||||
}
|
||||
errs := errors3.ErrorList{}
|
||||
errs := velaerrors.ErrorList{}
|
||||
for _, app := range apps.Items {
|
||||
status, err := envbinding.GetEnvBindingPolicyStatus(app.DeepCopy(), "")
|
||||
if err == nil && status != nil {
|
||||
@@ -97,167 +507,3 @@ func GetMutableClusterSecret(ctx context.Context, c client.Client, clusterName s
|
||||
}
|
||||
return clusterSecret, nil
|
||||
}
|
||||
|
||||
// JoinClusterByKubeConfig add child cluster by kubeconfig path, return cluster info and error.
// It validates the kubeconfig's current context, derives the credential type
// (service-account token or X509 client certificate), stores it as a
// cluster-gateway secret, and ensures the vela system namespace exists in the
// joined cluster. The returned *api.Cluster is the kubeconfig cluster entry;
// note it is also returned (non-nil) alongside some errors.
func JoinClusterByKubeConfig(_ctx context.Context, k8sClient client.Client, kubeconfigPath string, clusterName string) (*api.Cluster, error) {
	// Load and validate the kubeconfig: current-context must be set and must
	// resolve to a cluster entry and an auth-info entry.
	config, err := clientcmd.LoadFromFile(kubeconfigPath)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to get kubeconfig")
	}
	if len(config.CurrentContext) == 0 {
		return nil, fmt.Errorf("current-context is not set")
	}
	ctx, ok := config.Contexts[config.CurrentContext]
	if !ok {
		return nil, fmt.Errorf("current-context %s not found", config.CurrentContext)
	}
	cluster, ok := config.Clusters[ctx.Cluster]
	if !ok {
		return nil, fmt.Errorf("cluster %s not found", ctx.Cluster)
	}
	authInfo, ok := config.AuthInfos[ctx.AuthInfo]
	if !ok {
		return nil, fmt.Errorf("authInfo %s not found", ctx.AuthInfo)
	}

	// Default the vela cluster name to the kubeconfig cluster name; `local`
	// is reserved for the hub cluster.
	if clusterName == "" {
		clusterName = ctx.Cluster
	}
	if clusterName == ClusterLocalName {
		return cluster, fmt.Errorf("cannot use `%s` as cluster name, it is reserved as the local cluster", ClusterLocalName)
	}

	if err := ensureClusterNotExists(_ctx, k8sClient, clusterName); err != nil {
		return cluster, errors.Wrapf(err, "cannot use cluster name %s", clusterName)
	}

	// Build the cluster-gateway secret data. A bearer token takes precedence;
	// otherwise fall back to X509 client certificate credentials.
	var credentialType v1alpha12.CredentialType
	data := map[string][]byte{
		"endpoint": []byte(cluster.Server),
		"ca.crt":   cluster.CertificateAuthorityData,
	}
	if len(authInfo.Token) > 0 {
		credentialType = v1alpha12.CredentialTypeServiceAccountToken
		data["token"] = []byte(authInfo.Token)
	} else {
		credentialType = v1alpha12.CredentialTypeX509Certificate
		data["tls.crt"] = authInfo.ClientCertificateData
		data["tls.key"] = authInfo.ClientKeyData
	}
	// The credential-type label is what cluster-gateway uses to identify this
	// secret as a cluster secret.
	secret := &v1.Secret{
		ObjectMeta: v12.ObjectMeta{
			Name:      clusterName,
			Namespace: ClusterGatewaySecretNamespace,
			Labels: map[string]string{
				v1alpha12.LabelKeyClusterCredentialType: string(credentialType),
			},
		},
		Type: v1.SecretTypeOpaque,
		Data: data,
	}

	if err := k8sClient.Create(_ctx, secret); err != nil {
		return cluster, errors.Wrapf(err, "failed to add cluster to kubernetes")
	}

	// Make sure the vela system namespace exists in the newly joined cluster.
	if err := ensureVelaSystemNamespaceInstalled(_ctx, k8sClient, clusterName, types.DefaultKubeVelaNS); err != nil {
		return nil, errors.Wrapf(err, "failed to create vela namespace in cluster %s", clusterName)
	}

	return cluster, nil
}
|
||||
|
||||
// DetachCluster detach cluster by name, if cluster is using by application, it will return error
|
||||
func DetachCluster(ctx context.Context, k8sClient client.Client, clusterName string) error {
|
||||
if clusterName == ClusterLocalName {
|
||||
return ErrReservedLocalClusterName
|
||||
}
|
||||
clusterSecret, err := GetMutableClusterSecret(ctx, k8sClient, clusterName)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "cluster %s is not mutable now", clusterName)
|
||||
}
|
||||
return k8sClient.Delete(ctx, clusterSecret)
|
||||
}
|
||||
|
||||
// RenameCluster rename cluster
|
||||
func RenameCluster(ctx context.Context, k8sClient client.Client, oldClusterName string, newClusterName string) error {
|
||||
if newClusterName == ClusterLocalName {
|
||||
return ErrReservedLocalClusterName
|
||||
}
|
||||
clusterSecret, err := GetMutableClusterSecret(ctx, k8sClient, oldClusterName)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "cluster %s is not mutable now", oldClusterName)
|
||||
}
|
||||
if err := ensureClusterNotExists(ctx, k8sClient, newClusterName); err != nil {
|
||||
return errors.Wrapf(err, "cannot set cluster name to %s", newClusterName)
|
||||
}
|
||||
if err := k8sClient.Delete(ctx, clusterSecret); err != nil {
|
||||
return errors.Wrapf(err, "failed to rename cluster from %s to %s", oldClusterName, newClusterName)
|
||||
}
|
||||
clusterSecret.ObjectMeta = v12.ObjectMeta{
|
||||
Name: newClusterName,
|
||||
Namespace: ClusterGatewaySecretNamespace,
|
||||
Labels: clusterSecret.Labels,
|
||||
Annotations: clusterSecret.Annotations,
|
||||
}
|
||||
if err := k8sClient.Create(ctx, clusterSecret); err != nil {
|
||||
return errors.Wrapf(err, "failed to rename cluster from %s to %s", oldClusterName, newClusterName)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ClusterInfo describes the basic information of a cluster
type ClusterInfo struct {
	Nodes             *v1.NodeList            // full node list of the cluster
	WorkerNumber      int                     // nodes without the node-role.kubernetes.io/master label
	MasterNumber      int                     // nodes carrying the node-role.kubernetes.io/master label
	MemoryCapacity    resource.Quantity       // memory capacity summed over all nodes
	CPUCapacity       resource.Quantity       // CPU capacity summed over all nodes
	PodCapacity       resource.Quantity       // pod capacity summed over all nodes
	MemoryAllocatable resource.Quantity       // allocatable memory summed over all nodes
	CPUAllocatable    resource.Quantity       // allocatable CPU summed over all nodes
	PodAllocatable    resource.Quantity       // allocatable pods summed over all nodes
	StorageClasses    *v14.StorageClassList   // storage classes available in the cluster
}
|
||||
|
||||
// GetClusterInfo retrieves current cluster info from cluster
|
||||
func GetClusterInfo(_ctx context.Context, k8sClient client.Client, clusterName string) (*ClusterInfo, error) {
|
||||
ctx := ContextWithClusterName(_ctx, clusterName)
|
||||
nodes := &v1.NodeList{}
|
||||
if err := k8sClient.List(ctx, nodes); err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to list cluster nodes")
|
||||
}
|
||||
var workerNumber, masterNumber int
|
||||
var memoryCapacity, cpuCapacity, podCapacity, memoryAllocatable, cpuAllocatable, podAllcatable resource.Quantity
|
||||
for _, node := range nodes.Items {
|
||||
if _, ok := node.Labels["node-role.kubernetes.io/master"]; ok {
|
||||
masterNumber++
|
||||
} else {
|
||||
workerNumber++
|
||||
}
|
||||
capacity := node.Status.Capacity
|
||||
memoryCapacity.Add(*capacity.Memory())
|
||||
cpuCapacity.Add(*capacity.Cpu())
|
||||
podCapacity.Add(*capacity.Pods())
|
||||
allocatable := node.Status.Allocatable
|
||||
memoryAllocatable.Add(*allocatable.Memory())
|
||||
cpuAllocatable.Add(*allocatable.Cpu())
|
||||
podAllcatable.Add(*allocatable.Pods())
|
||||
}
|
||||
storageClasses := &v14.StorageClassList{}
|
||||
if err := k8sClient.List(ctx, storageClasses); err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to list storage classes")
|
||||
}
|
||||
return &ClusterInfo{
|
||||
Nodes: nodes,
|
||||
WorkerNumber: workerNumber,
|
||||
MasterNumber: masterNumber,
|
||||
MemoryCapacity: memoryCapacity,
|
||||
CPUCapacity: cpuCapacity,
|
||||
PodCapacity: podCapacity,
|
||||
MemoryAllocatable: memoryAllocatable,
|
||||
CPUAllocatable: cpuAllocatable,
|
||||
PodAllocatable: podAllcatable,
|
||||
StorageClasses: storageClasses,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -26,6 +26,8 @@ import (
|
||||
// Sentinel errors shared by the cluster management operations in this package.
var (
	// ErrClusterExists cluster already exists
	ErrClusterExists = ClusterManagementError(fmt.Errorf("cluster already exists"))
	// ErrClusterNotExists cluster not exists
	ErrClusterNotExists = ClusterManagementError(fmt.Errorf("no such cluster"))
	// ErrReservedLocalClusterName reserved cluster name is used
	ErrReservedLocalClusterName = ClusterManagementError(fmt.Errorf("cluster name `local` is reserved for kubevela hub cluster"))
)
|
||||
|
||||
83
pkg/multicluster/o11n.go
Normal file
83
pkg/multicluster/o11n.go
Normal file
@@ -0,0 +1,83 @@
|
||||
/*
|
||||
Copyright 2020-2022 The KubeVela Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package multicluster
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
storagev1 "k8s.io/api/storage/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
// ClusterInfo describes the basic information of a cluster
type ClusterInfo struct {
	Nodes             *corev1.NodeList            // full node list of the cluster
	WorkerNumber      int                         // nodes without the node-role.kubernetes.io/master label
	MasterNumber      int                         // nodes carrying the node-role.kubernetes.io/master label
	MemoryCapacity    resource.Quantity           // memory capacity summed over all nodes
	CPUCapacity       resource.Quantity           // CPU capacity summed over all nodes
	PodCapacity       resource.Quantity           // pod capacity summed over all nodes
	MemoryAllocatable resource.Quantity           // allocatable memory summed over all nodes
	CPUAllocatable    resource.Quantity           // allocatable CPU summed over all nodes
	PodAllocatable    resource.Quantity           // allocatable pods summed over all nodes
	StorageClasses    *storagev1.StorageClassList // storage classes available in the cluster
}
|
||||
|
||||
// GetClusterInfo retrieves current cluster info from cluster
|
||||
func GetClusterInfo(_ctx context.Context, k8sClient client.Client, clusterName string) (*ClusterInfo, error) {
|
||||
ctx := ContextWithClusterName(_ctx, clusterName)
|
||||
nodes := &corev1.NodeList{}
|
||||
if err := k8sClient.List(ctx, nodes); err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to list cluster nodes")
|
||||
}
|
||||
var workerNumber, masterNumber int
|
||||
var memoryCapacity, cpuCapacity, podCapacity, memoryAllocatable, cpuAllocatable, podAllocatable resource.Quantity
|
||||
for _, node := range nodes.Items {
|
||||
if _, ok := node.Labels["node-role.kubernetes.io/master"]; ok {
|
||||
masterNumber++
|
||||
} else {
|
||||
workerNumber++
|
||||
}
|
||||
capacity := node.Status.Capacity
|
||||
memoryCapacity.Add(*capacity.Memory())
|
||||
cpuCapacity.Add(*capacity.Cpu())
|
||||
podCapacity.Add(*capacity.Pods())
|
||||
allocatable := node.Status.Allocatable
|
||||
memoryAllocatable.Add(*allocatable.Memory())
|
||||
cpuAllocatable.Add(*allocatable.Cpu())
|
||||
podAllocatable.Add(*allocatable.Pods())
|
||||
}
|
||||
storageClasses := &storagev1.StorageClassList{}
|
||||
if err := k8sClient.List(ctx, storageClasses); err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to list storage classes")
|
||||
}
|
||||
return &ClusterInfo{
|
||||
Nodes: nodes,
|
||||
WorkerNumber: workerNumber,
|
||||
MasterNumber: masterNumber,
|
||||
MemoryCapacity: memoryCapacity,
|
||||
CPUCapacity: cpuCapacity,
|
||||
PodCapacity: podCapacity,
|
||||
MemoryAllocatable: memoryAllocatable,
|
||||
CPUAllocatable: cpuAllocatable,
|
||||
PodAllocatable: podAllocatable,
|
||||
StorageClasses: storageClasses,
|
||||
}, nil
|
||||
}
|
||||
74
pkg/multicluster/suite_test.go
Normal file
74
pkg/multicluster/suite_test.go
Normal file
@@ -0,0 +1,74 @@
|
||||
/*
|
||||
Copyright 2020-2022 The KubeVela Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package multicluster
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/oam-dev/cluster-gateway/pkg/apis/cluster/v1alpha1"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/utils/pointer"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/envtest"
|
||||
|
||||
"github.com/oam-dev/kubevela/pkg/utils/common"
|
||||
)
|
||||
|
||||
var cfg *rest.Config
|
||||
var k8sClient client.Client
|
||||
var testEnv *envtest.Environment
|
||||
|
||||
// TestUtils wires the ginkgo suite in this package into the standard
// `go test` runner.
func TestUtils(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Utils Suite")
}
|
||||
|
||||
// BeforeSuite boots a local envtest control plane, registers the
// cluster-gateway scheme, and builds the shared k8sClient used by the specs.
var _ = BeforeSuite(func(done Done) {
	rand.Seed(time.Now().UnixNano())
	By("bootstrapping test environment for utils test")

	testEnv = &envtest.Environment{
		ControlPlaneStartTimeout: time.Minute * 3,
		ControlPlaneStopTimeout:  time.Minute,
		UseExistingCluster:       pointer.BoolPtr(false),
		// testdata holds the CRDs (e.g. ManagedCluster) required by the specs.
		CRDDirectoryPaths: []string{"./testdata"},
	}

	By("start kube test env")
	var err error
	cfg, err = testEnv.Start()
	Expect(err).ShouldNot(HaveOccurred())
	Expect(cfg).ToNot(BeNil())

	By("new kube client")
	cfg.Timeout = time.Minute * 2
	Expect(v1alpha1.AddToScheme(common.Scheme)).Should(Succeed())
	k8sClient, err = client.New(cfg, client.Options{Scheme: common.Scheme})
	Expect(err).Should(BeNil())
	Expect(k8sClient).ToNot(BeNil())
	close(done)
}, 240) // fail the suite if setup takes longer than 240 seconds
|
||||
|
||||
// AfterSuite stops the envtest control plane started in BeforeSuite.
var _ = AfterSuite(func() {
	By("tearing down the test environment")
	err := testEnv.Stop()
	Expect(err).ToNot(HaveOccurred())
})
|
||||
204
pkg/multicluster/testdata/managedclusters.yaml
vendored
Normal file
204
pkg/multicluster/testdata/managedclusters.yaml
vendored
Normal file
@@ -0,0 +1,204 @@
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: managedclusters.cluster.open-cluster-management.io
|
||||
spec:
|
||||
group: cluster.open-cluster-management.io
|
||||
names:
|
||||
kind: ManagedCluster
|
||||
listKind: ManagedClusterList
|
||||
plural: managedclusters
|
||||
shortNames:
|
||||
- mcl
|
||||
- mcls
|
||||
singular: managedcluster
|
||||
scope: Cluster
|
||||
preserveUnknownFields: false
|
||||
versions:
|
||||
- additionalPrinterColumns:
|
||||
- jsonPath: .spec.hubAcceptsClient
|
||||
name: Hub Accepted
|
||||
type: boolean
|
||||
- jsonPath: .spec.managedClusterClientConfigs[*].url
|
||||
name: Managed Cluster URLs
|
||||
type: string
|
||||
- jsonPath: .status.conditions[?(@.type=="ManagedClusterJoined")].status
|
||||
name: Joined
|
||||
type: string
|
||||
- jsonPath: .status.conditions[?(@.type=="ManagedClusterConditionAvailable")].status
|
||||
name: Available
|
||||
type: string
|
||||
- jsonPath: .metadata.creationTimestamp
|
||||
name: Age
|
||||
type: date
|
||||
name: v1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
description: "ManagedCluster represents the desired state and current status of managed cluster. ManagedCluster is a cluster scoped resource. The name is the cluster UID. \n The cluster join process follows a double opt-in process: \n 1. Agent on managed cluster creates CSR on hub with cluster UID and agent name. 2. Agent on managed cluster creates ManagedCluster on hub. 3. Cluster admin on hub approves the CSR for UID and agent name of the ManagedCluster. 4. Cluster admin sets spec.acceptClient of ManagedCluster to true. 5. Cluster admin on managed cluster creates credential of kubeconfig to hub. \n Once the hub creates the cluster namespace, the Klusterlet agent on the ManagedCluster pushes the credential to the hub to use against the kube-apiserver of the ManagedCluster."
|
||||
type: object
|
||||
properties:
|
||||
apiVersion:
|
||||
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
|
||||
type: string
|
||||
kind:
|
||||
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
description: Spec represents a desired configuration for the agent on the managed cluster.
|
||||
type: object
|
||||
properties:
|
||||
hubAcceptsClient:
|
||||
              description: hubAcceptsClient represents that hub accepts the joining of Klusterlet agent on the managed cluster with the hub. The default value is false, and can only be set true when the user on hub has an RBAC rule to UPDATE on the virtual subresource of managedclusters/accept. When the value is set true, a namespace whose name is the same as the name of ManagedCluster is created on the hub. This namespace represents the managed cluster, also role/rolebinding is created on the namespace to grant the permission of access from the agent on the managed cluster. When the value is set to false, the namespace representing the managed cluster is deleted.
|
||||
type: boolean
|
||||
leaseDurationSeconds:
|
||||
description: LeaseDurationSeconds is used to coordinate the lease update time of Klusterlet agents on the managed cluster. If its value is zero, the Klusterlet agent will update its lease every 60 seconds by default
|
||||
type: integer
|
||||
format: int32
|
||||
default: 60
|
||||
managedClusterClientConfigs:
|
||||
description: ManagedClusterClientConfigs represents a list of the apiserver address of the managed cluster. If it is empty, the managed cluster has no accessible address for the hub to connect with it.
|
||||
type: array
|
||||
items:
|
||||
description: ClientConfig represents the apiserver address of the managed cluster. TODO include credential to connect to managed cluster kube-apiserver
|
||||
type: object
|
||||
properties:
|
||||
caBundle:
|
||||
description: CABundle is the ca bundle to connect to apiserver of the managed cluster. System certs are used if it is not set.
|
||||
type: string
|
||||
format: byte
|
||||
url:
|
||||
description: URL is the URL of apiserver endpoint of the managed cluster.
|
||||
type: string
|
||||
taints:
|
||||
              description: Taints is a property of managed cluster that allow the cluster to be repelled when scheduling. Taints, including 'ManagedClusterUnavailable' and 'ManagedClusterUnreachable', can not be added/removed by agent running on the managed cluster; while it's fine to add/remove other taints from either hub cluster or managed cluster.
|
||||
type: array
|
||||
items:
|
||||
description: The managed cluster this Taint is attached to has the "effect" on any placement that does not tolerate the Taint.
|
||||
type: object
|
||||
required:
|
||||
- effect
|
||||
- key
|
||||
properties:
|
||||
effect:
|
||||
description: Effect indicates the effect of the taint on placements that do not tolerate the taint. Valid effects are NoSelect, PreferNoSelect and NoSelectIfNew.
|
||||
type: string
|
||||
enum:
|
||||
- NoSelect
|
||||
- PreferNoSelect
|
||||
- NoSelectIfNew
|
||||
key:
|
||||
description: Key is the taint key applied to a cluster. e.g. bar or foo.example.com/bar. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
|
||||
type: string
|
||||
maxLength: 316
|
||||
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
|
||||
timeAdded:
|
||||
description: TimeAdded represents the time at which the taint was added.
|
||||
type: string
|
||||
format: date-time
|
||||
nullable: true
|
||||
value:
|
||||
description: Value is the taint value corresponding to the taint key.
|
||||
type: string
|
||||
maxLength: 1024
|
||||
status:
|
||||
description: Status represents the current status of joined managed cluster
|
||||
type: object
|
||||
properties:
|
||||
allocatable:
|
||||
description: Allocatable represents the total allocatable resources on the managed cluster.
|
||||
type: object
|
||||
additionalProperties:
|
||||
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
x-kubernetes-int-or-string: true
|
||||
capacity:
|
||||
description: Capacity represents the total resource capacity from all nodeStatuses on the managed cluster.
|
||||
type: object
|
||||
additionalProperties:
|
||||
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
x-kubernetes-int-or-string: true
|
||||
clusterClaims:
|
||||
description: ClusterClaims represents cluster information that a managed cluster claims, for example a unique cluster identifier (id.k8s.io) and kubernetes version (kubeversion.open-cluster-management.io). They are written from the managed cluster. The set of claims is not uniform across a fleet, some claims can be vendor or version specific and may not be included from all managed clusters.
|
||||
type: array
|
||||
items:
|
||||
description: ManagedClusterClaim represents a ClusterClaim collected from a managed cluster.
|
||||
type: object
|
||||
properties:
|
||||
name:
|
||||
description: Name is the name of a ClusterClaim resource on managed cluster. It's a well known or customized name to identify the claim.
|
||||
type: string
|
||||
maxLength: 253
|
||||
minLength: 1
|
||||
value:
|
||||
description: Value is a claim-dependent string
|
||||
type: string
|
||||
maxLength: 1024
|
||||
minLength: 1
|
||||
conditions:
|
||||
description: Conditions contains the different condition statuses for this managed cluster.
|
||||
type: array
|
||||
items:
|
||||
description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }"
|
||||
type: object
|
||||
required:
|
||||
- lastTransitionTime
|
||||
- message
|
||||
- reason
|
||||
- status
|
||||
- type
|
||||
properties:
|
||||
lastTransitionTime:
|
||||
description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
|
||||
type: string
|
||||
format: date-time
|
||||
message:
|
||||
description: message is a human readable message indicating details about the transition. This may be an empty string.
|
||||
type: string
|
||||
maxLength: 32768
|
||||
observedGeneration:
|
||||
description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance.
|
||||
type: integer
|
||||
format: int64
|
||||
minimum: 0
|
||||
reason:
|
||||
description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty.
|
||||
type: string
|
||||
maxLength: 1024
|
||||
minLength: 1
|
||||
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
|
||||
status:
|
||||
description: status of the condition, one of True, False, Unknown.
|
||||
type: string
|
||||
enum:
|
||||
- "True"
|
||||
- "False"
|
||||
- Unknown
|
||||
type:
|
||||
description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
|
||||
type: string
|
||||
maxLength: 316
|
||||
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
|
||||
version:
|
||||
description: Version represents the kubernetes version of the managed cluster.
|
||||
type: object
|
||||
properties:
|
||||
kubernetes:
|
||||
description: Kubernetes is the kubernetes version of managed cluster.
|
||||
type: string
|
||||
served: true
|
||||
storage: true
|
||||
subresources:
|
||||
status: {}
|
||||
status:
|
||||
acceptedNames:
|
||||
kind: ""
|
||||
plural: ""
|
||||
conditions: []
|
||||
storedVersions: []
|
||||
186
pkg/multicluster/virtual_cluster.go
Normal file
186
pkg/multicluster/virtual_cluster.go
Normal file
@@ -0,0 +1,186 @@
|
||||
/*
|
||||
Copyright 2020-2022 The KubeVela Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package multicluster
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
apilabels "k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/selection"
|
||||
apitypes "k8s.io/apimachinery/pkg/types"
|
||||
clusterv1 "open-cluster-management.io/api/cluster/v1"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/oam-dev/cluster-gateway/pkg/apis/cluster/v1alpha1"
|
||||
|
||||
velaerrors "github.com/oam-dev/kubevela/pkg/utils/errors"
|
||||
)
|
||||
|
||||
const (
	// CredentialTypeOCMManagedCluster identifies the virtual cluster from ocm.
	// It extends cluster-gateway's CredentialType enumeration for clusters
	// represented by an OCM ManagedCluster rather than a cluster secret.
	CredentialTypeOCMManagedCluster v1alpha1.CredentialType = "ManagedCluster"
)
|
||||
|
||||
// VirtualCluster contains base info of cluster, it unifies the difference between different cluster implementations
// like cluster secret or ocm managed cluster
type VirtualCluster struct {
	Name     string                 // cluster name as registered in the control plane
	Type     v1alpha1.CredentialType // credential type (X509/token secret, or OCM ManagedCluster)
	EndPoint string                 // API endpoint; "-" for OCM managed clusters
	Accepted bool                   // whether the hub has accepted the cluster (always true for secret-backed clusters)
	Labels   map[string]string      // labels of the backing secret or ManagedCluster
}
|
||||
|
||||
// NewVirtualClusterFromSecret extract virtual cluster from cluster secret
|
||||
func NewVirtualClusterFromSecret(secret *corev1.Secret) (*VirtualCluster, error) {
|
||||
endpoint := string(secret.Data["endpoint"])
|
||||
labels := secret.GetLabels()
|
||||
if labels == nil {
|
||||
labels = map[string]string{}
|
||||
}
|
||||
if _endpoint, ok := labels[v1alpha1.LabelKeyClusterEndpointType]; ok {
|
||||
endpoint = _endpoint
|
||||
}
|
||||
credType, ok := labels[v1alpha1.LabelKeyClusterCredentialType]
|
||||
if !ok {
|
||||
return nil, errors.Errorf("secret is not a valid cluster secret, no credential type found")
|
||||
}
|
||||
return &VirtualCluster{
|
||||
Name: secret.Name,
|
||||
Type: v1alpha1.CredentialType(credType),
|
||||
EndPoint: endpoint,
|
||||
Accepted: true,
|
||||
Labels: labels,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// NewVirtualClusterFromManagedCluster extract virtual cluster from ocm managed cluster
|
||||
func NewVirtualClusterFromManagedCluster(managedCluster *clusterv1.ManagedCluster) (*VirtualCluster, error) {
|
||||
if len(managedCluster.Spec.ManagedClusterClientConfigs) == 0 {
|
||||
return nil, errors.Errorf("managed cluster has no client config")
|
||||
}
|
||||
return &VirtualCluster{
|
||||
Name: managedCluster.Name,
|
||||
Type: CredentialTypeOCMManagedCluster,
|
||||
EndPoint: "-",
|
||||
Accepted: managedCluster.Spec.HubAcceptsClient,
|
||||
Labels: managedCluster.GetLabels(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// GetVirtualCluster returns virtual cluster with given clusterName.
// It first looks for a cluster secret in ClusterGatewaySecretNamespace and,
// if that fails, falls back to an OCM ManagedCluster with the same name.
// NotFound errors (and, for OCM, a missing ManagedCluster CRD) are tolerated
// during the lookup; if neither backend yields the cluster and no other error
// occurred, ErrClusterNotExists is returned. Any other errors from both
// lookups are aggregated into a velaerrors.ErrorList.
func GetVirtualCluster(ctx context.Context, c client.Client, clusterName string) (vc *VirtualCluster, err error) {
	// 1. try the cluster-gateway secret
	secret := &corev1.Secret{}
	err = c.Get(ctx, apitypes.NamespacedName{
		Name:      clusterName,
		Namespace: ClusterGatewaySecretNamespace,
	}, secret)
	var secretErr error
	if err == nil {
		vc, secretErr = NewVirtualClusterFromSecret(secret)
		if secretErr == nil {
			return vc, nil
		}
		// secretErr now records why the secret was not a valid cluster secret
	}
	// keep only unexpected Get errors; NotFound just means "try OCM next"
	if err != nil && !apierrors.IsNotFound(err) {
		secretErr = err
	}

	// 2. fall back to the OCM ManagedCluster
	// NOTE(review): ManagedCluster is cluster-scoped; the Namespace field here
	// appears to be ignored by the client — confirm before relying on it.
	managedCluster := &clusterv1.ManagedCluster{}
	err = c.Get(ctx, apitypes.NamespacedName{
		Name:      clusterName,
		Namespace: ClusterGatewaySecretNamespace,
	}, managedCluster)
	var managedClusterErr error
	if err == nil {
		vc, managedClusterErr = NewVirtualClusterFromManagedCluster(managedCluster)
		if managedClusterErr == nil {
			return vc, nil
		}
	}

	// tolerate NotFound and the OCM CRD being absent from the control plane
	if err != nil && !apierrors.IsNotFound(err) && !velaerrors.IsCRDNotExists(err) {
		managedClusterErr = err
	}

	// neither backend had the cluster and nothing unexpected went wrong
	if secretErr == nil && managedClusterErr == nil {
		return nil, ErrClusterNotExists
	}

	// aggregate whatever real errors were collected from the two lookups
	var errs velaerrors.ErrorList
	if secretErr != nil {
		errs = append(errs, secretErr)
	}
	if managedClusterErr != nil {
		errs = append(errs, managedClusterErr)
	}
	return nil, errs
}
|
||||
|
||||
// MatchVirtualClusterLabels filters the list/delete operation of cluster list.
// It is a client.ListOption / client.DeleteAllOfOption that restricts the
// operation to cluster secrets (those carrying the credential-type label) in
// ClusterGatewaySecretNamespace, additionally matching the given label set.
type MatchVirtualClusterLabels map[string]string
|
||||
|
||||
// ApplyToList applies this configuration to the given list options.
|
||||
func (m MatchVirtualClusterLabels) ApplyToList(opts *client.ListOptions) {
|
||||
sel := apilabels.SelectorFromValidatedSet(map[string]string(m))
|
||||
r, err := apilabels.NewRequirement(v1alpha1.LabelKeyClusterCredentialType, selection.Exists, nil)
|
||||
if err == nil {
|
||||
sel = sel.Add(*r)
|
||||
}
|
||||
opts.LabelSelector = sel
|
||||
opts.Namespace = ClusterGatewaySecretNamespace
|
||||
}
|
||||
|
||||
// ApplyToDeleteAllOf applies this configuration to the given a List options.
// It reuses ApplyToList so DeleteAllOf targets exactly the same set of
// cluster secrets as a List with this option would return.
func (m MatchVirtualClusterLabels) ApplyToDeleteAllOf(opts *client.DeleteAllOfOptions) {
	m.ApplyToList(&opts.ListOptions)
}
|
||||
|
||||
// ListVirtualClusters will get all registered clusters in control plane.
// Listing with an empty label set matches every registered cluster, both
// secret-backed and OCM-managed.
func ListVirtualClusters(ctx context.Context, c client.Client) ([]VirtualCluster, error) {
	return FindVirtualClustersByLabels(ctx, c, map[string]string{})
}
|
||||
|
||||
// FindVirtualClustersByLabels will get all virtual clusters with matched labels in control plane
|
||||
func FindVirtualClustersByLabels(ctx context.Context, c client.Client, labels map[string]string) ([]VirtualCluster, error) {
|
||||
var clusters []VirtualCluster
|
||||
secrets := corev1.SecretList{}
|
||||
if err := c.List(ctx, &secrets, MatchVirtualClusterLabels(labels)); err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get clusterSecret secrets")
|
||||
}
|
||||
for _, secret := range secrets.Items {
|
||||
vc, err := NewVirtualClusterFromSecret(secret.DeepCopy())
|
||||
if err == nil {
|
||||
clusters = append(clusters, *vc)
|
||||
}
|
||||
}
|
||||
|
||||
managedClusters := clusterv1.ManagedClusterList{}
|
||||
if err := c.List(context.Background(), &managedClusters, client.MatchingLabels(labels)); err != nil && !velaerrors.IsCRDNotExists(err) {
|
||||
return nil, errors.Wrapf(err, "failed to get managed clusters")
|
||||
}
|
||||
for _, managedCluster := range managedClusters.Items {
|
||||
vc, err := NewVirtualClusterFromManagedCluster(managedCluster.DeepCopy())
|
||||
if err == nil {
|
||||
clusters = append(clusters, *vc)
|
||||
}
|
||||
}
|
||||
return clusters, nil
|
||||
}
|
||||
118
pkg/multicluster/virtual_cluster_test.go
Normal file
118
pkg/multicluster/virtual_cluster_test.go
Normal file
@@ -0,0 +1,118 @@
|
||||
/*
|
||||
Copyright 2020-2022 The KubeVela Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package multicluster
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/oam-dev/cluster-gateway/pkg/apis/cluster/v1alpha1"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clusterv1 "open-cluster-management.io/api/cluster/v1"
|
||||
)
|
||||
|
||||
// Integration test for virtual cluster lookup and listing against the suite's
// envtest client (k8sClient is provided by the package's suite setup).
var _ = Describe("Test Virtual Cluster", func() {

	It("Test Virtual Cluster", func() {
		ClusterGatewaySecretNamespace = "vela-system"
		ctx := context.Background()
		Expect(k8sClient.Create(ctx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ClusterGatewaySecretNamespace}})).Should(Succeed())

		// Three secrets: a fully-labeled valid cluster, a valid cluster with
		// only the credential-type label, and one with no labels (invalid).
		By("Initialize Secrets")
		Expect(k8sClient.Create(ctx, &v1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "test-cluster",
				Namespace: ClusterGatewaySecretNamespace,
				Labels: map[string]string{
					v1alpha1.LabelKeyClusterCredentialType: string(v1alpha1.CredentialTypeX509Certificate),
					v1alpha1.LabelKeyClusterEndpointType:   v1alpha1.ClusterEndpointTypeConst,
					"key":                                  "value",
				},
			},
		})).Should(Succeed())
		Expect(k8sClient.Create(ctx, &v1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "cluster-no-label",
				Namespace: ClusterGatewaySecretNamespace,
				Labels: map[string]string{
					v1alpha1.LabelKeyClusterCredentialType: string(v1alpha1.CredentialTypeX509Certificate),
				},
			},
		})).Should(Succeed())
		Expect(k8sClient.Create(ctx, &v1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "cluster-invalid",
				Namespace: ClusterGatewaySecretNamespace,
			},
		})).Should(Succeed())

		By("Test Get Virtual Cluster From Cluster Secret")
		vc, err := GetVirtualCluster(ctx, k8sClient, "test-cluster")
		Expect(err).Should(Succeed())
		Expect(vc.Type).Should(Equal(v1alpha1.CredentialTypeX509Certificate))
		Expect(vc.Labels["key"]).Should(Equal("value"))

		// Missing cluster surfaces ErrClusterNotExists ("no such cluster").
		_, err = GetVirtualCluster(ctx, k8sClient, "cluster-not-found")
		Expect(err).ShouldNot(Succeed())
		Expect(err.Error()).Should(ContainSubstring("no such cluster"))

		// Secret without the credential-type label is rejected.
		_, err = GetVirtualCluster(ctx, k8sClient, "cluster-invalid")
		Expect(err).ShouldNot(Succeed())
		Expect(err.Error()).Should(ContainSubstring("not a valid cluster"))

		// Two OCM clusters: one without client configs (bad), one valid.
		By("Add OCM ManagedCluster")
		Expect(k8sClient.Create(ctx, &clusterv1.ManagedCluster{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "ocm-bad-cluster",
				Namespace: ClusterGatewaySecretNamespace,
			},
		})).Should(Succeed())
		Expect(k8sClient.Create(ctx, &clusterv1.ManagedCluster{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "ocm-cluster",
				Namespace: ClusterGatewaySecretNamespace,
				Labels:    map[string]string{"key": "value"},
			},
			Spec: clusterv1.ManagedClusterSpec{
				ManagedClusterClientConfigs: []clusterv1.ClientConfig{{URL: "test-url"}},
			},
		})).Should(Succeed())

		By("Test Get Virtual Cluster From OCM")

		_, err = GetVirtualCluster(ctx, k8sClient, "ocm-bad-cluster")
		Expect(err).ShouldNot(Succeed())
		Expect(err.Error()).Should(ContainSubstring("has no client config"))

		vc, err = GetVirtualCluster(ctx, k8sClient, "ocm-cluster")
		Expect(err).Should(Succeed())
		Expect(vc.Type).Should(Equal(CredentialTypeOCMManagedCluster))

		By("Test List Virtual Clusters")

		// 3 = two valid secrets + one valid OCM cluster (invalid ones skipped).
		vcs, err := ListVirtualClusters(ctx, k8sClient)
		Expect(err).Should(Succeed())
		Expect(len(vcs)).Should(Equal(3))

		// 2 = the labeled secret + the labeled OCM cluster.
		vcs, err = FindVirtualClustersByLabels(ctx, k8sClient, map[string]string{"key": "value"})
		Expect(err).Should(Succeed())
		Expect(len(vcs)).Should(Equal(2))
	})

})
|
||||
@@ -165,6 +165,9 @@ const (
|
||||
// AnnotationPublishVersion is annotation that record the application workflow version.
|
||||
AnnotationPublishVersion = "app.oam.dev/publishVersion"
|
||||
|
||||
// AnnotationAutoUpdate is annotation that let application auto update when it finds definition changes
|
||||
AnnotationAutoUpdate = "app.oam.dev/autoUpdate"
|
||||
|
||||
// AnnotationWorkflowName specifies the workflow name for execution.
|
||||
AnnotationWorkflowName = "app.oam.dev/workflowName"
|
||||
|
||||
@@ -179,4 +182,7 @@ const (
|
||||
|
||||
// AnnotationWorkloadName indicates the managed workload's name by trait
|
||||
AnnotationWorkloadName = "trait.oam.dev/workload-name"
|
||||
|
||||
// AnnotationControllerRequirement indicates the controller version that can process the application.
|
||||
AnnotationControllerRequirement = "app.oam.dev/controller-version-require"
|
||||
)
|
||||
|
||||
@@ -591,10 +591,9 @@ func TestConvertWorkloadGVK2Def(t *testing.T) {
|
||||
Version: "v1",
|
||||
}, ref)
|
||||
|
||||
ref, err = util.ConvertWorkloadGVK2Definition(mapper, common.WorkloadGVK{APIVersion: "/apps/v1",
|
||||
_, err = util.ConvertWorkloadGVK2Definition(mapper, common.WorkloadGVK{APIVersion: "/apps/v1",
|
||||
Kind: "Deployment"})
|
||||
assert.Error(t, err)
|
||||
|
||||
}
|
||||
|
||||
func TestGenTraitName(t *testing.T) {
|
||||
|
||||
@@ -39,7 +39,7 @@ func (h *resourceKeeper) DispatchComponentRevision(ctx context.Context, cr *v1.C
|
||||
obj.SetName(cr.Name)
|
||||
obj.SetNamespace(cr.Namespace)
|
||||
obj.SetLabels(cr.Labels)
|
||||
if err = resourcetracker.RecordManifestInResourceTracker(multicluster.ContextInLocalCluster(ctx), h.Client, rt, obj, true); err != nil {
|
||||
if err = resourcetracker.RecordManifestsInResourceTracker(multicluster.ContextInLocalCluster(ctx), h.Client, rt, []*unstructured.Unstructured{obj}, true); err != nil {
|
||||
return errors.Wrapf(err, "failed to record componentrevision %s/%s/%s", oam.GetCluster(cr), cr.Namespace, cr.Name)
|
||||
}
|
||||
if err = h.Client.Create(multicluster.ContextWithClusterName(ctx, oam.GetCluster(cr)), cr); err != nil {
|
||||
|
||||
@@ -18,17 +18,21 @@ package resourcekeeper
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
kerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
|
||||
"github.com/oam-dev/kubevela/pkg/multicluster"
|
||||
"github.com/oam-dev/kubevela/pkg/oam"
|
||||
"github.com/oam-dev/kubevela/pkg/resourcetracker"
|
||||
"github.com/oam-dev/kubevela/pkg/utils/apply"
|
||||
)
|
||||
|
||||
// MaxDispatchConcurrent is the max dispatch concurrent number
|
||||
var MaxDispatchConcurrent = 10
|
||||
|
||||
// DispatchOption option for dispatch
|
||||
type DispatchOption interface {
|
||||
ApplyToDispatchConfig(*dispatchConfig)
|
||||
@@ -52,6 +56,21 @@ func (h *resourceKeeper) Dispatch(ctx context.Context, manifests []*unstructured
|
||||
if h.applyOncePolicy != nil && h.applyOncePolicy.Enable {
|
||||
options = append(options, MetaOnlyOption{})
|
||||
}
|
||||
// 1. record manifests in resourcetracker
|
||||
if err = h.record(ctx, manifests, options...); err != nil {
|
||||
return err
|
||||
}
|
||||
// 2. apply manifests
|
||||
if err = h.dispatch(ctx, manifests); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *resourceKeeper) record(ctx context.Context, manifests []*unstructured.Unstructured, options ...DispatchOption) error {
|
||||
var rootManifests []*unstructured.Unstructured
|
||||
var versionManifests []*unstructured.Unstructured
|
||||
|
||||
for _, manifest := range manifests {
|
||||
if manifest != nil {
|
||||
_options := options
|
||||
@@ -61,34 +80,61 @@ func (h *resourceKeeper) Dispatch(ctx context.Context, manifests []*unstructured
|
||||
}
|
||||
}
|
||||
cfg := newDispatchConfig(_options...)
|
||||
if err = h.dispatch(ctx, manifest, cfg); err != nil {
|
||||
return err
|
||||
if !cfg.skipRT {
|
||||
if cfg.useRoot {
|
||||
rootManifests = append(rootManifests, manifest)
|
||||
} else {
|
||||
versionManifests = append(versionManifests, manifest)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
cfg := newDispatchConfig(options...)
|
||||
if len(rootManifests) != 0 {
|
||||
rt, err := h.getRootRT(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to get resourcetracker")
|
||||
}
|
||||
if err = resourcetracker.RecordManifestsInResourceTracker(multicluster.ContextInLocalCluster(ctx), h.Client, rt, rootManifests, cfg.metaOnly); err != nil {
|
||||
return errors.Wrapf(err, "failed to record resources in resourcetracker %s", rt.Name)
|
||||
}
|
||||
}
|
||||
|
||||
rt, err := h.getCurrentRT(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to get resourcetracker")
|
||||
}
|
||||
if err = resourcetracker.RecordManifestsInResourceTracker(multicluster.ContextInLocalCluster(ctx), h.Client, rt, versionManifests, cfg.metaOnly); err != nil {
|
||||
return errors.Wrapf(err, "failed to record resources in resourcetracker %s", rt.Name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *resourceKeeper) dispatch(ctx context.Context, manifest *unstructured.Unstructured, cfg *dispatchConfig) (err error) {
|
||||
// 1. record manifests in resourcetracker
|
||||
if !cfg.skipRT {
|
||||
var rt *v1beta1.ResourceTracker
|
||||
if cfg.useRoot {
|
||||
rt, err = h.getRootRT(ctx)
|
||||
} else {
|
||||
rt, err = h.getCurrentRT(ctx)
|
||||
}
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to get resourcetracker")
|
||||
}
|
||||
if err = resourcetracker.RecordManifestInResourceTracker(multicluster.ContextInLocalCluster(ctx), h.Client, rt, manifest, cfg.metaOnly); err != nil {
|
||||
return errors.Wrapf(err, "failed to record resources in resourcetracker %s", rt.Name)
|
||||
}
|
||||
}
|
||||
// 2. apply manifests
|
||||
func (h *resourceKeeper) dispatch(ctx context.Context, manifests []*unstructured.Unstructured) error {
|
||||
var errs []error
|
||||
var l sync.Mutex
|
||||
var wg sync.WaitGroup
|
||||
|
||||
ch := make(chan struct{}, MaxDispatchConcurrent)
|
||||
applyOpts := []apply.ApplyOption{apply.MustBeControlledByApp(h.app), apply.NotUpdateRenderHashEqual()}
|
||||
if err := h.applicator.Apply(multicluster.ContextWithClusterName(ctx, oam.GetCluster(manifest)), manifest, applyOpts...); err != nil {
|
||||
return errors.Wrapf(err, "cannot apply manifest, name: %s apiVersion: %s kind: %s", manifest.GetName(), manifest.GetAPIVersion(), manifest.GetKind())
|
||||
|
||||
for i := 0; i < len(manifests); i++ {
|
||||
ch <- struct{}{}
|
||||
wg.Add(1)
|
||||
go func(index int) {
|
||||
defer wg.Done()
|
||||
manifest := manifests[index]
|
||||
applyCtx := multicluster.ContextWithClusterName(ctx, oam.GetCluster(manifest))
|
||||
err := h.applicator.Apply(applyCtx, manifest, applyOpts...)
|
||||
if err != nil {
|
||||
l.Lock()
|
||||
errs = append(errs, err)
|
||||
l.Unlock()
|
||||
}
|
||||
<-ch
|
||||
}(i)
|
||||
}
|
||||
return nil
|
||||
wg.Wait()
|
||||
return kerrors.NewAggregate(errs)
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user