Compare commits

..

2 Commits

Author SHA1 Message Date
github-actions[bot]
f89622eec7 Fix: Fix issue with imports/packages in status validations (#6963) (#6971)
(cherry picked from commit 8e3749f970)

Signed-off-by: Brian Kane <briankane1@gmail.com>
Co-authored-by: Brian Kane <briankane1@gmail.com>
2025-11-06 18:56:43 -08:00
Ayush Kumar
8401ff4d85 Fix: Prevent namespace admins from accessing vela-system definitions without explicit permissions (#6967)
* fix: webhook validation to check definition existence in namespaces and privilege checks

Signed-off-by: Reetika Malhotra <malhotra.reetika25@gmail.com>
Signed-off-by: Ayush Kumar <ayushshyamkumar888@gmail.com>

* fix: make reviewable changes

Signed-off-by: Ayush Kumar <ayushshyamkumar888@gmail.com>

* Update Ingress API version and enhance output validation tests

- Changed Ingress API version from v1beta1 to v1 in multiple test files to align with Kubernetes API updates.
- Added pathType specification to Ingress rules for better compatibility.
- Introduced a new e2e test for validating outputs in ComponentDefinition, TraitDefinition, PolicyDefinition, and WorkflowStepDefinition, ensuring proper handling of valid and invalid resources.
- Enhanced existing tests to check for non-existent CRDs in outputs and validate definitions with mixed valid and invalid resources.

Signed-off-by: Ayush Kumar <ayushshyamkumar888@gmail.com>

* fix: update comment for expected error count in definition permissions test

Signed-off-by: Ayush Kumar <ayushshyamkumar888@gmail.com>

* fix: improve error handling message in definitionExistsInNamespace function

Signed-off-by: Ayush Kumar <ayushshyamkumar888@gmail.com>

* fix: enhance definition permission checks and add corresponding test cases

Signed-off-by: Ayush Kumar <ayushshyamkumar888@gmail.com>

* fix: clarify comment for definition permission check in ValidateComponents

Signed-off-by: Ayush Kumar <ayushshyamkumar888@gmail.com>

* fix: add existing definitions to validation permissions tests for improved coverage

Signed-off-by: Ayush Kumar <ayushshyamkumar888@gmail.com>

---------

Signed-off-by: Reetika Malhotra <malhotra.reetika25@gmail.com>
Signed-off-by: Ayush Kumar <ayushshyamkumar888@gmail.com>
Co-authored-by: Reetika Malhotra <malhotra.reetika25@gmail.com>
2025-11-06 06:31:05 -08:00
194 changed files with 2568 additions and 13061 deletions

30
.github/CODEOWNERS vendored
View File

@@ -1,35 +1,35 @@
# This file is a github code protect rule follow the codeowners https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/creating-a-repository-on-github/about-code-owners#example-of-a-codeowners-file
* @barnettZQG @wonderflow @leejanee @Somefive @jefree-cat @FogDong @wangyikewxgm @chivalryq @anoop2811 @briankane @jguionnet
design/ @barnettZQG @leejanee @wonderflow @Somefive @jefree-cat @FogDong @anoop2811 @briankane @jguionnet
* @barnettZQG @wonderflow @leejanee @Somefive @jefree-cat @FogDong @wangyikewxgm @chivalryq @anoop2811
design/ @barnettZQG @leejanee @wonderflow @Somefive @jefree-cat @FogDong @anoop2811
# Owner of Core Controllers
pkg/controller/core.oam.dev @Somefive @FogDong @barnettZQG @wonderflow @wangyikewxgm @chivalryq @anoop2811 @briankane @jguionnet
pkg/controller/core.oam.dev @Somefive @FogDong @barnettZQG @wonderflow @wangyikewxgm @chivalryq @anoop2811
# Owner of Standard Controllers
pkg/controller/standard.oam.dev @wangyikewxgm @barnettZQG @wonderflow @Somefive @anoop2811 @FogDong @briankane @jguionnet
pkg/controller/standard.oam.dev @wangyikewxgm @barnettZQG @wonderflow @Somefive @anoop2811 @FogDong
# Owner of CUE
pkg/cue @leejanee @FogDong @Somefive @anoop2811 @briankane @jguionnet
pkg/stdlib @leejanee @FogDong @Somefive @anoop2811 @briankane @jguionnet
pkg/cue @leejanee @FogDong @Somefive @anoop2811
pkg/stdlib @leejanee @FogDong @Somefive @anoop2811
# Owner of Workflow
pkg/workflow @leejanee @FogDong @Somefive @wangyikewxgm @chivalryq @anoop2811 @briankane @jguionnet
pkg/workflow @leejanee @FogDong @Somefive @wangyikewxgm @chivalryq @anoop2811
# Owner of vela templates
vela-templates/ @Somefive @barnettZQG @wonderflow @FogDong @wangyikewxgm @chivalryq @anoop2811 @briankane @jguionnet
vela-templates/ @Somefive @barnettZQG @wonderflow @FogDong @wangyikewxgm @chivalryq @anoop2811
# Owner of vela CLI
references/cli/ @Somefive @StevenLeiZhang @charlie0129 @wangyikewxgm @chivalryq @anoop2811 @FogDong @briankane @jguionnet
references/cli/ @Somefive @StevenLeiZhang @charlie0129 @wangyikewxgm @chivalryq @anoop2811 @FogDong
# Owner of vela addon framework
pkg/addon/ @wangyikewxgm @wonderflow @charlie0129 @anoop2811 @FogDong @briankane @jguionnet
pkg/addon/ @wangyikewxgm @wonderflow @charlie0129 @anoop2811 @FogDong
# Owner of resource keeper and tracker
pkg/resourcekeeper @Somefive @FogDong @chivalryq @anoop2811 @briankane @jguionnet
pkg/resourcetracker @Somefive @FogDong @chivalryq @anoop2811 @briankane @jguionnet
pkg/resourcekeeper @Somefive @FogDong @chivalryq @anoop2811
pkg/resourcetracker @Somefive @FogDong @chivalryq @anoop2811
.github/ @chivalryq @wonderflow @Somefive @FogDong @wangyikewxgm @anoop2811 @briankane @jguionnet
makefiles @chivalryq @wonderflow @Somefive @FogDong @wangyikewxgm @anoop2811 @briankane @jguionnet
go.* @chivalryq @wonderflow @Somefive @FogDong @wangyikewxgm @anoop2811 @briankane @jguionnet
.github/ @chivalryq @wonderflow @Somefive @FogDong @wangyikewxgm @anoop2811
makefiles @chivalryq @wonderflow @Somefive @FogDong @wangyikewxgm @anoop2811
go.* @chivalryq @wonderflow @Somefive @FogDong @wangyikewxgm @anoop2811

View File

@@ -19,10 +19,6 @@ runs:
# ========================================================================
- name: Configure environment setup
uses: ./.github/actions/env-setup
with:
install-ginkgo: 'true'
install-setup-envtest: 'false'
install-kustomize: 'false'
- name: Build project
shell: bash

View File

@@ -1,27 +1,11 @@
name: 'Kubevela Test Environment Setup'
description: 'Sets up complete testing environment for Kubevela with Go, Kubernetes tools, and testing frameworks.'
description: 'Sets up complete testing environment for Kubevela with Go, Kubernetes tools, and Ginkgo framework for E2E testing.'
inputs:
go-version:
description: 'Go version to use for testing'
required: false
default: '1.23.8'
install-ginkgo:
description: 'Install Ginkgo testing framework'
required: false
default: 'true'
install-setup-envtest:
description: 'Install setup-envtest for integration testing'
required: false
default: 'false'
install-kustomize:
description: 'Install kustomize for manifest management'
required: false
default: 'false'
kustomize-version:
description: 'Kustomize version to install'
required: false
default: '4.5.4'
runs:
@@ -82,37 +66,7 @@ runs:
go mod verify
- name: Install Ginkgo testing framework
if: ${{ inputs.install-ginkgo == 'true' }}
shell: bash
run: |
echo "Installing Ginkgo testing framework..."
go install github.com/onsi/ginkgo/v2/ginkgo@v2.14.0
echo "Ginkgo installed successfully"
- name: Install setup-envtest
if: ${{ inputs.install-setup-envtest == 'true' }}
shell: bash
run: |
echo "Installing setup-envtest for integration testing..."
mkdir -p ./bin
GOBIN=$(pwd)/bin go install sigs.k8s.io/controller-runtime/tools/setup-envtest@v0.0.0-20240522175850-2e9781e9fc60
echo "setup-envtest installed successfully at ./bin/setup-envtest"
ls -la ./bin/setup-envtest
# Download and cache the Kubernetes binaries for envtest
echo "Downloading Kubernetes binaries for envtest..."
KUBEBUILDER_ASSETS=$(./bin/setup-envtest use 1.31.0 --bin-dir ./bin -p path)
echo "Kubernetes binaries downloaded successfully"
echo "KUBEBUILDER_ASSETS=${KUBEBUILDER_ASSETS}"
# Export for subsequent steps
echo "KUBEBUILDER_ASSETS=${KUBEBUILDER_ASSETS}" >> $GITHUB_ENV
- name: Install kustomize
if: ${{ inputs.install-kustomize == 'true' }}
shell: bash
run: |
echo "Installing kustomize version ${{ inputs.kustomize-version }}..."
mkdir -p ./bin
curl -sS https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh | bash -s ${{ inputs.kustomize-version }} $(pwd)/bin
echo "kustomize installed successfully at ./bin/kustomize"
./bin/kustomize version

View File

@@ -20,10 +20,6 @@ runs:
# ========================================================================
- name: Configure environment setup
uses: ./.github/actions/env-setup
with:
install-ginkgo: 'true'
install-setup-envtest: 'false'
install-kustomize: 'false'
# ========================================================================
# E2E Test Execution

View File

@@ -19,10 +19,6 @@ runs:
# ========================================================================
- name: Configure environment setup
uses: ./.github/actions/env-setup
with:
install-ginkgo: 'true'
install-setup-envtest: 'true'
install-kustomize: 'true'
# ========================================================================
# Unit Test Execution

View File

@@ -11,7 +11,4 @@ wangyuan249
chivalryq
FogDong
leejanee
barnettZQG
anoop2811
briankane
jguionnet
barnettZQG

View File

@@ -97,21 +97,6 @@ jobs:
with:
submodules: true
- name: Free Disk Space
run: |
echo "Disk space before cleanup:"
df -h
# Remove unnecessary software to free up disk space
sudo rm -rf /usr/share/dotnet
sudo rm -rf /usr/local/lib/android
sudo rm -rf /opt/ghc
sudo rm -rf /opt/hostedtoolcache/CodeQL
sudo docker image prune --all --force
echo "Disk space after cleanup:"
df -h
- name: Setup Env
uses: ./.github/actions/env-setup
@@ -126,24 +111,6 @@ jobs:
- name: Run cross-build
run: make cross-build
- name: Free Disk Space After Cross-Build
run: |
echo "Disk space before cleanup:"
df -h
# Remove cross-build artifacts to free up space
# (make build will rebuild binaries for current platform)
rm -rf _bin
# Clean Go build cache and test cache
go clean -cache -testcache
# Remove Docker build cache
sudo docker builder prune --all --force || true
echo "Disk space after cleanup:"
df -h
- name: Check Diff
run: |
export PATH=$(pwd)/bin/:$PATH

View File

@@ -111,9 +111,16 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
- name: Update kubectl plugin version in krew-index
uses: rajatjindal/krew-release-bot@df3eb197549e3568be8b4767eec31c5e8e8e6ad8 # v0.0.46
- name: Update Homebrew formula
uses: dawidd6/action-homebrew-bump-formula@d3667e5ae14df19579e4414897498e3e88f2f458 # v3.10.0
with:
token: ${{ secrets.HOMEBREW_TOKEN }}
formula: kubevela
tag: ${{ github.ref }}
revision: ${{ github.sha }}
force: false
provenance-vela-bins:
name: generate provenance for binaries

View File

@@ -28,15 +28,10 @@ jobs:
- name: Install Go tools
run: |
make goimports
- name: Build CLI
run: make vela-cli
- name: Setup KinD
uses: ./.github/actions/setup-kind-cluster
with:
name: sync-sdk
- name: Get the version
id: get_version
run: echo "VERSION=${GITHUB_REF}" >> $GITHUB_OUTPUT

View File

@@ -64,8 +64,8 @@ lint: golangci
@GOLANGCILINT=$(GOLANGCILINT) ./hack/utils/golangci-lint-wrapper.sh
## reviewable: Run the reviewable
## Run make build to compile vela binary before running this target to ensure all generated definitions are up to date.
reviewable: build manifests fmt vet lint staticcheck helm-doc-gen sdk_fmt
reviewable: manifests fmt vet lint staticcheck helm-doc-gen sdk_fmt
go mod tidy
# check-diff: Execute auto-gen code commands and ensure branch is clean.
check-diff: reviewable
@@ -103,7 +103,7 @@ manager:
$(GOBUILD_ENV) go build -o bin/manager -a -ldflags $(LDFLAGS) ./cmd/core/main.go
## manifests: Generate manifests e.g. CRD, RBAC etc.
manifests: tidy installcue kustomize sync-crds
manifests: installcue kustomize
go generate $(foreach t,pkg apis,./$(t)/...)
# TODO(yangsoon): kustomize will merge all CRD into a whole file, it may not work if we want patch more than one CRD in this way
$(KUSTOMIZE) build config/crd -o config/crd/base/core.oam.dev_applications.yaml

View File

@@ -3,7 +3,8 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.16.5
controller-gen.kubebuilder.io/version: v0.9.0
creationTimestamp: null
name: workflows.core.oam.dev
spec:
group: core.oam.dev
@@ -22,19 +23,14 @@ spec:
description: Workflow is the Schema for the workflow API
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
@@ -63,7 +59,6 @@ spec:
inputs:
description: Inputs is the inputs of the step
items:
description: InputItem defines an input variable of WorkflowStep
properties:
from:
type: string
@@ -71,6 +66,7 @@ spec:
type: string
required:
- from
- parameterKey
type: object
type: array
meta:
@@ -79,18 +75,12 @@ spec:
alias:
type: string
type: object
mode:
description: Mode is only valid for sub steps, it defines the mode
of the sub steps
nullable: true
type: string
name:
description: Name is the unique name of the workflow step.
type: string
outputs:
description: Outputs is the outputs of the step
items:
description: OutputItem defines an output variable of WorkflowStep
properties:
name:
type: string
@@ -120,7 +110,6 @@ spec:
inputs:
description: Inputs is the inputs of the step
items:
description: InputItem defines an input variable of WorkflowStep
properties:
from:
type: string
@@ -128,6 +117,7 @@ spec:
type: string
required:
- from
- parameterKey
type: object
type: array
meta:
@@ -142,7 +132,6 @@ spec:
outputs:
description: Outputs is the outputs of the step
items:
description: OutputItem defines an output variable of WorkflowStep
properties:
name:
type: string
@@ -164,6 +153,7 @@ spec:
description: Type is the type of the workflow step.
type: string
required:
- name
- type
type: object
type: array
@@ -174,6 +164,7 @@ spec:
description: Type is the type of the workflow step.
type: string
required:
- name
- type
type: object
type: array

View File

@@ -15,7 +15,9 @@ spec:
schematic:
cue:
template: |
import "vela/op"
import (
"vela/op"
)
output: op.#ApplyApplicationInParallel & {}

View File

@@ -16,7 +16,9 @@ spec:
schematic:
cue:
template: |
import "vela/op"
import (
"vela/op"
)
// apply application
output: op.#ApplyApplication & {}

View File

@@ -13,8 +13,12 @@ spec:
schematic:
cue:
template: |
import "vela/kube"
import "vela/builtin"
import (
"strconv"
"strings"
"vela/kube"
"vela/builtin"
)
output: kube.#Apply & {
$params: {

View File

@@ -12,7 +12,9 @@ spec:
schematic:
cue:
template: |
import "vela/kube"
import (
"vela/kube"
)
apply: kube.#Apply & {
$params: parameter

View File

@@ -16,7 +16,9 @@ spec:
schematic:
cue:
template: |
import "vela/op"
import (
"vela/op"
)
// apply remaining components and traits
apply: op.#ApplyRemaining & {

View File

@@ -13,8 +13,10 @@ spec:
schematic:
cue:
template: |
import "vela/kube"
import "vela/builtin"
import (
"vela/kube"
"vela/builtin"
)
apply: kube.#Apply & {
$params: value: {

View File

@@ -13,9 +13,12 @@ spec:
schematic:
cue:
template: |
import "vela/config"
import "vela/kube"
import "vela/builtin"
import (
"vela/config"
"vela/kube"
"vela/builtin"
"strings"
)
cfg: config.#CreateConfig & {
$params: {
@@ -84,9 +87,9 @@ spec:
}
}
providerBasic: {
accessKey!: string
secretKey!: string
region!: string
accessKey: string
secretKey: string
region: string
}
#AlibabaProvider: {
providerBasic
@@ -138,5 +141,5 @@ spec:
type: "ucloud"
name: *"ucloud-provider" | string
}
parameter: #AlibabaProvider | #AWSProvider | #AzureProvider | #BaiduProvider | #ECProvider | #GCPProvider | #TencentProvider | #UCloudProvider
parameter: *#AlibabaProvider | #AWSProvider | #AzureProvider | #BaiduProvider | #ECProvider | #GCPProvider | #TencentProvider | #UCloudProvider

View File

@@ -13,10 +13,13 @@ spec:
schematic:
cue:
template: |
import "vela/builtin"
import "vela/kube"
import "vela/util"
import "strings"
import (
"vela/builtin"
"vela/kube"
"vela/util"
"encoding/json"
"strings"
)
url: {
if parameter.context.git != _|_ {

View File

@@ -14,8 +14,10 @@ spec:
schematic:
cue:
template: |
import "vela/metrics"
import "vela/builtin"
import (
"vela/metrics"
"vela/builtin"
)
check: metrics.#PromCheck & {
$params: {

View File

@@ -12,7 +12,9 @@ spec:
schematic:
cue:
template: |
import "vela/kube"
import (
"vela/kube"
)
parameter: {
labelselector?: {...}

View File

@@ -12,9 +12,11 @@ spec:
schematic:
cue:
template: |
import "vela/builtin"
import "vela/query"
import "strconv"
import (
"vela/builtin"
"vela/query"
"strconv"
)
collect: query.#CollectServiceEndpoints & {
$params: app: {

View File

@@ -16,8 +16,6 @@ spec:
schematic:
cue:
template: |
import "list"
#PatchParams: {
// +usage=Specify the name of the target container, if not set, use the component name
containerName: *"" | string
@@ -75,7 +73,7 @@ spec:
}
// +patchStrategy=replace
args: list.Concat([[for a in _args if _delArgs[a] == _|_ {a}], [for a in _addArgs if _delArgs[a] == _|_ && _argsMap[a] == _|_ {a}]])
args: [for a in _args if _delArgs[a] == _|_ {a}] + [for a in _addArgs if _delArgs[a] == _|_ && _argsMap[a] == _|_ {a}]
}
}
// +patchStrategy=open

View File

@@ -17,9 +17,10 @@ spec:
schematic:
cue:
template: |
import "strconv"
import "strings"
import "list"
import (
"strconv"
"strings"
)
#PatchParams: {
// +usage=Specify the name of the target container, if not set, use the component name
@@ -66,7 +67,7 @@ spec:
_basePortsMap: {for _basePort in _basePorts {(strings.ToLower(_basePort.protocol) + strconv.FormatInt(_basePort.containerPort, 10)): _basePort}}
_portsMap: {for port in _params.ports {(strings.ToLower(port.protocol) + strconv.FormatInt(port.containerPort, 10)): port}}
// +patchStrategy=replace
ports: list.Concat([[for portVar in _basePorts {
ports: [for portVar in _basePorts {
containerPort: portVar.containerPort
protocol: portVar.protocol
name: portVar.name
@@ -79,7 +80,7 @@ spec:
hostIP: _portsMap[_uniqueKey].hostIP
}
}
}], [for port in _params.ports if _basePortsMap[strings.ToLower(port.protocol)+strconv.FormatInt(port.containerPort, 10)] == _|_ {
}] + [for port in _params.ports if _basePortsMap[strings.ToLower(port.protocol)+strconv.FormatInt(port.containerPort, 10)] == _|_ {
if port.containerPort != _|_ {
containerPort: port.containerPort
}
@@ -92,7 +93,7 @@ spec:
if port.hostIP != _|_ {
hostIP: port.hostIP
}
}]])
}]
}
}
}

View File

@@ -12,7 +12,9 @@ spec:
schematic:
cue:
template: |
import "vela/config"
import (
"vela/config"
)
deploy: config.#CreateConfig & {
$params: parameter

View File

@@ -11,8 +11,6 @@ spec:
schematic:
cue:
template: |
import "list"
mountsArray: {
pvc: *[
for v in parameter.volumeMounts.pvc {
@@ -132,7 +130,7 @@ spec:
},
] | []
}
volumesList: list.Concat([volumesArray.pvc, volumesArray.configMap, volumesArray.secret, volumesArray.emptyDir, volumesArray.hostPath])
volumesList: volumesArray.pvc + volumesArray.configMap + volumesArray.secret + volumesArray.emptyDir + volumesArray.hostPath
deDupVolumesArray: [
for val in [
for i, vi in volumesList {

View File

@@ -11,7 +11,9 @@ spec:
schematic:
cue:
template: |
import "strconv"
import (
"strconv"
)
mountsArray: [
if parameter.volumeMounts != _|_ && parameter.volumeMounts.pvc != _|_ for v in parameter.volumeMounts.pvc {

View File

@@ -12,7 +12,9 @@ spec:
schematic:
cue:
template: |
import "vela/config"
import (
"vela/config"
)
deploy: config.#DeleteConfig & {
$params: parameter

View File

@@ -12,9 +12,11 @@ spec:
schematic:
cue:
template: |
import "vela/kube"
import "vela/builtin"
import "encoding/yaml"
import (
"vela/kube"
"vela/builtin"
"encoding/yaml"
)
dependsOn: kube.#Read & {
$params: value: {

View File

@@ -14,7 +14,9 @@ spec:
schematic:
cue:
template: |
import "vela/op"
import (
"vela/op"
)
app: op.#DeployCloudResource & {
env: parameter.env

View File

@@ -14,9 +14,10 @@ spec:
schematic:
cue:
template: |
import "vela/multicluster"
import "vela/builtin"
import (
"vela/multicluster"
"vela/builtin"
)
if parameter.auto == false {
suspend: builtin.#Suspend & {$params: message: "Waiting approval to the deploy step \"\(context.stepName)\""}

View File

@@ -15,7 +15,9 @@ spec:
schematic:
cue:
template: |
import "vela/op"
import (
"vela/op"
)
app: op.#ApplyEnvBindApp & {
env: parameter.env

View File

@@ -15,7 +15,9 @@ spec:
schematic:
cue:
template: |
import "vela/op"
import (
"vela/op"
)
app: op.#Steps & {
load: op.#Load

View File

@@ -16,8 +16,6 @@ spec:
schematic:
cue:
template: |
import "list"
#PatchParams: {
// +usage=Specify the name of the target container, if not set, use the component name
containerName: *"" | string
@@ -51,7 +49,7 @@ spec:
if _baseEnv != _|_ {
_baseEnvMap: {for envVar in _baseEnv {(envVar.name): envVar}}
// +patchStrategy=replace
env: list.Concat([[for envVar in _baseEnv if _delKeys[envVar.name] == _|_ && !_params.replace {
env: [for envVar in _baseEnv if _delKeys[envVar.name] == _|_ && !_params.replace {
name: envVar.name
if _params.env[envVar.name] != _|_ {
value: _params.env[envVar.name]
@@ -64,10 +62,10 @@ spec:
valueFrom: envVar.valueFrom
}
}
}], [for k, v in _params.env if _delKeys[k] == _|_ && (_params.replace || _baseEnvMap[k] == _|_) {
}] + [for k, v in _params.env if _delKeys[k] == _|_ && (_params.replace || _baseEnvMap[k] == _|_) {
name: k
value: v
}]])
}]
}
}
}

View File

@@ -14,8 +14,10 @@ spec:
schematic:
cue:
template: |
import "vela/op"
import "vela/kube"
import (
"vela/op"
"vela/kube"
)
object: {
apiVersion: "v1"

View File

@@ -14,8 +14,10 @@ spec:
schematic:
cue:
template: |
import "vela/op"
import "vela/kube"
import (
"vela/op"
"vela/kube"
)
meta: {
name: *context.name | string

View File

@@ -12,7 +12,9 @@ spec:
schematic:
cue:
template: |
import "vela/kube"
import (
"vela/kube"
)
apply: kube.#Apply & {
$params: {

View File

@@ -12,9 +12,11 @@ spec:
schematic:
cue:
template: |
import "vela/kube"
import "encoding/base64"
import "encoding/json"
import (
"vela/kube"
"encoding/base64"
"encoding/json"
)
secret: {
data: *parameter.data | {}

View File

@@ -15,8 +15,10 @@ spec:
schematic:
cue:
template: |
import "strconv"
import "strings"
import (
"strconv"
"strings"
)
outputs: service: {
apiVersion: "v1"
@@ -94,17 +96,21 @@ spec:
stage: PostDispatch
status:
customStatus: |-
service: context.outputs.service
message: *"" | string
service: context.outputs.service
if service.spec.type == "ClusterIP" {
message: "ClusterIP: \(service.spec.clusterIP)"
}
if service.spec.type == "LoadBalancer" {
status: service.status
isHealth: *false | bool
message: *"ExternalIP: Pending" | string
if status != _|_ if status.loadBalancer != _|_ if status.loadBalancer.ingress != _|_ if len(status.loadBalancer.ingress) > 0 if status.loadBalancer.ingress[0].ip != _|_ {
isHealth: true
}
if !isHealth {
message: "ExternalIP: Pending"
}
if isHealth {
message: "ExternalIP: \(status.loadBalancer.ingress[0].ip)"
}
}

View File

@@ -17,7 +17,6 @@ spec:
template: |
import "strconv"
let nameSuffix = {
if parameter.name != _|_ {"-" + parameter.name}
if parameter.name == _|_ {""}
@@ -161,17 +160,13 @@ spec:
if parameter.name == _|_ { "" }
}
let ingressMetaName = context.name + nameSuffix
let igList = [for i in context.outputs if (i.kind == "Ingress") && (i.metadata.name == ingressMetaName) {i}]
ig: *_|_ | _
if len(igList) > 0 {
ig: igList[0]
}
let ig = [for i in context.outputs if (i.kind == "Ingress") && (i.metadata.name == ingressMetaName) {i}][0]
igs: *{} | {}
if ig != _|_ if ig.status != _|_ if ig.status.loadbalancer != _|_ if len(ig.status.loadbalancer.ingress) > 0 {
if ig != _|_ if ig.status != _|_ if ig.status.loadbalancer != _|_ {
igs: ig.status.loadbalancer.ingress[0]
}
igr: *{} | {}
if ig != _|_ if ig.spec != _|_ if len(ig.spec.rules) > 0 {
if ig != _|_ if ig.spec != _|_ {
igr: ig.spec.rules[0]
}
if igs == _|_ {

View File

@@ -12,9 +12,11 @@ spec:
schematic:
cue:
template: |
import "vela/kube"
import "vela/util"
import "encoding/base64"
import (
"vela/kube"
"vela/util"
"encoding/base64"
)
output: kube.#Read & {
$params: value: {

View File

@@ -69,16 +69,11 @@ spec:
message: "No loadBalancer found, visiting by using 'vela port-forward " + context.appName + "'\n"
}
if len(igs) > 0 {
let rules = context.outputs.ingress.spec.rules
host: *"" | string
if rules != _|_ if len(rules) > 0 if rules[0].host != _|_ {
host: rules[0].host
}
if igs[0].ip != _|_ {
message: "Visiting URL: " + host + ", IP: " + igs[0].ip
message: "Visiting URL: " + context.outputs.ingress.spec.rules[0].host + ", IP: " + igs[0].ip
}
if igs[0].ip == _|_ {
message: "Visiting URL: " + host
message: "Visiting URL: " + context.outputs.ingress.spec.rules[0].host
}
}
healthPolicy: 'isHealth: len(context.outputs.service.spec.clusterIP) > 0'

View File

@@ -62,16 +62,11 @@ spec:
message: "No loadBalancer found, visiting by using 'vela port-forward " + context.appName + "'\n"
}
if len(igs) > 0 {
let rules = context.outputs.ingress.spec.rules
host: *"" | string
if rules != _|_ if len(rules) > 0 if rules[0].host != _|_ {
host: rules[0].host
}
if igs[0].ip != _|_ {
message: "Visiting URL: " + host + ", IP: " + igs[0].ip
message: "Visiting URL: " + context.outputs.ingress.spec.rules[0].host + ", IP: " + igs[0].ip
}
if igs[0].ip == _|_ {
message: "Visiting URL: " + host
message: "Visiting URL: " + context.outputs.ingress.spec.rules[0].host
}
}
healthPolicy: 'isHealth: len(context.outputs.service.spec.clusterIP) > 0'

View File

@@ -17,8 +17,6 @@ spec:
schematic:
cue:
template: |
import "list"
patch: spec: template: spec: {
// +patchKey=name
containers: [{
@@ -45,10 +43,10 @@ spec:
}
// +patchKey=name
volumeMounts: list.Concat([[{
volumeMounts: [{
name: parameter.mountName
mountPath: parameter.initMountPath
}], parameter.extraVolumeMounts])
}] + parameter.extraVolumeMounts
}]
// +patchKey=name
volumes: [{

View File

@@ -11,12 +11,7 @@ spec:
schematic:
cue:
template: |
output: {
if len(parameter.objects) > 0 {
parameter.objects[0]
}
...
}
output: parameter.objects[0]
outputs: {
for i, v in parameter.objects {

View File

@@ -12,7 +12,9 @@ spec:
schematic:
cue:
template: |
import "vela/config"
import (
"vela/config"
)
output: config.#ListConfig & {
$params: parameter

View File

@@ -19,7 +19,9 @@ spec:
schematic:
cue:
template: |
import "encoding/json"
import (
"encoding/json"
)
outputs: nocalhostService: {
apiVersion: "v1"

View File

@@ -12,12 +12,14 @@ spec:
schematic:
cue:
template: |
import "vela/http"
import "vela/email"
import "vela/kube"
import "vela/util"
import "encoding/base64"
import "encoding/json"
import (
"vela/http"
"vela/email"
"vela/kube"
"vela/util"
"encoding/base64"
"encoding/json"
)
parameter: {
// +usage=Please fulfill its url and message if you want to send Lark messages

View File

@@ -12,7 +12,9 @@ spec:
schematic:
cue:
template: |
import "vela/builtin"
import (
"vela/builtin"
)
parameter: message: string

View File

@@ -49,16 +49,11 @@ spec:
message: "No loadBalancer found, visiting by using 'vela port-forward " + context.appName + " --route'\n"
}
if len(igs) > 0 {
let rules = context.outputs.ingress.spec.rules
host: *"" | string
if rules != _|_ if len(rules) > 0 if rules[0].host != _|_ {
host: rules[0].host
}
if igs[0].ip != _|_ {
message: "Visiting URL: " + host + ", IP: " + igs[0].ip
message: "Visiting URL: " + context.outputs.ingress.spec.rules[0].host + ", IP: " + igs[0].ip
}
if igs[0].ip == _|_ {
message: "Visiting URL: " + host
message: "Visiting URL: " + context.outputs.ingress.spec.rules[0].host
}
}
workloadRefPath: ""

View File

@@ -12,7 +12,9 @@ spec:
schematic:
cue:
template: |
import "vela/config"
import (
"vela/config"
)
output: config.#ReadConfig & {
$params: parameter

View File

@@ -12,7 +12,9 @@ spec:
schematic:
cue:
template: |
import "vela/kube"
import (
"vela/kube"
)
output: kube.#Read & {
$params: {

View File

@@ -13,9 +13,11 @@ spec:
schematic:
cue:
template: |
import "vela/op"
import "vela/http"
import "encoding/json"
import (
"vela/op"
"vela/http"
"encoding/json"
)
req: http.#HTTPDo & {
$params: {

View File

@@ -14,7 +14,9 @@ spec:
schematic:
cue:
template: |
import "vela/op"
import (
"vela/op"
)
app: op.#ShareCloudResource & {
env: parameter.env

View File

@@ -11,8 +11,10 @@ spec:
schematic:
cue:
template: |
import "strconv"
import "strings"
import (
"strconv"
"strings"
)
mountsArray: [
if parameter.volumeMounts != _|_ if parameter.volumeMounts.pvc != _|_ for v in parameter.volumeMounts.pvc {

View File

@@ -12,7 +12,9 @@ spec:
schematic:
cue:
template: |
import "vela/builtin"
import (
"vela/builtin"
)
suspend: builtin.#Suspend & {
$params: parameter

View File

@@ -12,9 +12,11 @@ spec:
schematic:
cue:
template: |
import "vela/kube"
import "vela/builtin"
import "vela/util"
import (
"vela/kube"
"vela/builtin"
"vela/util"
)
mountsArray: [
if parameter.storage != _|_ && parameter.storage.secret != _|_ for v in parameter.storage.secret {
@@ -124,6 +126,8 @@ spec:
}
parameter: {
// +usage=Specify the name of the addon.
addonName: string
// +usage=Specify the vela command
command: [...string]
// +usage=Specify the image

View File

@@ -12,11 +12,13 @@ spec:
schematic:
cue:
template: |
import "vela/http"
import "vela/kube"
import "vela/util"
import "encoding/json"
import "encoding/base64"
import (
"vela/http"
"vela/kube"
"vela/util"
"encoding/json"
"encoding/base64"
)
data: {
if parameter.data == _|_ {

View File

@@ -11,8 +11,10 @@ spec:
schematic:
cue:
template: |
import "strconv"
import "strings"
import (
"strconv"
"strings"
)
mountsArray: [
if parameter.volumeMounts != _|_ && parameter.volumeMounts.pvc != _|_ for v in parameter.volumeMounts.pvc {

View File

@@ -1,38 +0,0 @@
/*
Copyright 2025 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"github.com/spf13/pflag"
standardcontroller "github.com/oam-dev/kubevela/pkg/controller"
)
// AdmissionConfig contains admission control configuration.
type AdmissionConfig struct {
// Fields will be populated based on what standardcontroller.AddAdmissionFlags sets
}
// NewAdmissionConfig creates a new AdmissionConfig with defaults.
func NewAdmissionConfig() *AdmissionConfig {
return &AdmissionConfig{}
}
// AddFlags registers admission configuration flags.
func (c *AdmissionConfig) AddFlags(fs *pflag.FlagSet) {
standardcontroller.AddAdmissionFlags(fs)
}

View File

@@ -1,56 +0,0 @@
/*
Copyright 2025 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"time"
"github.com/spf13/pflag"
commonconfig "github.com/oam-dev/kubevela/pkg/controller/common"
)
// ApplicationConfig holds configuration specific to the application controller.
type ApplicationConfig struct {
	// ReSyncPeriod is the interval at which applications are re-synced,
	// also known as the state-keep interval.
	ReSyncPeriod time.Duration
}

// NewApplicationConfig returns an ApplicationConfig seeded from the current
// commonconfig default.
func NewApplicationConfig() *ApplicationConfig {
	cfg := &ApplicationConfig{}
	cfg.ReSyncPeriod = commonconfig.ApplicationReSyncPeriod
	return cfg
}

// AddFlags registers the application configuration flags on fs.
func (cfg *ApplicationConfig) AddFlags(fs *pflag.FlagSet) {
	fs.DurationVar(&cfg.ReSyncPeriod, "application-re-sync-period", cfg.ReSyncPeriod,
		"Re-sync period for application to re-sync, also known as the state-keep interval.")
}

// SyncToApplicationGlobals copies the parsed configuration back into the
// commonconfig package globals. Call it after flag parsing so the
// application controller observes the configured values.
//
// NOTE: this exists for backward compatibility with legacy code that reads
// the commonconfig globals; prefer injecting configuration over globals.
//
// Flow: CLI flags -> ApplicationConfig fields -> commonconfig globals.
func (cfg *ApplicationConfig) SyncToApplicationGlobals() {
	commonconfig.ApplicationReSyncPeriod = cfg.ReSyncPeriod
}

View File

@@ -1,40 +0,0 @@
/*
Copyright 2025 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
pkgclient "github.com/kubevela/pkg/controller/client"
"github.com/spf13/pflag"
)
// ClientConfig contains controller client configuration.
// This wraps the external package's client configuration flags.
//
// It carries no fields of its own: the actual settings are defined and
// stored by the pkgclient package. The wrapper keeps client flag
// registration consistent with the other config modules in this package.
type ClientConfig struct {
// Note: The actual configuration is managed by the pkgclient package
// This is a wrapper to maintain consistency with our config pattern
}
// NewClientConfig creates a new ClientConfig with defaults.
func NewClientConfig() *ClientConfig {
return &ClientConfig{}
}
// AddFlags registers client configuration flags.
// Delegates to the external package's flag registration
// (pkgclient.AddTimeoutControllerClientFlags).
func (c *ClientConfig) AddFlags(fs *pflag.FlagSet) {
pkgclient.AddTimeoutControllerClientFlags(fs)
}

View File

@@ -1,67 +0,0 @@
/*
Copyright 2025 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"github.com/spf13/pflag"
oamcontroller "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev"
)
// ControllerConfig wraps oamcontroller.Args and owns the registration of
// its CLI flags. The embedded Args struct supplies the fields; this type
// became the home of AddFlags when flag registration was centralized in
// the config package during refactoring.
type ControllerConfig struct {
	// Embedded Args provides the revision limits and reconcile settings.
	oamcontroller.Args
}

// NewControllerConfig returns a ControllerConfig with the default revision
// limits and reconcile settings.
func NewControllerConfig() *ControllerConfig {
	args := oamcontroller.Args{
		RevisionLimit:             50,
		AppRevisionLimit:          10,
		DefRevisionLimit:          20,
		AutoGenWorkloadDefinition: true,
		ConcurrentReconciles:      4,
		IgnoreAppWithoutControllerRequirement:        false,
		IgnoreDefinitionWithoutControllerRequirement: false,
	}
	return &ControllerConfig{Args: args}
}

// AddFlags registers the controller configuration flags on fs.
// This method was moved here from oamcontroller.Args during refactoring
// to centralize configuration management.
func (cc *ControllerConfig) AddFlags(fs *pflag.FlagSet) {
	fs.IntVar(&cc.RevisionLimit, "revision-limit", cc.RevisionLimit,
		"RevisionLimit is the maximum number of revisions that will be maintained. The default value is 50.")
	fs.IntVar(&cc.AppRevisionLimit, "application-revision-limit", cc.AppRevisionLimit,
		"application-revision-limit is the maximum number of application useless revisions that will be maintained, if the useless revisions exceed this number, older ones will be GCed first.The default value is 10.")
	fs.IntVar(&cc.DefRevisionLimit, "definition-revision-limit", cc.DefRevisionLimit,
		"definition-revision-limit is the maximum number of component/trait definition useless revisions that will be maintained, if the useless revisions exceed this number, older ones will be GCed first.The default value is 20.")
	fs.BoolVar(&cc.AutoGenWorkloadDefinition, "autogen-workload-definition", cc.AutoGenWorkloadDefinition,
		"Automatic generated workloadDefinition which componentDefinition refers to.")
	fs.IntVar(&cc.ConcurrentReconciles, "concurrent-reconciles", cc.ConcurrentReconciles,
		"concurrent-reconciles is the concurrent reconcile number of the controller. The default value is 4")
	fs.BoolVar(&cc.IgnoreAppWithoutControllerRequirement, "ignore-app-without-controller-version", cc.IgnoreAppWithoutControllerRequirement,
		"If true, application controller will not process the app without 'app.oam.dev/controller-version-require' annotation")
	fs.BoolVar(&cc.IgnoreDefinitionWithoutControllerRequirement, "ignore-definition-without-controller-version", cc.IgnoreDefinitionWithoutControllerRequirement,
		"If true, trait/component/workflowstep definition controller will not process the definition without 'definition.oam.dev/controller-version-require' annotation")
}

View File

@@ -1,61 +0,0 @@
/*
Copyright 2025 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"github.com/kubevela/pkg/cue/cuex"
"github.com/spf13/pflag"
)
// CUEConfig holds the CUE language/compiler settings.
type CUEConfig struct {
	// EnableExternalPackage allows third-party CUE packages to be loaded
	// into the default compiler.
	EnableExternalPackage bool
	// EnableExternalPackageWatch reloads external CUE packages on change;
	// requires EnableExternalPackage.
	EnableExternalPackageWatch bool
}

// NewCUEConfig returns a CUEConfig seeded from the current cuex globals.
func NewCUEConfig() *CUEConfig {
	cfg := &CUEConfig{}
	cfg.EnableExternalPackage = cuex.EnableExternalPackageForDefaultCompiler
	cfg.EnableExternalPackageWatch = cuex.EnableExternalPackageWatchForDefaultCompiler
	return cfg
}

// AddFlags registers the CUE configuration flags on fs.
func (cfg *CUEConfig) AddFlags(fs *pflag.FlagSet) {
	fs.BoolVar(&cfg.EnableExternalPackage, "enable-external-package-for-default-compiler", cfg.EnableExternalPackage,
		"Enable loading third-party CUE packages into the default CUE compiler. When enabled, external CUE packages can be imported and used in CUE templates.")
	fs.BoolVar(&cfg.EnableExternalPackageWatch, "enable-external-package-watch-for-default-compiler", cfg.EnableExternalPackageWatch,
		"Enable watching for changes in external CUE packages and automatically reload them when modified. Requires enable-external-package-for-default-compiler to be enabled.")
}

// SyncToCUEGlobals copies the parsed configuration back into the cuex
// package globals. Call it after flag parsing so the default CUE compiler
// observes the configured values.
//
// NOTE: this exists for backward compatibility with legacy code that reads
// the cuex globals; prefer injecting compiler configuration over globals.
//
// Flow: CLI flags -> CUEConfig fields -> cuex globals.
func (cfg *CUEConfig) SyncToCUEGlobals() {
	cuex.EnableExternalPackageForDefaultCompiler = cfg.EnableExternalPackage
	cuex.EnableExternalPackageWatchForDefaultCompiler = cfg.EnableExternalPackageWatch
}

View File

@@ -1,40 +0,0 @@
/*
Copyright 2025 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"github.com/spf13/pflag"
utilfeature "k8s.io/apiserver/pkg/util/feature"
)
// FeatureConfig contains feature gate configuration.
// This wraps the Kubernetes feature gate system.
//
// It carries no fields of its own: feature gates are defined and stored by
// utilfeature.DefaultMutableFeatureGate. The wrapper keeps feature flag
// registration consistent with the other config modules in this package.
type FeatureConfig struct {
// Note: The actual configuration is managed by the utilfeature package
// This is a wrapper to maintain consistency with our config pattern
}
// NewFeatureConfig creates a new FeatureConfig with defaults.
func NewFeatureConfig() *FeatureConfig {
return &FeatureConfig{}
}
// AddFlags registers feature gate configuration flags.
// Delegates to the Kubernetes feature gate system
// (utilfeature.DefaultMutableFeatureGate.AddFlag).
func (c *FeatureConfig) AddFlags(fs *pflag.FlagSet) {
utilfeature.DefaultMutableFeatureGate.AddFlag(fs)
}

View File

@@ -1,42 +0,0 @@
/*
Copyright 2025 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
utillog "github.com/kubevela/pkg/util/log"
"github.com/spf13/pflag"
)
// KLogConfig contains klog configuration.
// This wraps the Kubernetes logging configuration.
type KLogConfig struct {
// Reference to observability config for log settings.
// NOTE(review): this field is stored by the constructor but is not read
// by AddFlags below — presumably reserved for log-related wiring done
// elsewhere; confirm before removing.
observability *ObservabilityConfig
}
// NewKLogConfig creates a new KLogConfig.
// The observability config is retained for log settings (see field note).
func NewKLogConfig(observability *ObservabilityConfig) *KLogConfig {
return &KLogConfig{
observability: observability,
}
}
// AddFlags registers klog configuration flags.
// Delegates to utillog.AddFlags for the base klog flag set.
func (c *KLogConfig) AddFlags(fs *pflag.FlagSet) {
// Add base klog flags
utillog.AddFlags(fs)
}

View File

@@ -1,49 +0,0 @@
/*
Copyright 2025 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"time"
"github.com/spf13/pflag"
)
// KubernetesConfig holds the Kubernetes API client settings.
type KubernetesConfig struct {
	// QPS is the queries-per-second budget for reconcile clients.
	QPS float64
	// Burst is the burst allowance for reconcile clients.
	Burst int
	// InformerSyncPeriod is the controller-runtime informer re-sync period.
	InformerSyncPeriod time.Duration
}

// NewKubernetesConfig returns a KubernetesConfig with default client limits.
func NewKubernetesConfig() *KubernetesConfig {
	cfg := &KubernetesConfig{}
	cfg.QPS = 50
	cfg.Burst = 100
	cfg.InformerSyncPeriod = 10 * time.Hour
	return cfg
}

// AddFlags registers the Kubernetes client configuration flags on fs.
func (cfg *KubernetesConfig) AddFlags(fs *pflag.FlagSet) {
	fs.Float64Var(&cfg.QPS, "kube-api-qps", cfg.QPS,
		"the qps for reconcile clients. Low qps may lead to low throughput. High qps may give stress to api-server. Raise this value if concurrent-reconciles is set to be high.")
	fs.IntVar(&cfg.Burst, "kube-api-burst", cfg.Burst,
		"the burst for reconcile clients. Recommend setting it qps*2.")
	fs.DurationVar(&cfg.InformerSyncPeriod, "informer-sync-period", cfg.InformerSyncPeriod,
		"The re-sync period for informer in controller-runtime. This is a system-level configuration.")
}

View File

@@ -1,53 +0,0 @@
/*
Copyright 2025 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"time"
pkgmulticluster "github.com/kubevela/pkg/multicluster"
"github.com/spf13/pflag"
)
// MultiClusterConfig holds the multi-cluster settings.
type MultiClusterConfig struct {
	// EnableClusterGateway turns on cluster-gateway based multicluster.
	EnableClusterGateway bool
	// EnableClusterMetrics turns on metrics collection via cluster-gateway;
	// requires EnableClusterGateway.
	EnableClusterMetrics bool
	// ClusterMetricsInterval is how often ClusterMetricsMgr collects metrics.
	ClusterMetricsInterval time.Duration
}

// NewMultiClusterConfig returns a MultiClusterConfig with defaults
// (gateway and metrics disabled, 15s metrics interval).
func NewMultiClusterConfig() *MultiClusterConfig {
	cfg := &MultiClusterConfig{}
	cfg.EnableClusterGateway = false
	cfg.EnableClusterMetrics = false
	cfg.ClusterMetricsInterval = 15 * time.Second
	return cfg
}

// AddFlags registers the multi-cluster configuration flags on fs, plus the
// additional multicluster flags owned by the external pkgmulticluster package.
func (cfg *MultiClusterConfig) AddFlags(fs *pflag.FlagSet) {
	fs.BoolVar(&cfg.EnableClusterGateway, "enable-cluster-gateway", cfg.EnableClusterGateway,
		"Enable cluster-gateway to use multicluster, disabled by default.")
	fs.BoolVar(&cfg.EnableClusterMetrics, "enable-cluster-metrics", cfg.EnableClusterMetrics,
		"Enable cluster-metrics-management to collect metrics from clusters with cluster-gateway, disabled by default. When this param is enabled, enable-cluster-gateway should be enabled")
	fs.DurationVar(&cfg.ClusterMetricsInterval, "cluster-metrics-interval", cfg.ClusterMetricsInterval,
		"The interval that ClusterMetricsMgr will collect metrics from clusters, default value is 15 seconds.")
	pkgmulticluster.AddFlags(fs)
}

View File

@@ -1,54 +0,0 @@
/*
Copyright 2025 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"github.com/spf13/pflag"
"github.com/oam-dev/kubevela/pkg/oam"
)
// OAMConfig holds OAM-specific settings.
type OAMConfig struct {
	// SystemDefinitionNamespace is the namespace of system-level definitions.
	SystemDefinitionNamespace string
}

// NewOAMConfig returns an OAMConfig defaulting the system definition
// namespace to "vela-system".
func NewOAMConfig() *OAMConfig {
	cfg := &OAMConfig{}
	cfg.SystemDefinitionNamespace = "vela-system"
	return cfg
}

// AddFlags registers the OAM configuration flags on fs.
func (cfg *OAMConfig) AddFlags(fs *pflag.FlagSet) {
	fs.StringVar(&cfg.SystemDefinitionNamespace, "system-definition-namespace", cfg.SystemDefinitionNamespace,
		"Define the namespace of the system-level definition")
}

// SyncToOAMGlobals copies the parsed configuration back into the oam
// package globals. Call it after flag parsing so the OAM runtime observes
// the configured values.
//
// NOTE: this exists for backward compatibility with legacy code that reads
// the oam globals; prefer injecting configuration over globals.
//
// Flow: CLI flags -> OAMConfig fields -> oam globals.
func (cfg *OAMConfig) SyncToOAMGlobals() {
	oam.SystemDefinitionNamespace = cfg.SystemDefinitionNamespace
}

View File

@@ -1,55 +0,0 @@
/*
Copyright 2025 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"github.com/spf13/pflag"
)
// ObservabilityConfig groups the metrics and logging settings.
type ObservabilityConfig struct {
	// MetricsAddr is the bind address of the metrics endpoint.
	MetricsAddr string
	// LogFilePath is the file to write logs to (empty by default).
	LogFilePath string
	// LogFileMaxSize is the maximum log file size in megabytes.
	LogFileMaxSize uint64
	// LogDebug enables debug logs for development purposes.
	LogDebug bool
	// DevLogs enables ANSI color formatting for console logs; ignored when
	// LogFilePath is set.
	DevLogs bool
}

// NewObservabilityConfig returns an ObservabilityConfig with defaults.
func NewObservabilityConfig() *ObservabilityConfig {
	cfg := &ObservabilityConfig{}
	cfg.MetricsAddr = ":8080"
	cfg.LogFilePath = ""
	cfg.LogFileMaxSize = 1024
	cfg.LogDebug = false
	cfg.DevLogs = false
	return cfg
}

// AddFlags registers the observability configuration flags on fs.
func (cfg *ObservabilityConfig) AddFlags(fs *pflag.FlagSet) {
	fs.StringVar(&cfg.MetricsAddr, "metrics-addr", cfg.MetricsAddr,
		"The address the metric endpoint binds to.")
	fs.StringVar(&cfg.LogFilePath, "log-file-path", cfg.LogFilePath,
		"The file to write logs to.")
	fs.Uint64Var(&cfg.LogFileMaxSize, "log-file-max-size", cfg.LogFileMaxSize,
		"Defines the maximum size a log file can grow to, Unit is megabytes.")
	fs.BoolVar(&cfg.LogDebug, "log-debug", cfg.LogDebug,
		"Enable debug logs for development purpose")
	fs.BoolVar(&cfg.DevLogs, "dev-logs", cfg.DevLogs,
		"Enable ANSI color formatting for console logs (ignored when log-file-path is set)")
}

View File

@@ -1,58 +0,0 @@
/*
Copyright 2025 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"github.com/spf13/pflag"
standardcontroller "github.com/oam-dev/kubevela/pkg/controller"
commonconfig "github.com/oam-dev/kubevela/pkg/controller/common"
)
// PerformanceConfig carries performance and optimization settings.
type PerformanceConfig struct {
	// PerfEnabled toggles performance logging for controllers.
	PerfEnabled bool
}

// NewPerformanceConfig returns a PerformanceConfig seeded from the current
// commonconfig.PerfEnabled global.
func NewPerformanceConfig() *PerformanceConfig {
	cfg := &PerformanceConfig{}
	cfg.PerfEnabled = commonconfig.PerfEnabled
	return cfg
}

// AddFlags registers the performance configuration flags on fs, plus the
// optimization flags owned by the standard controller package.
func (cfg *PerformanceConfig) AddFlags(fs *pflag.FlagSet) {
	fs.BoolVar(&cfg.PerfEnabled, "perf-enabled", cfg.PerfEnabled,
		"Enable performance logging for controllers, disabled by default.")
	standardcontroller.AddOptimizeFlags(fs)
}

// SyncToPerformanceGlobals copies the parsed configuration back into the
// commonconfig package globals. Call it after flag parsing so performance
// monitoring observes the configured values.
//
// NOTE: this exists for backward compatibility with legacy code that reads
// the commonconfig globals; prefer injecting configuration over globals.
//
// Flow: CLI flags -> PerformanceConfig fields -> commonconfig globals.
func (cfg *PerformanceConfig) SyncToPerformanceGlobals() {
	commonconfig.PerfEnabled = cfg.PerfEnabled
}

View File

@@ -1,40 +0,0 @@
/*
Copyright 2025 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"github.com/kubevela/pkg/util/profiling"
"github.com/spf13/pflag"
)
// ProfilingConfig contains profiling configuration.
// This wraps the external package's profiling configuration flags.
//
// It carries no fields of its own: the actual settings are defined and
// stored by the profiling package. The wrapper keeps profiling flag
// registration consistent with the other config modules in this package.
type ProfilingConfig struct {
// Note: The actual configuration is managed by the profiling package
// This is a wrapper to maintain consistency with our config pattern
}
// NewProfilingConfig creates a new ProfilingConfig with defaults.
func NewProfilingConfig() *ProfilingConfig {
return &ProfilingConfig{}
}
// AddFlags registers profiling configuration flags.
// Delegates to the external package's flag registration (profiling.AddFlags).
func (c *ProfilingConfig) AddFlags(fs *pflag.FlagSet) {
profiling.AddFlags(fs)
}

View File

@@ -1,40 +0,0 @@
/*
Copyright 2025 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
ctrlrec "github.com/kubevela/pkg/controller/reconciler"
"github.com/spf13/pflag"
)
// ReconcileConfig contains controller reconciliation configuration.
// This wraps the external package's reconciler configuration flags.
//
// It carries no fields of its own: the actual settings are defined and
// stored by the ctrlrec package. The wrapper keeps reconciler flag
// registration consistent with the other config modules in this package.
type ReconcileConfig struct {
// Note: The actual configuration is managed by the ctrlrec package
// This is a wrapper to maintain consistency with our config pattern
}
// NewReconcileConfig creates a new ReconcileConfig with defaults.
func NewReconcileConfig() *ReconcileConfig {
return &ReconcileConfig{}
}
// AddFlags registers reconcile configuration flags.
// Delegates to the external package's flag registration (ctrlrec.AddFlags).
func (c *ReconcileConfig) AddFlags(fs *pflag.FlagSet) {
ctrlrec.AddFlags(fs)
}

View File

@@ -1,55 +0,0 @@
/*
Copyright 2025 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"github.com/spf13/pflag"
"github.com/oam-dev/kubevela/pkg/resourcekeeper"
)
// ResourceConfig holds resource management settings.
type ResourceConfig struct {
	// MaxDispatchConcurrent caps the number of concurrent dispatches.
	MaxDispatchConcurrent int
}

// NewResourceConfig returns a ResourceConfig with the default dispatch
// concurrency (10).
func NewResourceConfig() *ResourceConfig {
	cfg := &ResourceConfig{}
	cfg.MaxDispatchConcurrent = 10
	return cfg
}

// AddFlags registers the resource configuration flags on fs.
func (cfg *ResourceConfig) AddFlags(fs *pflag.FlagSet) {
	fs.IntVar(&cfg.MaxDispatchConcurrent, "max-dispatch-concurrent", cfg.MaxDispatchConcurrent,
		"Set the max dispatch concurrent number, default is 10")
}

// SyncToResourceGlobals copies the parsed configuration back into the
// resourcekeeper package globals. Call it after flag parsing so the
// resource keeper observes the configured values.
//
// NOTE: this exists for backward compatibility with legacy code that reads
// the resourcekeeper globals; the long-term goal is dependency injection
// rather than globals.
//
// Flow: CLI flags -> ResourceConfig fields -> resourcekeeper globals.
func (cfg *ResourceConfig) SyncToResourceGlobals() {
	resourcekeeper.MaxDispatchConcurrent = cfg.MaxDispatchConcurrent
}

View File

@@ -1,65 +0,0 @@
/*
Copyright 2025 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"time"
"github.com/spf13/pflag"
)
// ServerConfig holds server-level settings, including leader election.
type ServerConfig struct {
	// HealthAddr is the bind address of the health endpoint.
	HealthAddr string
	// StorageDriver selects the application storage driver.
	StorageDriver string
	// EnableLeaderElection enables leader election for the controller manager.
	EnableLeaderElection bool
	// LeaderElectionNamespace is where the leader election configmap lives.
	LeaderElectionNamespace string
	// LeaseDuration is how long non-leaders wait to force-acquire leadership.
	LeaseDuration time.Duration
	// RenewDeadline is how long the leader retries refreshing leadership.
	RenewDeadline time.Duration
	// RetryPeriod is the wait between LeaderElector client actions.
	RetryPeriod time.Duration
}

// NewServerConfig returns a ServerConfig with defaults.
func NewServerConfig() *ServerConfig {
	cfg := &ServerConfig{}
	cfg.HealthAddr = ":9440"
	cfg.StorageDriver = "Local"
	cfg.EnableLeaderElection = false
	cfg.LeaderElectionNamespace = ""
	cfg.LeaseDuration = 15 * time.Second
	cfg.RenewDeadline = 10 * time.Second
	cfg.RetryPeriod = 2 * time.Second
	return cfg
}

// AddFlags registers the server configuration flags on fs.
func (cfg *ServerConfig) AddFlags(fs *pflag.FlagSet) {
	fs.StringVar(&cfg.HealthAddr, "health-addr", cfg.HealthAddr,
		"The address the health endpoint binds to.")
	fs.StringVar(&cfg.StorageDriver, "storage-driver", cfg.StorageDriver,
		"Application storage driver.")
	fs.BoolVar(&cfg.EnableLeaderElection, "enable-leader-election", cfg.EnableLeaderElection,
		"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
	fs.StringVar(&cfg.LeaderElectionNamespace, "leader-election-namespace", cfg.LeaderElectionNamespace,
		"Determines the namespace in which the leader election configmap will be created.")
	fs.DurationVar(&cfg.LeaseDuration, "leader-election-lease-duration", cfg.LeaseDuration,
		"The duration that non-leader candidates will wait to force acquire leadership")
	fs.DurationVar(&cfg.RenewDeadline, "leader-election-renew-deadline", cfg.RenewDeadline,
		"The duration that the acting controlplane will retry refreshing leadership before giving up")
	fs.DurationVar(&cfg.RetryPeriod, "leader-election-retry-period", cfg.RetryPeriod,
		"The duration the LeaderElector clients should wait between tries of actions")
}

View File

@@ -1,40 +0,0 @@
/*
Copyright 2025 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"github.com/kubevela/pkg/controller/sharding"
"github.com/spf13/pflag"
)
// ShardingConfig contains controller sharding configuration.
// This wraps the external package's sharding configuration flags.
//
// It carries no fields of its own: the actual settings are defined and
// stored by the sharding package. The wrapper keeps sharding flag
// registration consistent with the other config modules in this package.
type ShardingConfig struct {
// Note: The actual configuration is managed by the sharding package
// This is a wrapper to maintain consistency with our config pattern
}
// NewShardingConfig creates a new ShardingConfig with defaults.
func NewShardingConfig() *ShardingConfig {
return &ShardingConfig{}
}
// AddFlags registers sharding configuration flags.
// Delegates to the external package's flag registration (sharding.AddFlags).
func (c *ShardingConfig) AddFlags(fs *pflag.FlagSet) {
sharding.AddFlags(fs)
}

View File

@@ -1,47 +0,0 @@
/*
Copyright 2025 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"github.com/spf13/pflag"
)
// WebhookConfig holds the admission webhook settings.
type WebhookConfig struct {
	// UseWebhook enables the admission webhook.
	UseWebhook bool
	// CertDir is the directory holding the webhook serving cert/key.
	CertDir string
	// WebhookPort is the admission webhook listen port.
	WebhookPort int
}

// NewWebhookConfig returns a WebhookConfig with defaults (webhook disabled,
// standard cert dir, port 9443).
func NewWebhookConfig() *WebhookConfig {
	cfg := &WebhookConfig{}
	cfg.UseWebhook = false
	cfg.CertDir = "/k8s-webhook-server/serving-certs"
	cfg.WebhookPort = 9443
	return cfg
}

// AddFlags registers the webhook configuration flags on fs.
func (cfg *WebhookConfig) AddFlags(fs *pflag.FlagSet) {
	fs.BoolVar(&cfg.UseWebhook, "use-webhook", cfg.UseWebhook,
		"Enable Admission Webhook")
	fs.StringVar(&cfg.CertDir, "webhook-cert-dir", cfg.CertDir,
		"Admission webhook cert/key dir.")
	fs.IntVar(&cfg.WebhookPort, "webhook-port", cfg.WebhookPort,
		"admission webhook listen address")
}

View File

@@ -1,69 +0,0 @@
/*
Copyright 2025 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"github.com/spf13/pflag"
wfTypes "github.com/kubevela/workflow/pkg/types"
)
// WorkflowConfig holds the workflow engine backoff and retry settings.
type WorkflowConfig struct {
	// MaxWaitBackoffTime is the max workflow wait backoff time.
	MaxWaitBackoffTime int
	// MaxFailedBackoffTime is the max workflow failed backoff time.
	MaxFailedBackoffTime int
	// MaxStepErrorRetryTimes is the max workflow step error retry count.
	MaxStepErrorRetryTimes int
}

// NewWorkflowConfig returns a WorkflowConfig with defaults
// (wait 60, failed 300, retries 10).
func NewWorkflowConfig() *WorkflowConfig {
	cfg := &WorkflowConfig{}
	cfg.MaxWaitBackoffTime = 60
	cfg.MaxFailedBackoffTime = 300
	cfg.MaxStepErrorRetryTimes = 10
	return cfg
}

// AddFlags registers the workflow configuration flags on fs.
func (cfg *WorkflowConfig) AddFlags(fs *pflag.FlagSet) {
	fs.IntVar(&cfg.MaxWaitBackoffTime, "max-workflow-wait-backoff-time", cfg.MaxWaitBackoffTime,
		"Set the max workflow wait backoff time, default is 60")
	fs.IntVar(&cfg.MaxFailedBackoffTime, "max-workflow-failed-backoff-time", cfg.MaxFailedBackoffTime,
		"Set the max workflow failed backoff time, default is 300")
	fs.IntVar(&cfg.MaxStepErrorRetryTimes, "max-workflow-step-error-retry-times", cfg.MaxStepErrorRetryTimes,
		"Set the max workflow step error retry times, default is 10")
}

// SyncToWorkflowGlobals copies the parsed configuration back into the
// wfTypes package globals. Call it after flag parsing so the workflow
// engine observes the configured values.
//
// NOTE: this exists for backward compatibility with legacy code that reads
// the wfTypes globals; the long-term goal is to pass configuration into
// the workflow package via dependency injection instead.
//
// Flow: CLI flags -> WorkflowConfig fields -> wfTypes globals.
func (cfg *WorkflowConfig) SyncToWorkflowGlobals() {
	wfTypes.MaxWorkflowWaitBackoffTime = cfg.MaxWaitBackoffTime
	wfTypes.MaxWorkflowFailedBackoffTime = cfg.MaxFailedBackoffTime
	wfTypes.MaxWorkflowStepErrorRetryTimes = cfg.MaxStepErrorRetryTimes
}

View File

@@ -17,81 +17,88 @@ limitations under the License.
package options
import (
"strconv"
"time"
"github.com/kubevela/pkg/cue/cuex"
pkgclient "github.com/kubevela/pkg/controller/client"
ctrlrec "github.com/kubevela/pkg/controller/reconciler"
"github.com/kubevela/pkg/controller/sharding"
pkgmulticluster "github.com/kubevela/pkg/multicluster"
utillog "github.com/kubevela/pkg/util/log"
"github.com/kubevela/pkg/util/profiling"
wfTypes "github.com/kubevela/workflow/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
cliflag "k8s.io/component-base/cli/flag"
"github.com/oam-dev/kubevela/cmd/core/app/config"
standardcontroller "github.com/oam-dev/kubevela/pkg/controller"
commonconfig "github.com/oam-dev/kubevela/pkg/controller/common"
oamcontroller "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/resourcekeeper"
)
// CoreOptions contains everything necessary to create and run vela-core
type CoreOptions struct {
	// Config modules - clean, well-organized configuration
	Server        *config.ServerConfig
	Webhook       *config.WebhookConfig
	Observability *config.ObservabilityConfig
	Kubernetes    *config.KubernetesConfig
	MultiCluster  *config.MultiClusterConfig
	CUE           *config.CUEConfig
	Application   *config.ApplicationConfig
	OAM           *config.OAMConfig
	Performance   *config.PerformanceConfig
	Workflow      *config.WorkflowConfig
	Admission     *config.AdmissionConfig
	Resource      *config.ResourceConfig
	Client        *config.ClientConfig
	Reconcile     *config.ReconcileConfig
	Sharding      *config.ShardingConfig
	Feature       *config.FeatureConfig
	Profiling     *config.ProfilingConfig
	KLog          *config.KLogConfig
	Controller    *config.ControllerConfig

	// Legacy flat fields, kept alongside the config modules above during the
	// transition. Flags() binds the "generic" flag set directly to these
	// fields (e.g. --use-webhook writes s.UseWebhook), while the config
	// modules register their own flags on separate named sets.
	// NOTE(review): several flag names therefore appear in more than one
	// named flag set; when callers merge all sets into a single FlagSet,
	// pflag keeps only the first registration — confirm which binding is
	// authoritative before relying on these fields.
	UseWebhook              bool
	CertDir                 string
	WebhookPort             int
	MetricsAddr             string
	EnableLeaderElection    bool
	LeaderElectionNamespace string
	LogFilePath             string
	LogFileMaxSize          uint64
	LogDebug                bool
	DevLogs                 bool
	ControllerArgs          *oamcontroller.Args
	HealthAddr              string
	StorageDriver           string
	InformerSyncPeriod      time.Duration
	QPS                     float64
	Burst                   int
	LeaseDuration           time.Duration
	RenewDeadLine           time.Duration
	RetryPeriod             time.Duration
	EnableClusterGateway    bool
	EnableClusterMetrics    bool
	ClusterMetricsInterval  time.Duration
}
// NewCoreOptions creates a new NewVelaCoreOptions object with default parameters
func NewCoreOptions() *CoreOptions {
	// The KLog module is derived from the observability module, so build that
	// one up front; every other config module is constructed inline below.
	observability := config.NewObservabilityConfig()
	return &CoreOptions{
		// Config modules
		Server:        config.NewServerConfig(),
		Webhook:       config.NewWebhookConfig(),
		Observability: observability,
		Kubernetes:    config.NewKubernetesConfig(),
		MultiCluster:  config.NewMultiClusterConfig(),
		CUE:           config.NewCUEConfig(),
		Application:   config.NewApplicationConfig(),
		OAM:           config.NewOAMConfig(),
		Performance:   config.NewPerformanceConfig(),
		Workflow:      config.NewWorkflowConfig(),
		Admission:     config.NewAdmissionConfig(),
		Resource:      config.NewResourceConfig(),
		Client:        config.NewClientConfig(),
		Reconcile:     config.NewReconcileConfig(),
		Sharding:      config.NewShardingConfig(),
		Feature:       config.NewFeatureConfig(),
		Profiling:     config.NewProfilingConfig(),
		KLog:          config.NewKLogConfig(observability),
		Controller:    config.NewControllerConfig(),

		// Legacy flat-field defaults (kept in sync with the config modules
		// during the transition period).
		UseWebhook:              false,
		CertDir:                 "/k8s-webhook-server/serving-certs",
		WebhookPort:             9443,
		MetricsAddr:             ":8080",
		EnableLeaderElection:    false,
		LeaderElectionNamespace: "",
		LogFilePath:             "",
		LogFileMaxSize:          1024,
		LogDebug:                false,
		DevLogs:                 false,
		ControllerArgs: &oamcontroller.Args{
			RevisionLimit:                                50,
			AppRevisionLimit:                             10,
			DefRevisionLimit:                             20,
			AutoGenWorkloadDefinition:                    true,
			ConcurrentReconciles:                         4,
			IgnoreAppWithoutControllerRequirement:        false,
			IgnoreDefinitionWithoutControllerRequirement: false,
		},
		HealthAddr:             ":9440",
		StorageDriver:          "Local",
		InformerSyncPeriod:     10 * time.Hour,
		QPS:                    50,
		Burst:                  100,
		LeaseDuration:          15 * time.Second,
		RenewDeadLine:          10 * time.Second,
		RetryPeriod:            2 * time.Second,
		EnableClusterGateway:   false,
		EnableClusterMetrics:   false,
		ClusterMetricsInterval: 15 * time.Second,
	}
}
@@ -99,28 +106,75 @@ func NewCoreOptions() *CoreOptions {
// Flags builds the named flag sets for vela-core. Config-module flags are
// registered via each module's AddFlags; the remaining sets bind either the
// legacy flat fields on CoreOptions or package-level globals in external
// packages (commonconfig, oam, resourcekeeper, wfTypes, cuex, ...).
//
// NOTE(review): several flag names are registered on more than one named set
// (see the inline notes below). pflag.FlagSet.AddFlagSet silently skips a
// flag that already exists, so when a caller merges all named sets into a
// single FlagSet, only the first-added registration receives the parsed
// value — confirm which binding is intended to win.
func (s *CoreOptions) Flags() cliflag.NamedFlagSets {
	fss := cliflag.NamedFlagSets{}
	// Use config modules to register flags - clean delegation pattern
	s.Server.AddFlags(fss.FlagSet("server"))
	s.Webhook.AddFlags(fss.FlagSet("webhook"))
	s.Observability.AddFlags(fss.FlagSet("observability"))
	s.Kubernetes.AddFlags(fss.FlagSet("kubernetes"))
	s.MultiCluster.AddFlags(fss.FlagSet("multicluster"))
	s.CUE.AddFlags(fss.FlagSet("cue"))
	s.Application.AddFlags(fss.FlagSet("application"))
	s.OAM.AddFlags(fss.FlagSet("oam"))
	s.Performance.AddFlags(fss.FlagSet("performance"))
	s.Admission.AddFlags(fss.FlagSet("admission"))
	s.Resource.AddFlags(fss.FlagSet("resource"))
	s.Workflow.AddFlags(fss.FlagSet("workflow"))
	s.Controller.AddFlags(fss.FlagSet("controller"))
	// The "generic" set binds the legacy flat fields on CoreOptions; many of
	// these flag names presumably overlap the config-module sets above
	// (e.g. use-webhook, metrics-addr) — see the NOTE(review) in the doc
	// comment about merge-time duplicate handling.
	gfs := fss.FlagSet("generic")
	gfs.BoolVar(&s.UseWebhook, "use-webhook", s.UseWebhook, "Enable Admission Webhook")
	gfs.StringVar(&s.CertDir, "webhook-cert-dir", s.CertDir, "Admission webhook cert/key dir.")
	gfs.IntVar(&s.WebhookPort, "webhook-port", s.WebhookPort, "admission webhook listen address")
	gfs.StringVar(&s.MetricsAddr, "metrics-addr", s.MetricsAddr, "The address the metric endpoint binds to.")
	gfs.BoolVar(&s.EnableLeaderElection, "enable-leader-election", s.EnableLeaderElection,
		"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
	gfs.StringVar(&s.LeaderElectionNamespace, "leader-election-namespace", s.LeaderElectionNamespace,
		"Determines the namespace in which the leader election configmap will be created.")
	gfs.StringVar(&s.LogFilePath, "log-file-path", s.LogFilePath, "The file to write logs to.")
	gfs.Uint64Var(&s.LogFileMaxSize, "log-file-max-size", s.LogFileMaxSize, "Defines the maximum size a log file can grow to, Unit is megabytes.")
	gfs.BoolVar(&s.LogDebug, "log-debug", s.LogDebug, "Enable debug logs for development purpose")
	gfs.BoolVar(&s.DevLogs, "dev-logs", s.DevLogs, "Enable ANSI color formatting for console logs (ignored when log-file-path is set)")
	gfs.StringVar(&s.HealthAddr, "health-addr", s.HealthAddr, "The address the health endpoint binds to.")
	gfs.DurationVar(&s.InformerSyncPeriod, "informer-sync-period", s.InformerSyncPeriod,
		"The re-sync period for informer in controller-runtime. This is a system-level configuration.")
	gfs.Float64Var(&s.QPS, "kube-api-qps", s.QPS, "the qps for reconcile clients. Low qps may lead to low throughput. High qps may give stress to api-server. Raise this value if concurrent-reconciles is set to be high.")
	gfs.IntVar(&s.Burst, "kube-api-burst", s.Burst, "the burst for reconcile clients. Recommend setting it qps*2.")
	gfs.DurationVar(&s.LeaseDuration, "leader-election-lease-duration", s.LeaseDuration,
		"The duration that non-leader candidates will wait to force acquire leadership")
	gfs.DurationVar(&s.RenewDeadLine, "leader-election-renew-deadline", s.RenewDeadLine,
		"The duration that the acting controlplane will retry refreshing leadership before giving up")
	gfs.DurationVar(&s.RetryPeriod, "leader-election-retry-period", s.RetryPeriod,
		"The duration the LeaderElector clients should wait between tries of actions")
	gfs.BoolVar(&s.EnableClusterGateway, "enable-cluster-gateway", s.EnableClusterGateway, "Enable cluster-gateway to use multicluster, disabled by default.")
	gfs.BoolVar(&s.EnableClusterMetrics, "enable-cluster-metrics", s.EnableClusterMetrics, "Enable cluster-metrics-management to collect metrics from clusters with cluster-gateway, disabled by default. When this param is enabled, enable-cluster-gateway should be enabled")
	gfs.DurationVar(&s.ClusterMetricsInterval, "cluster-metrics-interval", s.ClusterMetricsInterval, "The interval that ClusterMetricsMgr will collect metrics from clusters, default value is 15 seconds.")
	// These two bind cuex package globals directly (not via s.CUE).
	gfs.BoolVar(&cuex.EnableExternalPackageForDefaultCompiler, "enable-external-package-for-default-compiler", cuex.EnableExternalPackageForDefaultCompiler, "Enable external package for default compiler")
	gfs.BoolVar(&cuex.EnableExternalPackageWatchForDefaultCompiler, "enable-external-package-watch-for-default-compiler", cuex.EnableExternalPackageWatchForDefaultCompiler, "Enable external package watch for default compiler")
	// External package configurations (now wrapped in config modules)
	s.Client.AddFlags(fss.FlagSet("client"))
	s.Reconcile.AddFlags(fss.FlagSet("reconcile"))
	s.Sharding.AddFlags(fss.FlagSet("sharding"))
	s.Feature.AddFlags(fss.FlagSet("feature"))
	s.Profiling.AddFlags(fss.FlagSet("profiling"))
	s.KLog.AddFlags(fss.FlagSet("klog"))
	s.ControllerArgs.AddFlags(fss.FlagSet("controllerArgs"), s.ControllerArgs)
	// NOTE(review): application-re-sync-period / perf-enabled here bind
	// commonconfig globals and apparently duplicate flags registered by
	// s.Application / s.Performance above — verify intended ownership.
	cfs := fss.FlagSet("commonconfig")
	cfs.DurationVar(&commonconfig.ApplicationReSyncPeriod, "application-re-sync-period", commonconfig.ApplicationReSyncPeriod,
		"Re-sync period for application to re-sync, also known as the state-keep interval.")
	cfs.BoolVar(&commonconfig.PerfEnabled, "perf-enabled", commonconfig.PerfEnabled, "Enable performance logging for controllers, disabled by default.")
	// NOTE(review): this re-fetches the "oam" set that s.OAM.AddFlags already
	// populated; if the module also registers system-definition-namespace,
	// pflag would panic on redefinition — confirm the module's flag names.
	ofs := fss.FlagSet("oam")
	ofs.StringVar(&oam.SystemDefinitionNamespace, "system-definition-namespace", "vela-system", "define the namespace of the system-level definition")
	standardcontroller.AddOptimizeFlags(fss.FlagSet("optimize"))
	standardcontroller.AddAdmissionFlags(fss.FlagSet("admission"))
	// NOTE(review): duplicates "max-dispatch-concurrent" from s.Resource's set.
	rfs := fss.FlagSet("resourcekeeper")
	rfs.IntVar(&resourcekeeper.MaxDispatchConcurrent, "max-dispatch-concurrent", 10, "Set the max dispatch concurrent number, default is 10")
	// NOTE(review): duplicates the three workflow flags that
	// s.Workflow.AddFlags registered on the "workflow" set, here bound to the
	// wfTypes globals instead of s.Workflow (cf. SyncToWorkflowGlobals).
	wfs := fss.FlagSet("wfTypes")
	wfs.IntVar(&wfTypes.MaxWorkflowWaitBackoffTime, "max-workflow-wait-backoff-time", 60, "Set the max workflow wait backoff time, default is 60")
	wfs.IntVar(&wfTypes.MaxWorkflowFailedBackoffTime, "max-workflow-failed-backoff-time", 300, "Set the max workflow failed backoff time, default is 300")
	wfs.IntVar(&wfTypes.MaxWorkflowStepErrorRetryTimes, "max-workflow-step-error-retry-times", 10, "Set the max workflow step error retry times, default is 10")
	pkgmulticluster.AddFlags(fss.FlagSet("multicluster"))
	ctrlrec.AddFlags(fss.FlagSet("controllerreconciles"))
	utilfeature.DefaultMutableFeatureGate.AddFlag(fss.FlagSet("featuregate"))
	sharding.AddFlags(fss.FlagSet("sharding"))
	// Same underlying set as the s.KLog.AddFlags call above (FlagSet returns
	// the existing set for a repeated name).
	kfs := fss.FlagSet("klog")
	pkgclient.AddTimeoutControllerClientFlags(fss.FlagSet("controllerclient"))
	utillog.AddFlags(kfs)
	profiling.AddFlags(fss.FlagSet("profiling"))
	// NOTE(review): s.LogDebug / s.LogFilePath are read here, at registration
	// time — i.e. before any parsing of the returned sets. These branches only
	// see values set programmatically before Flags() is called, never values
	// supplied via the flags themselves.
	if s.LogDebug {
		_ = kfs.Set("v", strconv.Itoa(int(commonconfig.LogDebug)))
	}
	if s.LogFilePath != "" {
		_ = kfs.Set("logtostderr", "false")
		_ = kfs.Set("log_file", s.LogFilePath)
		_ = kfs.Set("log_file_max_size", strconv.FormatUint(s.LogFileMaxSize, 10))
	}
	return fss
}

View File

@@ -20,915 +20,104 @@ import (
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/kubevela/pkg/cue/cuex"
wfTypes "github.com/kubevela/workflow/pkg/types"
"github.com/spf13/pflag"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
commonconfig "github.com/oam-dev/kubevela/pkg/controller/common"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/resourcekeeper"
oamcontroller "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev"
)
// TestNewCoreOptions_DefaultValues verifies that NewCoreOptions initializes
// every config module (non-nil) and that each module carries the expected
// default values before any flags are parsed.
func TestNewCoreOptions_DefaultValues(t *testing.T) {
	opt := NewCoreOptions()
	// Test Server defaults
	assert.Equal(t, ":9440", opt.Server.HealthAddr)
	assert.Equal(t, "Local", opt.Server.StorageDriver)
	assert.Equal(t, false, opt.Server.EnableLeaderElection)
	assert.Equal(t, "", opt.Server.LeaderElectionNamespace)
	assert.Equal(t, 15*time.Second, opt.Server.LeaseDuration)
	assert.Equal(t, 10*time.Second, opt.Server.RenewDeadline)
	assert.Equal(t, 2*time.Second, opt.Server.RetryPeriod)
	// Test Webhook defaults
	assert.Equal(t, false, opt.Webhook.UseWebhook)
	assert.Equal(t, "/k8s-webhook-server/serving-certs", opt.Webhook.CertDir)
	assert.Equal(t, 9443, opt.Webhook.WebhookPort)
	// Test Observability defaults
	assert.Equal(t, ":8080", opt.Observability.MetricsAddr)
	assert.Equal(t, false, opt.Observability.LogDebug)
	assert.Equal(t, "", opt.Observability.LogFilePath)
	assert.Equal(t, uint64(1024), opt.Observability.LogFileMaxSize)
	// Test Kubernetes defaults
	assert.Equal(t, 10*time.Hour, opt.Kubernetes.InformerSyncPeriod)
	assert.Equal(t, float64(50), opt.Kubernetes.QPS)
	assert.Equal(t, 100, opt.Kubernetes.Burst)
	// Test MultiCluster defaults
	assert.Equal(t, false, opt.MultiCluster.EnableClusterGateway)
	assert.Equal(t, false, opt.MultiCluster.EnableClusterMetrics)
	assert.Equal(t, 15*time.Second, opt.MultiCluster.ClusterMetricsInterval)
	// Test CUE defaults
	assert.NotNil(t, opt.CUE)
	// Test Application defaults
	assert.Equal(t, 5*time.Minute, opt.Application.ReSyncPeriod)
	// Test OAM defaults
	assert.Equal(t, "vela-system", opt.OAM.SystemDefinitionNamespace)
	// Test Performance defaults
	assert.Equal(t, false, opt.Performance.PerfEnabled)
	// Test Controller defaults
	assert.Equal(t, 50, opt.Controller.RevisionLimit)
	assert.Equal(t, 10, opt.Controller.AppRevisionLimit)
	assert.Equal(t, 20, opt.Controller.DefRevisionLimit)
	assert.Equal(t, true, opt.Controller.AutoGenWorkloadDefinition)
	assert.Equal(t, 4, opt.Controller.ConcurrentReconciles)
	assert.Equal(t, false, opt.Controller.IgnoreAppWithoutControllerRequirement)
	assert.Equal(t, false, opt.Controller.IgnoreDefinitionWithoutControllerRequirement)
	// Test Workflow defaults
	assert.Equal(t, 60, opt.Workflow.MaxWaitBackoffTime)
	assert.Equal(t, 300, opt.Workflow.MaxFailedBackoffTime)
	assert.Equal(t, 10, opt.Workflow.MaxStepErrorRetryTimes)
	// Test Resource defaults
	assert.Equal(t, 10, opt.Resource.MaxDispatchConcurrent)
	// Ensure all config modules are initialized
	assert.NotNil(t, opt.Admission)
	assert.NotNil(t, opt.Client)
	assert.NotNil(t, opt.Reconcile)
	assert.NotNil(t, opt.Sharding)
	assert.NotNil(t, opt.Feature)
	assert.NotNil(t, opt.Profiling)
	assert.NotNil(t, opt.KLog)
	assert.NotNil(t, opt.Controller)
}
func TestCoreOptions_FlagsCompleteSet(t *testing.T) {
func TestCoreOptions_Flags(t *testing.T) {
fs := pflag.NewFlagSet("test", pflag.ContinueOnError)
opt := NewCoreOptions()
opt := &CoreOptions{
ControllerArgs: &oamcontroller.Args{},
}
for _, f := range opt.Flags().FlagSets {
fs.AddFlagSet(f)
}
args := []string{
// Server flags
"--health-addr=/healthz",
"--storage-driver=MongoDB",
"--application-re-sync-period=5s",
"--cluster-metrics-interval=5s",
"--enable-cluster-gateway=true",
"--enable-cluster-metrics=true",
"--enable-leader-election=true",
"--leader-election-namespace=test-namespace",
"--health-addr=/healthz",
"--informer-sync-period=3s",
"--kube-api-burst=500",
"--kube-api-qps=200",
"--leader-election-lease-duration=3s",
"--leader-election-namespace=test-namespace",
"--leader-election-renew-deadline=5s",
"--leader-election-retry-period=3s",
// Webhook flags
"--log-debug=true",
"--log-file-max-size=50",
"--log-file-path=/path/to/log",
"--max-dispatch-concurrent=5",
"--max-workflow-failed-backoff-time=30",
"--max-workflow-step-error-retry-times=5",
"--max-workflow-wait-backoff-time=5",
"--metrics-addr=/metrics",
"--perf-enabled=true",
"--use-webhook=true",
"--webhook-cert-dir=/path/to/cert",
"--webhook-port=8080",
// Observability flags
"--metrics-addr=/metrics",
"--log-debug=true",
"--log-file-path=/path/to/log",
"--log-file-max-size=50",
// Kubernetes flags
"--informer-sync-period=3s",
"--kube-api-qps=200",
"--kube-api-burst=500",
// MultiCluster flags
"--enable-cluster-gateway=true",
"--enable-cluster-metrics=true",
"--cluster-metrics-interval=5s",
// CUE flags
"--enable-external-package-for-default-compiler=true",
"--enable-external-package-watch-for-default-compiler=true",
// Application flags
"--application-re-sync-period=5s",
// OAM flags
"--system-definition-namespace=custom-namespace",
// Performance flags
"--perf-enabled=true",
// Controller flags
"--revision-limit=100",
"--application-revision-limit=20",
"--definition-revision-limit=30",
"--autogen-workload-definition=false",
"--concurrent-reconciles=8",
"--ignore-app-without-controller-version=true",
"--ignore-definition-without-controller-version=true",
// Workflow flags
"--max-workflow-wait-backoff-time=30",
"--max-workflow-failed-backoff-time=150",
"--max-workflow-step-error-retry-times=5",
// Resource flags
"--max-dispatch-concurrent=5",
}
err := fs.Parse(args)
require.NoError(t, err)
if err := fs.Parse(args); err != nil {
t.Errorf("Failed to parse args: %v", err)
}
// Verify Server flags
assert.Equal(t, "/healthz", opt.Server.HealthAddr)
assert.Equal(t, "MongoDB", opt.Server.StorageDriver)
assert.Equal(t, true, opt.Server.EnableLeaderElection)
assert.Equal(t, "test-namespace", opt.Server.LeaderElectionNamespace)
assert.Equal(t, 3*time.Second, opt.Server.LeaseDuration)
assert.Equal(t, 5*time.Second, opt.Server.RenewDeadline)
assert.Equal(t, 3*time.Second, opt.Server.RetryPeriod)
expected := &CoreOptions{
UseWebhook: true,
CertDir: "/path/to/cert",
WebhookPort: 8080,
MetricsAddr: "/metrics",
EnableLeaderElection: true,
LeaderElectionNamespace: "test-namespace",
LogFilePath: "/path/to/log",
LogFileMaxSize: 50,
LogDebug: true,
ControllerArgs: &oamcontroller.Args{},
HealthAddr: "/healthz",
StorageDriver: "",
InformerSyncPeriod: 3 * time.Second,
QPS: 200,
Burst: 500,
LeaseDuration: 3 * time.Second,
RenewDeadLine: 5 * time.Second,
RetryPeriod: 3 * time.Second,
EnableClusterGateway: true,
EnableClusterMetrics: true,
ClusterMetricsInterval: 5 * time.Second,
}
// Verify Webhook flags
assert.Equal(t, true, opt.Webhook.UseWebhook)
assert.Equal(t, "/path/to/cert", opt.Webhook.CertDir)
assert.Equal(t, 8080, opt.Webhook.WebhookPort)
// Verify Observability flags
assert.Equal(t, "/metrics", opt.Observability.MetricsAddr)
assert.Equal(t, true, opt.Observability.LogDebug)
assert.Equal(t, "/path/to/log", opt.Observability.LogFilePath)
assert.Equal(t, uint64(50), opt.Observability.LogFileMaxSize)
// Verify Kubernetes flags
assert.Equal(t, 3*time.Second, opt.Kubernetes.InformerSyncPeriod)
assert.Equal(t, float64(200), opt.Kubernetes.QPS)
assert.Equal(t, 500, opt.Kubernetes.Burst)
// Verify MultiCluster flags
assert.Equal(t, true, opt.MultiCluster.EnableClusterGateway)
assert.Equal(t, true, opt.MultiCluster.EnableClusterMetrics)
assert.Equal(t, 5*time.Second, opt.MultiCluster.ClusterMetricsInterval)
// Verify CUE flags
assert.True(t, opt.CUE.EnableExternalPackage)
assert.True(t, opt.CUE.EnableExternalPackageWatch)
// Verify Application flags
assert.Equal(t, 5*time.Second, opt.Application.ReSyncPeriod)
// Verify OAM flags
assert.Equal(t, "custom-namespace", opt.OAM.SystemDefinitionNamespace)
// Verify Performance flags
assert.Equal(t, true, opt.Performance.PerfEnabled)
// Verify Controller flags
assert.Equal(t, 100, opt.Controller.RevisionLimit)
assert.Equal(t, 20, opt.Controller.AppRevisionLimit)
assert.Equal(t, 30, opt.Controller.DefRevisionLimit)
assert.Equal(t, false, opt.Controller.AutoGenWorkloadDefinition)
assert.Equal(t, 8, opt.Controller.ConcurrentReconciles)
assert.Equal(t, true, opt.Controller.IgnoreAppWithoutControllerRequirement)
assert.Equal(t, true, opt.Controller.IgnoreDefinitionWithoutControllerRequirement)
// Verify Workflow flags
assert.Equal(t, 30, opt.Workflow.MaxWaitBackoffTime)
assert.Equal(t, 150, opt.Workflow.MaxFailedBackoffTime)
assert.Equal(t, 5, opt.Workflow.MaxStepErrorRetryTimes)
// Verify Resource flags
assert.Equal(t, 5, opt.Resource.MaxDispatchConcurrent)
if !cmp.Equal(opt, expected, cmp.AllowUnexported(CoreOptions{})) {
t.Errorf("Flags() diff: %v", cmp.Diff(opt, expected, cmp.AllowUnexported(CoreOptions{})))
}
}
func TestCuexOptions_SyncToGlobals(t *testing.T) {
// Reset globals
func TestCuexOptions_Flags(t *testing.T) {
pflag.NewFlagSet("test", pflag.ContinueOnError)
cuex.EnableExternalPackageForDefaultCompiler = false
cuex.EnableExternalPackageWatchForDefaultCompiler = false
opts := NewCoreOptions()
opts := &CoreOptions{
ControllerArgs: &oamcontroller.Args{},
}
fss := opts.Flags()
args := []string{
"--enable-external-package-for-default-compiler=true",
"--enable-external-package-watch-for-default-compiler=true",
}
err := fss.FlagSet("generic").Parse(args)
if err != nil {
return
}
err := fss.FlagSet("cue").Parse(args)
require.NoError(t, err)
// Before sync, globals should still be false
assert.False(t, cuex.EnableExternalPackageForDefaultCompiler)
assert.False(t, cuex.EnableExternalPackageWatchForDefaultCompiler)
// After sync, globals should be updated
opts.CUE.SyncToCUEGlobals()
assert.True(t, cuex.EnableExternalPackageForDefaultCompiler)
assert.True(t, cuex.EnableExternalPackageWatchForDefaultCompiler)
}
// TestWorkflowOptions_SyncToGlobals checks the flag -> struct -> wfTypes
// globals pipeline: parsing the "workflow" flag set updates the Workflow
// config module, and SyncToWorkflowGlobals then propagates those values to
// the wfTypes package globals. The globals are saved and restored to avoid
// leaking state into other tests.
func TestWorkflowOptions_SyncToGlobals(t *testing.T) {
	// Store original values
	origWait := wfTypes.MaxWorkflowWaitBackoffTime
	origFailed := wfTypes.MaxWorkflowFailedBackoffTime
	origRetry := wfTypes.MaxWorkflowStepErrorRetryTimes
	// Restore after test
	defer func() {
		wfTypes.MaxWorkflowWaitBackoffTime = origWait
		wfTypes.MaxWorkflowFailedBackoffTime = origFailed
		wfTypes.MaxWorkflowStepErrorRetryTimes = origRetry
	}()
	opts := NewCoreOptions()
	fss := opts.Flags()
	args := []string{
		"--max-workflow-wait-backoff-time=120",
		"--max-workflow-failed-backoff-time=600",
		"--max-workflow-step-error-retry-times=20",
	}
	err := fss.FlagSet("workflow").Parse(args)
	require.NoError(t, err)
	// Verify struct fields are updated
	assert.Equal(t, 120, opts.Workflow.MaxWaitBackoffTime)
	assert.Equal(t, 600, opts.Workflow.MaxFailedBackoffTime)
	assert.Equal(t, 20, opts.Workflow.MaxStepErrorRetryTimes)
	// After sync, globals should be updated
	opts.Workflow.SyncToWorkflowGlobals()
	assert.Equal(t, 120, wfTypes.MaxWorkflowWaitBackoffTime)
	assert.Equal(t, 600, wfTypes.MaxWorkflowFailedBackoffTime)
	assert.Equal(t, 20, wfTypes.MaxWorkflowStepErrorRetryTimes)
}
// TestOAMOptions_SyncToGlobals checks that parsing the "oam" flag set updates
// the OAM config module and that SyncToOAMGlobals propagates the value to
// the oam package global, restoring the global afterwards.
func TestOAMOptions_SyncToGlobals(t *testing.T) {
	// Store original value
	origNamespace := oam.SystemDefinitionNamespace
	// Restore after test
	defer func() {
		oam.SystemDefinitionNamespace = origNamespace
	}()
	opts := NewCoreOptions()
	fss := opts.Flags()
	args := []string{
		"--system-definition-namespace=custom-system",
	}
	err := fss.FlagSet("oam").Parse(args)
	require.NoError(t, err)
	// Verify struct field is updated
	assert.Equal(t, "custom-system", opts.OAM.SystemDefinitionNamespace)
	// After sync, global should be updated
	opts.OAM.SyncToOAMGlobals()
	assert.Equal(t, "custom-system", oam.SystemDefinitionNamespace)
}
// TestPerformanceOptions_SyncToGlobals checks that parsing the "performance"
// flag set updates the Performance config module and that
// SyncToPerformanceGlobals propagates the value to commonconfig.PerfEnabled,
// restoring the global afterwards.
func TestPerformanceOptions_SyncToGlobals(t *testing.T) {
	// Store original value
	origPerf := commonconfig.PerfEnabled
	// Restore after test
	defer func() {
		commonconfig.PerfEnabled = origPerf
	}()
	opts := NewCoreOptions()
	fss := opts.Flags()
	args := []string{
		"--perf-enabled=true",
	}
	err := fss.FlagSet("performance").Parse(args)
	require.NoError(t, err)
	// Verify struct field is updated
	assert.Equal(t, true, opts.Performance.PerfEnabled)
	// After sync, global should be updated
	opts.Performance.SyncToPerformanceGlobals()
	assert.True(t, commonconfig.PerfEnabled)
}
// TestApplicationOptions_SyncToGlobals checks that parsing the "application"
// flag set updates the Application config module and that
// SyncToApplicationGlobals propagates the value to
// commonconfig.ApplicationReSyncPeriod, restoring the global afterwards.
func TestApplicationOptions_SyncToGlobals(t *testing.T) {
	// Store original value
	origPeriod := commonconfig.ApplicationReSyncPeriod
	// Restore after test
	defer func() {
		commonconfig.ApplicationReSyncPeriod = origPeriod
	}()
	opts := NewCoreOptions()
	fss := opts.Flags()
	args := []string{
		"--application-re-sync-period=10m",
	}
	err := fss.FlagSet("application").Parse(args)
	require.NoError(t, err)
	// Verify struct field is updated
	assert.Equal(t, 10*time.Minute, opts.Application.ReSyncPeriod)
	// After sync, global should be updated
	opts.Application.SyncToApplicationGlobals()
	assert.Equal(t, 10*time.Minute, commonconfig.ApplicationReSyncPeriod)
}
// TestResourceOptions_SyncToGlobals checks that parsing the "resource" flag
// set updates the Resource config module and that SyncToResourceGlobals
// propagates the value to resourcekeeper.MaxDispatchConcurrent, restoring
// the global afterwards.
func TestResourceOptions_SyncToGlobals(t *testing.T) {
	// Store original value
	origDispatch := resourcekeeper.MaxDispatchConcurrent
	// Restore after test
	defer func() {
		resourcekeeper.MaxDispatchConcurrent = origDispatch
	}()
	opts := NewCoreOptions()
	fss := opts.Flags()
	args := []string{
		"--max-dispatch-concurrent=25",
	}
	err := fss.FlagSet("resource").Parse(args)
	require.NoError(t, err)
	// Verify struct field is updated
	assert.Equal(t, 25, opts.Resource.MaxDispatchConcurrent)
	// After sync, global should be updated
	opts.Resource.SyncToResourceGlobals()
	assert.Equal(t, 25, resourcekeeper.MaxDispatchConcurrent)
}
// TestCoreOptions_InvalidValues is a table-driven test asserting that
// malformed flag values (wrong type, negative uint, unknown flag) make
// pflag.Parse return an error containing the expected message fragment.
func TestCoreOptions_InvalidValues(t *testing.T) {
	tests := []struct {
		name        string
		args        []string
		expectError bool
		errorMsg    string
	}{
		{
			name: "invalid boolean value",
			args: []string{
				"--enable-leader-election=notabool",
			},
			expectError: true,
			errorMsg:    "invalid argument",
		},
		{
			name: "invalid duration value",
			args: []string{
				"--leader-election-lease-duration=notaduration",
			},
			expectError: true,
			errorMsg:    "invalid argument",
		},
		{
			name: "invalid int value",
			args: []string{
				"--webhook-port=notanint",
			},
			expectError: true,
			errorMsg:    "invalid argument",
		},
		{
			name: "invalid float value",
			args: []string{
				"--kube-api-qps=notafloat",
			},
			expectError: true,
			errorMsg:    "invalid argument",
		},
		{
			name: "invalid uint64 value",
			args: []string{
				"--log-file-max-size=-100",
			},
			expectError: true,
			errorMsg:    "invalid argument",
		},
		{
			name: "unknown flag",
			args: []string{
				"--unknown-flag=value",
			},
			expectError: true,
			errorMsg:    "unknown flag",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Merge every named flag set into one FlagSet, as a caller would.
			fs := pflag.NewFlagSet("test", pflag.ContinueOnError)
			opt := NewCoreOptions()
			for _, f := range opt.Flags().FlagSets {
				fs.AddFlagSet(f)
			}
			err := fs.Parse(tt.args)
			if tt.expectError {
				assert.Error(t, err)
				assert.Contains(t, err.Error(), tt.errorMsg)
			} else {
				assert.NoError(t, err)
			}
		})
	}
}
// TestCoreOptions_PartialConfiguration verifies that setting only a few flags
// updates exactly those options while all unspecified options retain their
// defaults from NewCoreOptions.
func TestCoreOptions_PartialConfiguration(t *testing.T) {
	// Test that partial configuration works correctly
	// and doesn't override other defaults
	fs := pflag.NewFlagSet("test", pflag.ContinueOnError)
	opt := NewCoreOptions()
	for _, f := range opt.Flags().FlagSets {
		fs.AddFlagSet(f)
	}
	// Only set a few flags
	args := []string{
		"--enable-leader-election=true",
		"--log-debug=true",
		"--perf-enabled=true",
	}
	err := fs.Parse(args)
	require.NoError(t, err)
	// Check that specified flags are updated
	assert.Equal(t, true, opt.Server.EnableLeaderElection)
	assert.Equal(t, true, opt.Observability.LogDebug)
	assert.Equal(t, true, opt.Performance.PerfEnabled)
	// Check that unspecified flags retain defaults
	assert.Equal(t, ":9440", opt.Server.HealthAddr)
	assert.Equal(t, "Local", opt.Server.StorageDriver)
	assert.Equal(t, false, opt.Webhook.UseWebhook)
	assert.Equal(t, ":8080", opt.Observability.MetricsAddr)
	assert.Equal(t, 10*time.Hour, opt.Kubernetes.InformerSyncPeriod)
	assert.Equal(t, 10, opt.Resource.MaxDispatchConcurrent)
}
// TestCoreOptions_FlagSetsOrganization asserts that Flags() exposes the
// expected named flag sets.
//
// NOTE(review): NamedFlagSets.FlagSet appears to create a set on demand for
// any name, in which case the NotNil assertion can never fail — consider
// asserting each set has at least one registered flag instead. Confirm
// against the cliflag implementation.
func TestCoreOptions_FlagSetsOrganization(t *testing.T) {
	opt := NewCoreOptions()
	fss := opt.Flags()
	// Verify that all expected flag sets are created
	expectedFlagSets := []string{
		"server",
		"webhook",
		"observability",
		"kubernetes",
		"multicluster",
		"cue",
		"application",
		"oam",
		"performance",
		"admission",
		"resource",
		"workflow",
		"controller",
		"client",
		"reconcile",
		"sharding",
		"feature",
		"profiling",
		"klog",
	}
	for _, name := range expectedFlagSets {
		fs := fss.FlagSet(name)
		assert.NotNil(t, fs, "FlagSet %s should exist", name)
	}
}
// TestCoreOptions_FlagHelp spot-checks that representative flags exist in
// their expected named sets and carry meaningful usage strings.
func TestCoreOptions_FlagHelp(t *testing.T) {
	opt := NewCoreOptions()
	fss := opt.Flags()
	// Test that flags have proper help messages
	serverFS := fss.FlagSet("server")
	flag := serverFS.Lookup("enable-leader-election")
	assert.NotNil(t, flag)
	assert.Contains(t, flag.Usage, "Enable leader election")
	webhookFS := fss.FlagSet("webhook")
	flag = webhookFS.Lookup("use-webhook")
	assert.NotNil(t, flag)
	assert.Contains(t, flag.Usage, "Enable Admission Webhook")
	obsFS := fss.FlagSet("observability")
	flag = obsFS.Lookup("log-debug")
	assert.NotNil(t, flag)
	assert.Contains(t, flag.Usage, "Enable debug logs")
}
// TestCoreOptions_MultipleSyncCalls verifies that each SyncTo*Globals method
// is idempotent: calling it twice leaves the package globals at the values
// set on the config modules. All touched globals are saved and restored.
func TestCoreOptions_MultipleSyncCalls(t *testing.T) {
	// Store original values
	origCUEExternal := cuex.EnableExternalPackageForDefaultCompiler
	origCUEWatch := cuex.EnableExternalPackageWatchForDefaultCompiler
	origWait := wfTypes.MaxWorkflowWaitBackoffTime
	origDispatch := resourcekeeper.MaxDispatchConcurrent
	origOAMNamespace := oam.SystemDefinitionNamespace
	origAppPeriod := commonconfig.ApplicationReSyncPeriod
	origPerf := commonconfig.PerfEnabled
	// Restore after test
	defer func() {
		cuex.EnableExternalPackageForDefaultCompiler = origCUEExternal
		cuex.EnableExternalPackageWatchForDefaultCompiler = origCUEWatch
		wfTypes.MaxWorkflowWaitBackoffTime = origWait
		resourcekeeper.MaxDispatchConcurrent = origDispatch
		oam.SystemDefinitionNamespace = origOAMNamespace
		commonconfig.ApplicationReSyncPeriod = origAppPeriod
		commonconfig.PerfEnabled = origPerf
	}()
	// Test that calling sync multiple times doesn't cause issues
	opts := NewCoreOptions()
	// Set some values
	opts.CUE.EnableExternalPackage = true
	opts.CUE.EnableExternalPackageWatch = false
	opts.Workflow.MaxWaitBackoffTime = 100
	opts.Resource.MaxDispatchConcurrent = 20
	opts.OAM.SystemDefinitionNamespace = "test-system"
	opts.Application.ReSyncPeriod = 15 * time.Minute
	opts.Performance.PerfEnabled = true
	// Call sync multiple times
	opts.CUE.SyncToCUEGlobals()
	opts.CUE.SyncToCUEGlobals()
	opts.Workflow.SyncToWorkflowGlobals()
	opts.Workflow.SyncToWorkflowGlobals()
	opts.Resource.SyncToResourceGlobals()
	opts.Resource.SyncToResourceGlobals()
	opts.OAM.SyncToOAMGlobals()
	opts.OAM.SyncToOAMGlobals()
	opts.Application.SyncToApplicationGlobals()
	opts.Application.SyncToApplicationGlobals()
	opts.Performance.SyncToPerformanceGlobals()
	opts.Performance.SyncToPerformanceGlobals()
	// Verify values are still correct
	assert.True(t, cuex.EnableExternalPackageForDefaultCompiler)
	assert.False(t, cuex.EnableExternalPackageWatchForDefaultCompiler)
	assert.Equal(t, 100, wfTypes.MaxWorkflowWaitBackoffTime)
	assert.Equal(t, 20, resourcekeeper.MaxDispatchConcurrent)
	assert.Equal(t, "test-system", oam.SystemDefinitionNamespace)
	assert.Equal(t, 15*time.Minute, commonconfig.ApplicationReSyncPeriod)
	assert.True(t, commonconfig.PerfEnabled)
}
// TestCoreOptions_SpecialCharactersInStrings verifies that string-valued
// flags pass through spaces, punctuation, non-ASCII (CJK/Cyrillic) text, and
// IPv6 bracket notation without mangling.
func TestCoreOptions_SpecialCharactersInStrings(t *testing.T) {
	fs := pflag.NewFlagSet("test", pflag.ContinueOnError)
	opt := NewCoreOptions()
	for _, f := range opt.Flags().FlagSets {
		fs.AddFlagSet(f)
	}
	// Test with special characters and spaces in paths
	args := []string{
		`--webhook-cert-dir=/path/with spaces/and-special!@#$%chars`,
		`--log-file-path=/var/log/kubevela/日本語/логи.log`,
		`--health-addr=[::1]:8080`,
		`--metrics-addr=0.0.0.0:9090`,
	}
	err := fs.Parse(args)
	require.NoError(t, err)
	assert.Equal(t, `/path/with spaces/and-special!@#$%chars`, opt.Webhook.CertDir)
	assert.Equal(t, `/var/log/kubevela/日本語/логи.log`, opt.Observability.LogFilePath)
	assert.Equal(t, `[::1]:8080`, opt.Server.HealthAddr)
	assert.Equal(t, `0.0.0.0:9090`, opt.Observability.MetricsAddr)
}
// TestCoreOptions_ConcurrentAccess spins up three goroutines that repeatedly
// read option fields.
//
// NOTE(review): all goroutines only read, and nothing writes concurrently, so
// this test can only catch issues when run with -race — and even then only if
// a concurrent writer existed. Consider whether it provides real coverage.
func TestCoreOptions_ConcurrentAccess(t *testing.T) {
	// Test that the options can be accessed concurrently safely
	opt := NewCoreOptions()
	// Set some values
	opt.Server.EnableLeaderElection = true
	opt.Workflow.MaxWaitBackoffTime = 100
	opt.Resource.MaxDispatchConcurrent = 20
	// Simulate concurrent access
	done := make(chan bool, 3)
	go func() {
		for i := 0; i < 100; i++ {
			_ = opt.Server.EnableLeaderElection
		}
		done <- true
	}()
	go func() {
		for i := 0; i < 100; i++ {
			_ = opt.Workflow.MaxWaitBackoffTime
		}
		done <- true
	}()
	go func() {
		for i := 0; i < 100; i++ {
			_ = opt.Resource.MaxDispatchConcurrent
		}
		done <- true
	}()
	// Wait for all goroutines
	for i := 0; i < 3; i++ {
		<-done
	}
}
// TestCoreOptions_NilPointerSafety asserts every config-module pointer on a
// freshly constructed CoreOptions is non-nil, guarding against a module being
// dropped from NewCoreOptions.
func TestCoreOptions_NilPointerSafety(t *testing.T) {
	// Ensure NewCoreOptions never returns nil pointers
	opt := NewCoreOptions()
	// All config modules should be non-nil
	assert.NotNil(t, opt.Server)
	assert.NotNil(t, opt.Webhook)
	assert.NotNil(t, opt.Observability)
	assert.NotNil(t, opt.Kubernetes)
	assert.NotNil(t, opt.MultiCluster)
	assert.NotNil(t, opt.CUE)
	assert.NotNil(t, opt.Application)
	assert.NotNil(t, opt.OAM)
	assert.NotNil(t, opt.Performance)
	assert.NotNil(t, opt.Workflow)
	assert.NotNil(t, opt.Admission)
	assert.NotNil(t, opt.Resource)
	assert.NotNil(t, opt.Client)
	assert.NotNil(t, opt.Reconcile)
	assert.NotNil(t, opt.Sharding)
	assert.NotNil(t, opt.Feature)
	assert.NotNil(t, opt.Profiling)
	assert.NotNil(t, opt.KLog)
	assert.NotNil(t, opt.Controller)
}
func TestCoreOptions_FlagPrecedence(t *testing.T) {
	// Test that when the same flag appears more than once in a single
	// argument list, the last occurrence wins (pflag calls Set per
	// occurrence, so the final value sticks).
	//
	// NOTE(review): the previous version parsed two independent, freshly
	// created flag sets, which never exercises precedence at all — each
	// Parse saw the flag exactly once. Repeating the flag in one Parse
	// call is what actually tests "later flags override earlier ones".
	fs := pflag.NewFlagSet("test", pflag.ContinueOnError)
	opt := NewCoreOptions()
	for _, f := range opt.Flags().FlagSets {
		fs.AddFlagSet(f)
	}
	args := []string{"--webhook-port=8080", "--webhook-port=9090"}
	err := fs.Parse(args)
	require.NoError(t, err)
	assert.Equal(t, 9090, opt.Webhook.WebhookPort, "last occurrence of a repeated flag should win")
	// Sanity check: a fresh flag set with a single occurrence still parses
	// that value directly.
	fs = pflag.NewFlagSet("test", pflag.ContinueOnError)
	opt = NewCoreOptions()
	for _, f := range opt.Flags().FlagSets {
		fs.AddFlagSet(f)
	}
	err = fs.Parse([]string{"--webhook-port=8080"})
	require.NoError(t, err)
	assert.Equal(t, 8080, opt.Webhook.WebhookPort)
}
func TestCoreOptions_AllConfigModulesHaveFlags(t *testing.T) {
	// Ensure every config module registers its expected flags under the
	// corresponding named flag set.
	opt := NewCoreOptions()
	fss := opt.Flags()
	// Map of flag-set name -> representative flags that must be registered.
	configsWithExpectedFlags := map[string][]string{
		"server":        {"health-addr", "storage-driver", "enable-leader-election"},
		"webhook":       {"use-webhook", "webhook-cert-dir", "webhook-port"},
		"observability": {"metrics-addr", "log-debug", "log-file-path"},
		"kubernetes":    {"informer-sync-period", "kube-api-qps", "kube-api-burst"},
		"multicluster":  {"enable-cluster-gateway", "enable-cluster-metrics"},
		"cue":           {"enable-external-package-for-default-compiler"},
		"application":   {"application-re-sync-period"},
		"oam":           {"system-definition-namespace"},
		"controller":    {"revision-limit", "application-revision-limit", "definition-revision-limit"},
		"performance":   {"perf-enabled"},
		"workflow":      {"max-workflow-wait-backoff-time"},
		"resource":      {"max-dispatch-concurrent"},
	}
	for setName, expectedFlags := range configsWithExpectedFlags {
		fs := fss.FlagSet(setName)
		// assert.NotNil records the failure but does NOT stop the test; the
		// previous version then called fs.Lookup on a nil flag set, which
		// would panic and hide every remaining flag-set failure. Skip the
		// lookups instead when the flag set is missing.
		if !assert.NotNil(t, fs, "FlagSet %s should exist", setName) {
			continue
		}
		for _, flagName := range expectedFlags {
			flag := fs.Lookup(flagName)
			assert.NotNil(t, flag, "Flag %s should exist in flagset %s", flagName, setName)
		}
	}
}
func TestCoreOptions_CLIOverridesWork(t *testing.T) {
	// Verifies that CLI flags override the struct defaults, and that the
	// Sync* helpers propagate the parsed values into package-level globals.

	// Snapshot every global this test mutates and restore them when the
	// test (and its subtests/cleanups) finish.
	var (
		savedWait        = wfTypes.MaxWorkflowWaitBackoffTime
		savedFailed      = wfTypes.MaxWorkflowFailedBackoffTime
		savedRetry       = wfTypes.MaxWorkflowStepErrorRetryTimes
		savedDispatch    = resourcekeeper.MaxDispatchConcurrent
		savedNamespace   = oam.SystemDefinitionNamespace
		savedReSync      = commonconfig.ApplicationReSyncPeriod
		savedPerf        = commonconfig.PerfEnabled
		savedCUEExternal = cuex.EnableExternalPackageForDefaultCompiler
		savedCUEWatch    = cuex.EnableExternalPackageWatchForDefaultCompiler
	)
	t.Cleanup(func() {
		wfTypes.MaxWorkflowWaitBackoffTime = savedWait
		wfTypes.MaxWorkflowFailedBackoffTime = savedFailed
		wfTypes.MaxWorkflowStepErrorRetryTimes = savedRetry
		resourcekeeper.MaxDispatchConcurrent = savedDispatch
		oam.SystemDefinitionNamespace = savedNamespace
		commonconfig.ApplicationReSyncPeriod = savedReSync
		commonconfig.PerfEnabled = savedPerf
		cuex.EnableExternalPackageForDefaultCompiler = savedCUEExternal
		cuex.EnableExternalPackageWatchForDefaultCompiler = savedCUEWatch
	})
	opt := NewCoreOptions()
	flagSet := pflag.NewFlagSet("test", pflag.ContinueOnError)
	for _, group := range opt.Flags().FlagSets {
		flagSet.AddFlagSet(group)
	}
	// Before parsing, the struct fields must carry the documented defaults.
	assert.Equal(t, 60, opt.Workflow.MaxWaitBackoffTime, "Default should be 60")
	assert.Equal(t, 300, opt.Workflow.MaxFailedBackoffTime, "Default should be 300")
	assert.Equal(t, 10, opt.Workflow.MaxStepErrorRetryTimes, "Default should be 10")
	assert.Equal(t, 10, opt.Resource.MaxDispatchConcurrent, "Default should be 10")
	assert.Equal(t, "vela-system", opt.OAM.SystemDefinitionNamespace, "Default should be vela-system")
	assert.Equal(t, false, opt.Performance.PerfEnabled, "Default should be false")
	// Parse CLI arguments overriding every default checked above.
	require.NoError(t, flagSet.Parse([]string{
		"--max-workflow-wait-backoff-time=999",
		"--max-workflow-failed-backoff-time=888",
		"--max-workflow-step-error-retry-times=77",
		"--max-dispatch-concurrent=66",
		"--system-definition-namespace=custom-ns",
		"--application-re-sync-period=20m",
		"--perf-enabled=true",
		"--enable-external-package-for-default-compiler=true",
		"--enable-external-package-watch-for-default-compiler=true",
	}))
	// The struct fields now hold the CLI values, not the defaults.
	assert.Equal(t, 999, opt.Workflow.MaxWaitBackoffTime, "CLI override should be 999")
	assert.Equal(t, 888, opt.Workflow.MaxFailedBackoffTime, "CLI override should be 888")
	assert.Equal(t, 77, opt.Workflow.MaxStepErrorRetryTimes, "CLI override should be 77")
	assert.Equal(t, 66, opt.Resource.MaxDispatchConcurrent, "CLI override should be 66")
	assert.Equal(t, "custom-ns", opt.OAM.SystemDefinitionNamespace, "CLI override should be custom-ns")
	assert.Equal(t, 20*time.Minute, opt.Application.ReSyncPeriod, "CLI override should be 20m")
	assert.Equal(t, true, opt.Performance.PerfEnabled, "CLI override should be true")
	assert.Equal(t, true, opt.CUE.EnableExternalPackage, "CLI override should be true")
	assert.Equal(t, true, opt.CUE.EnableExternalPackageWatch, "CLI override should be true")
	// Propagate the parsed values to the globals.
	opt.Workflow.SyncToWorkflowGlobals()
	opt.Resource.SyncToResourceGlobals()
	opt.OAM.SyncToOAMGlobals()
	opt.Application.SyncToApplicationGlobals()
	opt.Performance.SyncToPerformanceGlobals()
	opt.CUE.SyncToCUEGlobals()
	// The globals now mirror the CLI values.
	assert.Equal(t, 999, wfTypes.MaxWorkflowWaitBackoffTime, "Global should have CLI value")
	assert.Equal(t, 888, wfTypes.MaxWorkflowFailedBackoffTime, "Global should have CLI value")
	assert.Equal(t, 77, wfTypes.MaxWorkflowStepErrorRetryTimes, "Global should have CLI value")
	assert.Equal(t, 66, resourcekeeper.MaxDispatchConcurrent, "Global should have CLI value")
	assert.Equal(t, "custom-ns", oam.SystemDefinitionNamespace, "Global should have CLI value")
	assert.Equal(t, 20*time.Minute, commonconfig.ApplicationReSyncPeriod, "Global should have CLI value")
	assert.Equal(t, true, commonconfig.PerfEnabled, "Global should have CLI value")
	assert.Equal(t, true, cuex.EnableExternalPackageForDefaultCompiler, "Global should have CLI value")
	assert.Equal(t, true, cuex.EnableExternalPackageWatchForDefaultCompiler, "Global should have CLI value")
}
func TestCoreOptions_CompleteIntegration(t *testing.T) {
	// Comprehensive integration test: parse a production-like flag profile,
	// verify the parsed struct, sync to globals, and verify the globals.
	opt := NewCoreOptions()
	flagSet := pflag.NewFlagSet("test", pflag.ContinueOnError)
	for _, group := range opt.Flags().FlagSets {
		flagSet.AddFlagSet(group)
	}
	productionArgs := []string{
		// Production-like settings
		"--enable-leader-election=true",
		"--leader-election-namespace=vela-system",
		"--use-webhook=true",
		"--webhook-port=9443",
		"--metrics-addr=:8080",
		"--health-addr=:9440",
		"--log-debug=false",
		"--log-file-path=/var/log/vela/core.log",
		"--log-file-max-size=100",
		"--kube-api-qps=100",
		"--kube-api-burst=200",
		"--enable-cluster-gateway=true",
		"--enable-cluster-metrics=true",
		"--cluster-metrics-interval=30s",
		"--application-re-sync-period=10m",
		"--perf-enabled=true",
		"--max-dispatch-concurrent=20",
		"--max-workflow-wait-backoff-time=120",
		"--max-workflow-failed-backoff-time=600",
	}
	require.NoError(t, flagSet.Parse(productionArgs))
	// Server/webhook/logging settings reflect a production posture.
	assert.True(t, opt.Server.EnableLeaderElection, "Leader election should be enabled in production")
	assert.Equal(t, "vela-system", opt.Server.LeaderElectionNamespace)
	assert.True(t, opt.Webhook.UseWebhook, "Webhook should be enabled in production")
	assert.Equal(t, 9443, opt.Webhook.WebhookPort)
	assert.False(t, opt.Observability.LogDebug, "Debug logging should be disabled in production")
	assert.NotEmpty(t, opt.Observability.LogFilePath, "Log file path should be set in production")
	// Performance and dispatch settings.
	assert.True(t, opt.Performance.PerfEnabled)
	assert.Equal(t, 20, opt.Resource.MaxDispatchConcurrent)
	// Multi-cluster settings.
	assert.True(t, opt.MultiCluster.EnableClusterGateway)
	assert.True(t, opt.MultiCluster.EnableClusterMetrics)
	assert.Equal(t, 30*time.Second, opt.MultiCluster.ClusterMetricsInterval)
	// Push every parsed module that has a sync step out to its globals.
	opt.CUE.SyncToCUEGlobals()
	opt.Workflow.SyncToWorkflowGlobals()
	opt.Resource.SyncToResourceGlobals()
	opt.OAM.SyncToOAMGlobals()
	opt.Application.SyncToApplicationGlobals()
	opt.Performance.SyncToPerformanceGlobals()
	// The globals must mirror the parsed configuration.
	assert.Equal(t, 20, resourcekeeper.MaxDispatchConcurrent)
	assert.Equal(t, 120, wfTypes.MaxWorkflowWaitBackoffTime)
	assert.Equal(t, 600, wfTypes.MaxWorkflowFailedBackoffTime)
	assert.Equal(t, "vela-system", oam.SystemDefinitionNamespace)
	assert.Equal(t, 10*time.Minute, commonconfig.ApplicationReSyncPeriod)
	assert.True(t, commonconfig.PerfEnabled)
	assert.True(t, cuex.EnableExternalPackageForDefaultCompiler, "The --enable-external-package-for-default-compiler flag should be enabled")
	assert.True(t, cuex.EnableExternalPackageWatchForDefaultCompiler, "The --enable-external-package-watch-for-default-compiler flag should be enabled")
}

View File

@@ -18,7 +18,6 @@ package app
import (
"context"
"flag"
"fmt"
"io"
"os"
@@ -33,7 +32,6 @@ import (
"github.com/pkg/errors"
"github.com/spf13/cobra"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"
"k8s.io/klog/v2/textlogger"
ctrl "sigs.k8s.io/controller-runtime"
@@ -47,7 +45,6 @@ import (
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/cmd/core/app/config"
"github.com/oam-dev/kubevela/cmd/core/app/hooks"
"github.com/oam-dev/kubevela/cmd/core/app/options"
"github.com/oam-dev/kubevela/pkg/auth"
@@ -73,12 +70,12 @@ var (
// NewCoreCommand creates a *cobra.Command object with default parameters
func NewCoreCommand() *cobra.Command {
coreOptions := options.NewCoreOptions()
s := options.NewCoreOptions()
cmd := &cobra.Command{
Use: "vela-core",
Long: `The KubeVela controller manager is a daemon that embeds the core control loops shipped with KubeVela`,
RunE: func(cmd *cobra.Command, args []string) error {
return run(signals.SetupSignalHandler(), coreOptions)
return run(signals.SetupSignalHandler(), s)
},
SilenceUsage: true,
FParseErrWhitelist: cobra.FParseErrWhitelist{
@@ -87,10 +84,10 @@ func NewCoreCommand() *cobra.Command {
},
}
flags := cmd.Flags()
namedFlagSets := coreOptions.Flags()
for _, flagSet := range namedFlagSets.FlagSets {
flags.AddFlagSet(flagSet)
fs := cmd.Flags()
namedFlagSets := s.Flags()
for _, set := range namedFlagSets.FlagSets {
fs.AddFlagSet(set)
}
meta.Name = types.VelaCoreName
@@ -100,147 +97,15 @@ func NewCoreCommand() *cobra.Command {
return cmd
}
func run(ctx context.Context, coreOptions *options.CoreOptions) error {
klog.InfoS("Starting KubeVela core controller",
"context", "initialization",
"leaderElection", coreOptions.Server.EnableLeaderElection,
"webhookEnabled", coreOptions.Webhook.UseWebhook)
// Sync configurations
klog.V(2).InfoS("Syncing configurations to global variables")
syncConfigurations(coreOptions)
klog.InfoS("Configuration sync completed successfully")
// Setup logging
klog.V(2).InfoS("Setting up logging configuration",
"debug", coreOptions.Observability.LogDebug,
"devLogs", coreOptions.Observability.DevLogs,
"logFilePath", coreOptions.Observability.LogFilePath)
setupLogging(coreOptions.Observability)
// Configure Kubernetes client
klog.InfoS("Configuring Kubernetes client",
"QPS", coreOptions.Kubernetes.QPS,
"burst", coreOptions.Kubernetes.Burst)
kubeConfig, err := configureKubernetesClient(coreOptions.Kubernetes)
if err != nil {
klog.ErrorS(err, "Failed to configure Kubernetes client")
return fmt.Errorf("failed to configure Kubernetes client: %w", err)
}
// Start profiling server
klog.V(2).InfoS("Starting profiling server in background")
go profiling.StartProfilingServer(nil)
// Setup multi-cluster if enabled
if coreOptions.MultiCluster.EnableClusterGateway {
klog.InfoS("Multi-cluster gateway enabled, setting up multi-cluster capability",
"enableMetrics", coreOptions.MultiCluster.EnableClusterMetrics,
"metricsInterval", coreOptions.MultiCluster.ClusterMetricsInterval)
if err := setupMultiCluster(ctx, kubeConfig, coreOptions.MultiCluster); err != nil {
klog.ErrorS(err, "Failed to setup multi-cluster")
return fmt.Errorf("failed to setup multi-cluster: %w", err)
}
klog.InfoS("Multi-cluster setup completed successfully")
}
// Configure feature gates
klog.V(2).InfoS("Configuring feature gates")
configureFeatureGates(coreOptions)
// Create controller manager
klog.InfoS("Creating controller manager",
"metricsAddr", coreOptions.Observability.MetricsAddr,
"healthAddr", coreOptions.Server.HealthAddr,
"webhookPort", coreOptions.Webhook.WebhookPort)
manager, err := createControllerManager(ctx, kubeConfig, coreOptions)
if err != nil {
klog.ErrorS(err, "Failed to create controller manager")
return fmt.Errorf("failed to create controller manager: %w", err)
}
klog.InfoS("Controller manager created successfully")
// Register health checks
klog.V(2).InfoS("Registering health and readiness checks")
if err := registerHealthChecks(manager); err != nil {
klog.ErrorS(err, "Failed to register health checks")
return fmt.Errorf("failed to register health checks: %w", err)
}
// Setup controllers based on sharding mode
klog.InfoS("Setting up controllers",
"shardingEnabled", sharding.EnableSharding,
"shardID", sharding.ShardID)
if err := setupControllers(ctx, manager, coreOptions); err != nil {
klog.ErrorS(err, "Failed to setup controllers")
return fmt.Errorf("failed to setup controllers: %w", err)
}
klog.InfoS("Controllers setup completed successfully")
// Start application monitor
klog.InfoS("Starting application metrics monitor")
if err := startApplicationMonitor(ctx, manager); err != nil {
klog.ErrorS(err, "Failed to start application monitor")
return fmt.Errorf("failed to start application monitor: %w", err)
}
// Start the manager
klog.InfoS("Starting controller manager")
if err := manager.Start(ctx); err != nil {
klog.ErrorS(err, "Failed to run manager")
return err
}
// Cleanup
performCleanup(coreOptions)
klog.InfoS("Program safely stopped")
return nil
}
// syncConfigurations syncs parsed config values to external package global variables
func syncConfigurations(coreOptions *options.CoreOptions) {
if coreOptions.Workflow != nil {
klog.V(3).InfoS("Syncing workflow configuration")
coreOptions.Workflow.SyncToWorkflowGlobals()
}
if coreOptions.CUE != nil {
klog.V(3).InfoS("Syncing CUE configuration")
coreOptions.CUE.SyncToCUEGlobals()
}
if coreOptions.Application != nil {
klog.V(3).InfoS("Syncing application configuration")
coreOptions.Application.SyncToApplicationGlobals()
}
if coreOptions.Performance != nil {
klog.V(3).InfoS("Syncing performance configuration")
coreOptions.Performance.SyncToPerformanceGlobals()
}
if coreOptions.Resource != nil {
klog.V(3).InfoS("Syncing resource configuration")
coreOptions.Resource.SyncToResourceGlobals()
}
if coreOptions.OAM != nil {
klog.V(3).InfoS("Syncing OAM configuration")
coreOptions.OAM.SyncToOAMGlobals()
}
}
// setupLogging configures klog based on parsed observability settings
func setupLogging(observabilityConfig *config.ObservabilityConfig) {
// Configure klog verbosity
if observabilityConfig.LogDebug {
_ = flag.Set("v", strconv.Itoa(int(commonconfig.LogDebug)))
}
// Configure log file output
if observabilityConfig.LogFilePath != "" {
_ = flag.Set("logtostderr", "false")
_ = flag.Set("log_file", observabilityConfig.LogFilePath)
_ = flag.Set("log_file_max_size", strconv.FormatUint(observabilityConfig.LogFileMaxSize, 10))
}
func run(ctx context.Context, s *options.CoreOptions) error {
restConfig := ctrl.GetConfigOrDie()
restConfig.UserAgent = types.KubeVelaName + "/" + version.GitRevision
restConfig.QPS = float32(s.QPS)
restConfig.Burst = s.Burst
restConfig.Wrap(auth.NewImpersonatingRoundTripper)
// Set logger (use --dev-logs=true for local development)
if observabilityConfig.DevLogs {
if s.DevLogs {
logOutput := newColorWriter(os.Stdout)
klog.LogToStderr(false)
klog.SetOutput(logOutput)
@@ -248,99 +113,58 @@ func setupLogging(observabilityConfig *config.ObservabilityConfig) {
} else {
ctrl.SetLogger(textlogger.NewLogger(textlogger.NewConfig()))
}
}
// ConfigProvider is a function type that provides a Kubernetes REST config
type ConfigProvider func() (*rest.Config, error)
// configureKubernetesClient creates and configures the Kubernetes REST config
func configureKubernetesClient(kubernetesConfig *config.KubernetesConfig) (*rest.Config, error) {
return configureKubernetesClientWithProvider(kubernetesConfig, ctrl.GetConfig)
}
// configureKubernetesClientWithProvider creates and configures the Kubernetes REST config
// using a provided config provider function. This allows for dependency injection in tests.
func configureKubernetesClientWithProvider(kubernetesConfig *config.KubernetesConfig, configProvider ConfigProvider) (*rest.Config, error) {
// Gracefully handle error returns instead of panicking
kubeConfig, err := configProvider()
if err != nil {
return nil, err
}
kubeConfig.UserAgent = types.KubeVelaName + "/" + version.GitRevision
kubeConfig.QPS = float32(kubernetesConfig.QPS)
kubeConfig.Burst = kubernetesConfig.Burst
kubeConfig.Wrap(auth.NewImpersonatingRoundTripper)
klog.InfoS("Kubernetes Config Loaded",
"UserAgent", kubeConfig.UserAgent,
"QPS", kubeConfig.QPS,
"Burst", kubeConfig.Burst,
"UserAgent", restConfig.UserAgent,
"QPS", restConfig.QPS,
"Burst", restConfig.Burst,
)
go profiling.StartProfilingServer(nil)
return kubeConfig, nil
}
// setupMultiCluster initializes multi-cluster capability
func setupMultiCluster(ctx context.Context, kubeConfig *rest.Config, multiClusterConfig *config.MultiClusterConfig) error {
klog.V(2).InfoS("Initializing multi-cluster client")
clusterClient, err := multicluster.Initialize(kubeConfig, true)
if err != nil {
klog.ErrorS(err, "Failed to enable multi-cluster capability")
return err
}
klog.InfoS("Multi-cluster client initialized successfully")
if multiClusterConfig.EnableClusterMetrics {
klog.InfoS("Enabling cluster metrics collection",
"interval", multiClusterConfig.ClusterMetricsInterval)
_, err := multicluster.NewClusterMetricsMgr(ctx, clusterClient, multiClusterConfig.ClusterMetricsInterval)
// wrapper the round tripper by multi cluster rewriter
if s.EnableClusterGateway {
client, err := multicluster.Initialize(restConfig, true)
if err != nil {
klog.ErrorS(err, "Failed to enable multi-cluster-metrics capability")
klog.ErrorS(err, "failed to enable multi-cluster capability")
return err
}
klog.InfoS("Cluster metrics manager initialized successfully")
if s.EnableClusterMetrics {
_, err := multicluster.NewClusterMetricsMgr(context.Background(), client, s.ClusterMetricsInterval)
if err != nil {
klog.ErrorS(err, "failed to enable multi-cluster-metrics capability")
return err
}
}
}
return nil
}
// configureFeatureGates sets up feature-dependent configurations
func configureFeatureGates(coreOptions *options.CoreOptions) {
if utilfeature.DefaultMutableFeatureGate.Enabled(features.ApplyOnce) {
klog.V(2).InfoS("ApplyOnce feature gate enabled, configuring application re-sync period",
"period", coreOptions.Kubernetes.InformerSyncPeriod)
commonconfig.ApplicationReSyncPeriod = coreOptions.Kubernetes.InformerSyncPeriod
commonconfig.ApplicationReSyncPeriod = s.InformerSyncPeriod
}
}
// buildManagerOptions constructs ctrl.Options from CoreOptions for creating a controller manager.
// This function is extracted for testability - it contains the option construction logic
// without the side effects of creating a manager or starting background processes.
func buildManagerOptions(ctx context.Context, coreOptions *options.CoreOptions) ctrl.Options {
leaderElectionID := util.GenerateLeaderElectionID(types.KubeVelaName, coreOptions.Controller.IgnoreAppWithoutControllerRequirement)
leaderElectionID := util.GenerateLeaderElectionID(types.KubeVelaName, s.ControllerArgs.IgnoreAppWithoutControllerRequirement)
leaderElectionID += sharding.GetShardIDSuffix()
return ctrl.Options{
mgr, err := ctrl.NewManager(restConfig, ctrl.Options{
Scheme: scheme,
Metrics: metricsserver.Options{
BindAddress: coreOptions.Observability.MetricsAddr,
BindAddress: s.MetricsAddr,
},
LeaderElection: coreOptions.Server.EnableLeaderElection,
LeaderElectionNamespace: coreOptions.Server.LeaderElectionNamespace,
LeaderElection: s.EnableLeaderElection,
LeaderElectionNamespace: s.LeaderElectionNamespace,
LeaderElectionID: leaderElectionID,
WebhookServer: ctrlwebhook.NewServer(ctrlwebhook.Options{
Port: coreOptions.Webhook.WebhookPort,
CertDir: coreOptions.Webhook.CertDir,
Port: s.WebhookPort,
CertDir: s.CertDir,
}),
HealthProbeBindAddress: coreOptions.Server.HealthAddr,
LeaseDuration: &coreOptions.Server.LeaseDuration,
RenewDeadline: &coreOptions.Server.RenewDeadline,
RetryPeriod: &coreOptions.Server.RetryPeriod,
HealthProbeBindAddress: s.HealthAddr,
LeaseDuration: &s.LeaseDuration,
RenewDeadline: &s.RenewDeadLine,
RetryPeriod: &s.RetryPeriod,
NewClient: velaclient.DefaultNewControllerClient,
NewCache: cache.BuildCache(ctx,
ctrlcache.Options{
Scheme: scheme,
SyncPeriod: &coreOptions.Kubernetes.InformerSyncPeriod,
SyncPeriod: &s.InformerSyncPeriod,
// SyncPeriod is configured with default value, aka. 10h. First, controller-runtime does not
// recommend use it as a time trigger, instead, it is expected to work for failure tolerance
// of controller-runtime. Additionally, set this value will affect not only application
@@ -354,160 +178,105 @@ func buildManagerOptions(ctx context.Context, coreOptions *options.CoreOptions)
DisableFor: cache.NewResourcesToDisableCache(),
},
},
}
}
// createControllerManager creates and configures the controller-runtime manager
func createControllerManager(ctx context.Context, kubeConfig *rest.Config, coreOptions *options.CoreOptions) (ctrl.Manager, error) {
leaderElectionID := util.GenerateLeaderElectionID(types.KubeVelaName, coreOptions.Controller.IgnoreAppWithoutControllerRequirement)
leaderElectionID += sharding.GetShardIDSuffix()
klog.V(2).InfoS("Creating controller manager with configuration",
"leaderElectionID", leaderElectionID,
"leaderElection", coreOptions.Server.EnableLeaderElection,
"leaderElectionNamespace", coreOptions.Server.LeaderElectionNamespace,
"leaseDuration", coreOptions.Server.LeaseDuration,
"renewDeadline", coreOptions.Server.RenewDeadline)
managerOptions := buildManagerOptions(ctx, coreOptions)
manager, err := ctrl.NewManager(kubeConfig, managerOptions)
})
if err != nil {
klog.ErrorS(err, "Unable to create a controller manager")
return nil, err
}
return manager, nil
}
// setupControllers sets up controllers based on sharding configuration
func setupControllers(ctx context.Context, manager ctrl.Manager, coreOptions *options.CoreOptions) error {
if !sharding.EnableSharding {
return prepareRun(ctx, manager, coreOptions)
}
return prepareRunInShardingMode(ctx, manager, coreOptions)
}
// startApplicationMonitor starts the application metrics watcher
func startApplicationMonitor(ctx context.Context, manager ctrl.Manager) error {
klog.InfoS("Starting vela application monitor")
applicationInformer, err := manager.GetCache().GetInformer(ctx, &v1beta1.Application{})
if err != nil {
klog.ErrorS(err, "Unable to get informer for application")
return err
}
watcher.StartApplicationMetricsWatcher(applicationInformer)
klog.V(2).InfoS("Application metrics watcher started successfully")
return nil
}
// performCleanup handles any necessary cleanup operations
func performCleanup(coreOptions *options.CoreOptions) {
klog.V(2).InfoS("Performing cleanup operations")
if coreOptions.Observability.LogFilePath != "" {
klog.V(3).InfoS("Flushing log file", "path", coreOptions.Observability.LogFilePath)
klog.Flush()
if err := registerHealthChecks(mgr); err != nil {
klog.ErrorS(err, "Unable to register ready/health checks")
return err
}
}
// prepareRunInShardingMode initializes the controller manager in sharding mode where workload
// is distributed across multiple controller instances. In sharding mode:
// - Master shard handles webhooks, scheduling, and full controller setup
// - Non-master shards only run the Application controller for their assigned Applications
// This enables horizontal scaling of the KubeVela control plane across multiple pods.
func prepareRunInShardingMode(ctx context.Context, manager manager.Manager, coreOptions *options.CoreOptions) error {
if sharding.IsMaster() {
klog.InfoS("Controller running in sharding mode",
"shardType", "master",
"webhookAutoSchedule", !utilfeature.DefaultMutableFeatureGate.Enabled(features.DisableWebhookAutoSchedule))
if !utilfeature.DefaultMutableFeatureGate.Enabled(features.DisableWebhookAutoSchedule) {
klog.V(2).InfoS("Starting webhook auto-scheduler in background")
go sharding.DefaultScheduler.Get().Start(ctx)
}
if err := prepareRun(ctx, manager, coreOptions); err != nil {
if !sharding.EnableSharding {
if err = prepareRun(ctx, mgr, s); err != nil {
return err
}
} else {
klog.InfoS("Controller running in sharding mode",
"shardType", "worker",
"shardID", sharding.ShardID)
klog.V(2).InfoS("Setting up application controller for worker shard")
if err := application.Setup(manager, coreOptions.Controller.Args); err != nil {
klog.ErrorS(err, "Failed to setup application controller in sharding mode")
if err = prepareRunInShardingMode(ctx, mgr, s); err != nil {
return err
}
}
klog.Info("Start the vela application monitor")
informer, err := mgr.GetCache().GetInformer(ctx, &v1beta1.Application{})
if err != nil {
klog.ErrorS(err, "Unable to get informer for application")
}
watcher.StartApplicationMetricsWatcher(informer)
if err := mgr.Start(ctx); err != nil {
klog.ErrorS(err, "Failed to run manager")
return err
}
if s.LogFilePath != "" {
klog.Flush()
}
klog.Info("Safely stops Program...")
return nil
}
func prepareRunInShardingMode(ctx context.Context, mgr manager.Manager, s *options.CoreOptions) error {
if sharding.IsMaster() {
klog.Infof("controller running in sharding mode, current shard is master")
if !utilfeature.DefaultMutableFeatureGate.Enabled(features.DisableWebhookAutoSchedule) {
go sharding.DefaultScheduler.Get().Start(ctx)
}
if err := prepareRun(ctx, mgr, s); err != nil {
return err
}
} else {
klog.Infof("controller running in sharding mode, current shard id: %s", sharding.ShardID)
if err := application.Setup(mgr, *s.ControllerArgs); err != nil {
return err
}
klog.InfoS("Application controller setup completed for worker shard")
}
return nil
}
// prepareRun sets up the complete KubeVela controller manager with all necessary components:
// - Configures and registers OAM webhooks if enabled
// - Sets up all OAM controllers (Application, ComponentDefinition, WorkflowStepDefinition, PolicyDefinition, and TraitDefinition)
// - Initializes multi-cluster capabilities and cluster info
// - Runs pre-start validation hooks to ensure system readiness
// This function is used in single-instance mode or by the master shard in sharding mode.
func prepareRun(ctx context.Context, manager manager.Manager, coreOptions *options.CoreOptions) error {
if coreOptions.Webhook.UseWebhook {
klog.InfoS("Webhook enabled, registering OAM webhooks",
"port", coreOptions.Webhook.WebhookPort,
"certDir", coreOptions.Webhook.CertDir)
oamwebhook.Register(manager, coreOptions.Controller.Args)
klog.V(2).InfoS("Waiting for webhook secret volume",
"timeout", waitSecretTimeout,
"checkInterval", waitSecretInterval)
if err := waitWebhookSecretVolume(coreOptions.Webhook.CertDir, waitSecretTimeout, waitSecretInterval); err != nil {
func prepareRun(ctx context.Context, mgr manager.Manager, s *options.CoreOptions) error {
if s.UseWebhook {
klog.InfoS("Enable webhook", "server port", strconv.Itoa(s.WebhookPort))
oamwebhook.Register(mgr, *s.ControllerArgs)
if err := waitWebhookSecretVolume(s.CertDir, waitSecretTimeout, waitSecretInterval); err != nil {
klog.ErrorS(err, "Unable to get webhook secret")
return err
}
klog.InfoS("Webhook secret volume ready, webhooks registered successfully")
}
klog.InfoS("Setting up OAM controllers")
if err := oamv1beta1.Setup(manager, coreOptions.Controller.Args); err != nil {
klog.ErrorS(err, "Unable to setup the OAM controller")
return err
}
klog.InfoS("OAM controllers setup completed successfully")
klog.V(2).InfoS("Initializing control plane cluster info")
if err := multicluster.InitClusterInfo(manager.GetConfig()); err != nil {
klog.ErrorS(err, "Failed to init control plane cluster info")
if err := oamv1beta1.Setup(mgr, *s.ControllerArgs); err != nil {
klog.ErrorS(err, "Unable to setup the oam controller")
return err
}
klog.InfoS("Starting vela controller manager with pre-start validation")
if err := multicluster.InitClusterInfo(mgr.GetConfig()); err != nil {
klog.ErrorS(err, "Init control plane cluster info")
return err
}
klog.Info("Start the vela controller manager")
for _, hook := range []hooks.PreStartHook{hooks.NewSystemCRDValidationHook()} {
klog.V(2).InfoS("Running pre-start hook", "hook", fmt.Sprintf("%T", hook))
if err := hook.Run(ctx); err != nil {
klog.ErrorS(err, "Failed to run pre-start hook", "hook", fmt.Sprintf("%T", hook))
return fmt.Errorf("failed to run hook %T: %w", hook, err)
}
}
klog.InfoS("Pre-start validation completed successfully")
return nil
}
// registerHealthChecks is used to create readiness&liveness probes
func registerHealthChecks(manager ctrl.Manager) error {
klog.InfoS("Registering readiness and health checks")
if err := manager.AddReadyzCheck("ping", healthz.Ping); err != nil {
klog.ErrorS(err, "Failed to add readiness check")
func registerHealthChecks(mgr ctrl.Manager) error {
klog.Info("Create readiness/health check")
if err := mgr.AddReadyzCheck("ping", healthz.Ping); err != nil {
return err
}
klog.V(3).InfoS("Readiness check registered", "check", "ping")
// TODO: change the health check to be different from readiness check
if err := manager.AddHealthzCheck("ping", healthz.Ping); err != nil {
klog.ErrorS(err, "Failed to add health check")
return err
}
klog.V(3).InfoS("Health check registered", "check", "ping")
return nil
return mgr.AddHealthzCheck("ping", healthz.Ping)
}
// waitWebhookSecretVolume waits for webhook secret ready to avoid manager running crash
// waitWebhookSecretVolume waits for webhook secret ready to avoid mgr running crash
func waitWebhookSecretVolume(certDir string, timeout, interval time.Duration) error {
start := time.Now()
for {
@@ -519,23 +288,23 @@ func waitWebhookSecretVolume(certDir string, timeout, interval time.Duration) er
"timeout(second)", int64(timeout.Seconds()))
if _, err := os.Stat(certDir); !os.IsNotExist(err) {
ready := func() bool {
certDirectory, err := os.Open(filepath.Clean(certDir))
f, err := os.Open(filepath.Clean(certDir))
if err != nil {
return false
}
defer func() {
if err := certDirectory.Close(); err != nil {
klog.ErrorS(err, "Failed to close directory")
if err := f.Close(); err != nil {
klog.Error(err, "Failed to close file")
}
}()
// check if dir is empty
if _, err := certDirectory.Readdir(1); errors.Is(err, io.EOF) {
if _, err := f.Readdir(1); errors.Is(err, io.EOF) {
return false
}
// check if secret files are empty
err = filepath.Walk(certDir, func(path string, fileInfo os.FileInfo, err error) error {
err = filepath.Walk(certDir, func(path string, info os.FileInfo, err error) error {
// even Cert dir is created, cert files are still empty for a while
if fileInfo.Size() == 0 {
if info.Size() == 0 {
return errors.New("secret is not ready")
}
return nil

View File

@@ -17,42 +17,18 @@ limitations under the License.
package app
import (
"bytes"
"context"
"flag"
"fmt"
"os"
"path/filepath"
"testing"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/envtest"
"github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/cmd/core/app/config"
"github.com/oam-dev/kubevela/cmd/core/app/options"
commonconfig "github.com/oam-dev/kubevela/pkg/controller/common"
"github.com/oam-dev/kubevela/version"
)
/*
Test Organization Notes:
- Unit tests for all server helper functions are in this file
- Tests use mocks and fakes to avoid needing real Kubernetes components
- All tests use Ginkgo for consistency
*/
var (
testdir = "testdir"
testTimeout = 2 * time.Second
testInterval = 1 * time.Second
testEnv *envtest.Environment
testConfig *rest.Config
)
func TestGinkgo(t *testing.T) {
@@ -60,574 +36,46 @@ func TestGinkgo(t *testing.T) {
RunSpecs(t, "test main")
}
var _ = BeforeSuite(func() {
By("bootstrapping test environment")
useExistCluster := false
var _ = Describe("test waitSecretVolume", func() {
BeforeEach(func() {
err := os.MkdirAll(testdir, 0755)
Expect(err).NotTo(HaveOccurred())
})
AfterEach(func() {
os.RemoveAll(testdir)
})
// Resolve the CRD path relative to the test file location
crdPath := filepath.Join("..", "..", "..", "charts", "vela-core", "crds")
When("dir not exist or empty", func() {
It("return timeout error", func() {
err := waitWebhookSecretVolume(testdir, testTimeout, testInterval)
Expect(err).To(HaveOccurred())
By("remove dir")
os.RemoveAll(testdir)
err = waitWebhookSecretVolume(testdir, testTimeout, testInterval)
Expect(err).To(HaveOccurred())
})
})
testEnv = &envtest.Environment{
ControlPlaneStartTimeout: 2 * time.Minute, // Increased timeout for CI
ControlPlaneStopTimeout: time.Minute,
CRDDirectoryPaths: []string{
crdPath,
},
UseExistingCluster: &useExistCluster,
ErrorIfCRDPathMissing: true, // Fail fast if CRDs are not found
}
When("dir contains empty file", func() {
It("return timeout error", func() {
By("add empty file")
_, err := os.Create(testdir + "/emptyFile")
Expect(err).NotTo(HaveOccurred())
err = waitWebhookSecretVolume(testdir, testTimeout, testInterval)
Expect(err).To(HaveOccurred())
})
})
var err error
testConfig, err = testEnv.Start()
Expect(err).ToNot(HaveOccurred())
Expect(testConfig).ToNot(BeNil())
})
var _ = AfterSuite(func() {
By("tearing down the test environment")
if testEnv != nil {
err := testEnv.Stop()
Expect(err).ToNot(HaveOccurred())
}
})
var _ = Describe("Server Tests", func() {
Describe("waitWebhookSecretVolume", func() {
BeforeEach(func() {
err := os.MkdirAll(testdir, 0755)
When("files in dir are not empty", func() {
It("return nil", func() {
By("add non-empty file")
_, err := os.Create(testdir + "/file")
Expect(err).NotTo(HaveOccurred())
err = os.WriteFile(testdir+"/file", []byte("test"), os.ModeAppend)
Expect(err).NotTo(HaveOccurred())
err = waitWebhookSecretVolume(testdir, testTimeout, testInterval)
Expect(err).NotTo(HaveOccurred())
})
AfterEach(func() {
os.RemoveAll(testdir)
})
When("dir not exist or empty", func() {
It("return timeout error", func() {
err := waitWebhookSecretVolume(testdir, testTimeout, testInterval)
Expect(err).To(HaveOccurred())
By("remove dir")
os.RemoveAll(testdir)
err = waitWebhookSecretVolume(testdir, testTimeout, testInterval)
Expect(err).To(HaveOccurred())
})
})
When("dir contains empty file", func() {
It("return timeout error", func() {
By("add empty file")
err := os.WriteFile(testdir+"/emptyFile", []byte{}, 0644)
Expect(err).NotTo(HaveOccurred())
err = waitWebhookSecretVolume(testdir, testTimeout, testInterval)
Expect(err).To(HaveOccurred())
})
})
When("files in dir are not empty", func() {
It("return nil", func() {
By("add non-empty file")
err := os.WriteFile(testdir+"/file", []byte("test"), 0600)
Expect(err).NotTo(HaveOccurred())
err = waitWebhookSecretVolume(testdir, testTimeout, testInterval)
Expect(err).NotTo(HaveOccurred())
})
})
})
Describe("syncConfigurations", func() {
var coreOpts *options.CoreOptions
BeforeEach(func() {
coreOpts = options.NewCoreOptions()
})
Context("with all configs populated", func() {
It("should sync all configuration values to global variables", func() {
// Set some test values using actual fields from the config structs
coreOpts.Workflow.MaxWaitBackoffTime = 120
coreOpts.Workflow.MaxFailedBackoffTime = 600
coreOpts.Application.ReSyncPeriod = 30 * time.Minute
coreOpts.Kubernetes.InformerSyncPeriod = 10 * time.Hour
// Call sync function
syncConfigurations(coreOpts)
// Verify globals were updated (this is a smoke test - actual values depend on implementation)
// The key point is the function runs without panicking
Expect(func() { syncConfigurations(coreOpts) }).NotTo(Panic())
})
})
Context("with partial configs", func() {
It("should handle nil configs gracefully", func() {
opts := &options.CoreOptions{
Workflow: config.NewWorkflowConfig(),
CUE: config.NewCUEConfig(),
Application: nil, // Intentionally nil
Performance: config.NewPerformanceConfig(),
Resource: config.NewResourceConfig(),
OAM: config.NewOAMConfig(),
}
// Should not panic even with nil fields
Expect(func() {
syncConfigurations(opts)
}).NotTo(Panic())
})
})
Context("with empty CoreOptions", func() {
It("should handle nil options safely", func() {
nilOpts := &options.CoreOptions{}
// Should not panic even with nil fields
Expect(func() {
syncConfigurations(nilOpts)
}).NotTo(Panic())
})
})
})
Describe("setupLogging", func() {
var origStderr *os.File
BeforeEach(func() {
origStderr = os.Stderr
})
AfterEach(func() {
os.Stderr = origStderr
// Reset klog settings
klog.LogToStderr(true)
flag.Set("logtostderr", "true")
})
Context("debug logging", func() {
It("should configure debug logging when LogDebug is true", func() {
obsConfig := &config.ObservabilityConfig{
LogDebug: true,
}
setupLogging(obsConfig)
// Verify debug level was set (we can't directly check flag values easily)
// But we can verify the function doesn't panic
Expect(func() { setupLogging(obsConfig) }).NotTo(Panic())
})
})
Context("file logging", func() {
It("should configure file logging when LogFilePath is set", func() {
tempDir := GinkgoT().TempDir()
logFile := filepath.Join(tempDir, "test.log")
obsConfig := &config.ObservabilityConfig{
LogFilePath: logFile,
LogFileMaxSize: 100,
}
setupLogging(obsConfig)
// Verify flags were set (indirectly by checking no panic)
Expect(func() { setupLogging(obsConfig) }).NotTo(Panic())
})
})
Context("dev logging", func() {
It("should configure dev logging with color output", func() {
obsConfig := &config.ObservabilityConfig{
DevLogs: true,
}
// Capture output to verify color writer is used
var buf bytes.Buffer
klog.SetOutput(&buf)
defer klog.SetOutput(os.Stderr)
setupLogging(obsConfig)
// The function should complete without error
Expect(func() { setupLogging(obsConfig) }).NotTo(Panic())
})
})
Context("standard logging", func() {
It("should configure standard logging when DevLogs is false", func() {
obsConfig := &config.ObservabilityConfig{
DevLogs: false,
}
setupLogging(obsConfig)
Expect(func() { setupLogging(obsConfig) }).NotTo(Panic())
})
})
})
Describe("configureFeatureGates", func() {
var coreOpts *options.CoreOptions
var originalPeriod time.Duration
BeforeEach(func() {
coreOpts = options.NewCoreOptions()
originalPeriod = commonconfig.ApplicationReSyncPeriod
})
AfterEach(func() {
commonconfig.ApplicationReSyncPeriod = originalPeriod
feature.DefaultMutableFeatureGate.Set("ApplyOnce=false")
})
Context("when ApplyOnce is enabled", func() {
It("should configure ApplicationReSyncPeriod", func() {
// Enable the feature gate
feature.DefaultMutableFeatureGate.Set("ApplyOnce=true")
testPeriod := 5 * time.Minute
coreOpts.Kubernetes.InformerSyncPeriod = testPeriod
configureFeatureGates(coreOpts)
Expect(commonconfig.ApplicationReSyncPeriod).To(Equal(testPeriod))
})
})
Context("when ApplyOnce is disabled", func() {
It("should not change ApplicationReSyncPeriod", func() {
feature.DefaultMutableFeatureGate.Set("ApplyOnce=false")
coreOpts.Kubernetes.InformerSyncPeriod = 10 * time.Minute
configureFeatureGates(coreOpts)
Expect(commonconfig.ApplicationReSyncPeriod).To(Equal(originalPeriod))
})
})
Context("with different sync periods", func() {
DescribeTable("should handle various sync periods correctly",
func(enabled bool, syncPeriod time.Duration, expectedResult time.Duration) {
flagValue := fmt.Sprintf("ApplyOnce=%v", enabled)
feature.DefaultMutableFeatureGate.Set(flagValue)
coreOpts.Kubernetes.InformerSyncPeriod = syncPeriod
configureFeatureGates(coreOpts)
if enabled {
Expect(commonconfig.ApplicationReSyncPeriod).To(Equal(expectedResult))
} else {
Expect(commonconfig.ApplicationReSyncPeriod).To(Equal(originalPeriod))
}
},
Entry("enabled with 5 minutes", true, 5*time.Minute, 5*time.Minute),
Entry("enabled with 10 minutes", true, 10*time.Minute, 10*time.Minute),
Entry("disabled with 5 minutes", false, 5*time.Minute, originalPeriod),
Entry("disabled with 10 minutes", false, 10*time.Minute, originalPeriod),
)
})
})
Describe("performCleanup", func() {
var coreOpts *options.CoreOptions
BeforeEach(func() {
coreOpts = options.NewCoreOptions()
})
Context("with log file path", func() {
It("should flush logs when LogFilePath is set", func() {
coreOpts.Observability.LogFilePath = "/tmp/test.log"
// Should not panic
Expect(func() { performCleanup(coreOpts) }).NotTo(Panic())
// Verify klog.Flush was called (indirectly)
performCleanup(coreOpts)
})
})
Context("without log file path", func() {
It("should do nothing when LogFilePath is empty", func() {
coreOpts.Observability.LogFilePath = ""
// Should not panic
Expect(func() { performCleanup(coreOpts) }).NotTo(Panic())
})
})
DescribeTable("should handle various log file configurations",
func(logFilePath string) {
coreOpts.Observability.LogFilePath = logFilePath
// Should not panic
Expect(func() { performCleanup(coreOpts) }).NotTo(Panic())
},
Entry("empty path", ""),
Entry("tmp file", "/tmp/test.log"),
Entry("relative path", "test.log"),
Entry("nested path", "/var/log/kubevela/test.log"),
)
})
Describe("configureKubernetesClient", func() {
Context("when creating Kubernetes config", func() {
It("should configure REST config with correct parameters using ENVTEST", func() {
// Create a test Kubernetes config with specific values
k8sConfig := &config.KubernetesConfig{
QPS: 100,
Burst: 200,
}
// Create a config provider that returns our test config from ENVTEST
configProvider := func() (*rest.Config, error) {
// Create a copy of the test config to avoid modifying the shared config
cfg := rest.CopyConfig(testConfig)
return cfg, nil
}
// Call the function under test with dependency injection
resultConfig, err := configureKubernetesClientWithProvider(k8sConfig, configProvider)
// Assert no error occurred
Expect(err).NotTo(HaveOccurred())
Expect(resultConfig).NotTo(BeNil())
// Verify that QPS and Burst were set correctly
Expect(resultConfig.QPS).To(Equal(float32(100)))
Expect(resultConfig.Burst).To(Equal(200))
// Verify UserAgent was set
Expect(resultConfig.UserAgent).To(ContainSubstring(types.KubeVelaName))
Expect(resultConfig.UserAgent).To(ContainSubstring(version.GitRevision))
// Verify that the config has the impersonating round tripper wrapper
Expect(resultConfig.Wrap).NotTo(BeNil())
})
It("should handle config provider errors gracefully", func() {
k8sConfig := &config.KubernetesConfig{
QPS: 100,
Burst: 200,
}
// Create a config provider that returns an error
configProvider := func() (*rest.Config, error) {
return nil, fmt.Errorf("failed to get config")
}
// Call the function and expect an error
resultConfig, err := configureKubernetesClientWithProvider(k8sConfig, configProvider)
// Assert error occurred
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("failed to get config"))
Expect(resultConfig).To(BeNil())
})
It("should apply impersonating round tripper wrapper", func() {
k8sConfig := &config.KubernetesConfig{
QPS: 50,
Burst: 100,
}
configProvider := func() (*rest.Config, error) {
cfg := rest.CopyConfig(testConfig)
return cfg, nil
}
resultConfig, err := configureKubernetesClientWithProvider(k8sConfig, configProvider)
Expect(err).NotTo(HaveOccurred())
Expect(resultConfig).NotTo(BeNil())
// Verify the wrap function was applied
// We can't directly test the round tripper, but we can verify Wrap is not nil
Expect(resultConfig.Wrap).NotTo(BeNil())
})
})
})
Describe("buildManagerOptions", func() {
var (
coreOpts *options.CoreOptions
ctx context.Context
cancel context.CancelFunc
)
BeforeEach(func() {
ctx, cancel = context.WithCancel(context.Background())
coreOpts = options.NewCoreOptions()
// Configure options for testing
coreOpts.Server.EnableLeaderElection = false
coreOpts.Server.HealthAddr = ":8081"
coreOpts.Observability.MetricsAddr = ":8080"
coreOpts.Webhook.UseWebhook = false
coreOpts.Webhook.CertDir = GinkgoT().TempDir()
coreOpts.Webhook.WebhookPort = 9443
})
AfterEach(func() {
if cancel != nil {
cancel()
}
})
Context("when building manager options", func() {
It("should construct options with correct values from CoreOptions", func() {
// Call the function under test
managerOpts := buildManagerOptions(ctx, coreOpts)
// Verify metrics configuration
Expect(managerOpts.Metrics.BindAddress).To(Equal(":8080"))
// Verify health probe configuration
Expect(managerOpts.HealthProbeBindAddress).To(Equal(":8081"))
// Verify leader election configuration
Expect(managerOpts.LeaderElection).To(BeFalse())
Expect(managerOpts.LeaderElectionID).NotTo(BeEmpty())
// Verify scheme is set
Expect(managerOpts.Scheme).NotTo(BeNil())
// Verify webhook server is configured
Expect(managerOpts.WebhookServer).NotTo(BeNil())
// Verify timing configurations
Expect(managerOpts.LeaseDuration).NotTo(BeNil())
Expect(*managerOpts.LeaseDuration).To(Equal(coreOpts.Server.LeaseDuration))
Expect(managerOpts.RenewDeadline).NotTo(BeNil())
Expect(*managerOpts.RenewDeadline).To(Equal(coreOpts.Server.RenewDeadline))
Expect(managerOpts.RetryPeriod).NotTo(BeNil())
Expect(*managerOpts.RetryPeriod).To(Equal(coreOpts.Server.RetryPeriod))
// Verify client configuration
Expect(managerOpts.NewClient).NotTo(BeNil())
})
It("should handle leader election enabled configuration", func() {
// Configure with leader election enabled
coreOpts.Server.EnableLeaderElection = true
coreOpts.Server.LeaderElectionNamespace = "test-namespace"
coreOpts.Server.LeaseDuration = 10 * time.Second
coreOpts.Server.RenewDeadline = 8 * time.Second
coreOpts.Server.RetryPeriod = 2 * time.Second
managerOpts := buildManagerOptions(ctx, coreOpts)
// Verify leader election is enabled
Expect(managerOpts.LeaderElection).To(BeTrue())
Expect(managerOpts.LeaderElectionNamespace).To(Equal("test-namespace"))
// Verify timing configurations match
Expect(*managerOpts.LeaseDuration).To(Equal(10 * time.Second))
Expect(*managerOpts.RenewDeadline).To(Equal(8 * time.Second))
Expect(*managerOpts.RetryPeriod).To(Equal(2 * time.Second))
})
It("should construct leader election ID correctly", func() {
// Test without controller requirement flag
coreOpts.Controller.IgnoreAppWithoutControllerRequirement = false
managerOpts := buildManagerOptions(ctx, coreOpts)
leaderElectionID := managerOpts.LeaderElectionID
Expect(leaderElectionID).To(ContainSubstring("kubevela"))
Expect(leaderElectionID).NotTo(BeEmpty())
// Test with controller requirement flag
coreOpts.Controller.IgnoreAppWithoutControllerRequirement = true
managerOpts2 := buildManagerOptions(ctx, coreOpts)
leaderElectionID2 := managerOpts2.LeaderElectionID
// Leader election ID should be different when flag changes
Expect(leaderElectionID2).NotTo(Equal(leaderElectionID))
})
It("should configure webhook server with correct port and certDir", func() {
coreOpts.Webhook.WebhookPort = 9999
coreOpts.Webhook.CertDir = "/custom/cert/dir"
managerOpts := buildManagerOptions(ctx, coreOpts)
// Note: WebhookServer is already constructed, we can't directly inspect
// port and certDir after construction, but we verify it's not nil
Expect(managerOpts.WebhookServer).NotTo(BeNil())
})
})
})
Describe("setupControllers", func() {
var (
ctx context.Context
cancel context.CancelFunc
coreOpts *options.CoreOptions
)
BeforeEach(func() {
ctx, cancel = context.WithCancel(context.Background())
coreOpts = options.NewCoreOptions()
coreOpts.Webhook.UseWebhook = false // Disable webhooks for simpler testing
})
AfterEach(func() {
if cancel != nil {
cancel()
}
})
Context("error handling", func() {
It("should require a valid manager", func() {
// setupControllers requires a real manager and will panic with nil
// This documents the current behavior - the function assumes valid inputs
Expect(func() {
_ = setupControllers(ctx, nil, coreOpts)
}).To(Panic())
// Note: In production, setupControllers is only called after successful
// createControllerManager, so nil manager should never occur
})
})
// Note: Full integration tests with real manager require:
// - Complete ENVTEST infrastructure with CRDs
// - Controller manager initialization
// - Webhook server setup (if enabled)
})
Describe("startApplicationMonitor", func() {
var (
ctx context.Context
cancel context.CancelFunc
)
BeforeEach(func() {
ctx, cancel = context.WithCancel(context.Background())
})
AfterEach(func() {
if cancel != nil {
cancel()
}
})
Context("error handling", func() {
It("should require a valid manager", func() {
// startApplicationMonitor requires a real manager and will panic with nil
// This documents the current behavior - the function assumes valid inputs
Expect(func() {
_ = startApplicationMonitor(ctx, nil)
}).To(Panic())
// Note: In production, startApplicationMonitor is only called after
// successful manager creation, so nil manager should never occur
})
})
// Note: Full integration tests require:
// - Initialized controller manager with running informers
// - Metrics registry setup
// - Application resources in cluster
})
// Note: The run() function requires full Kubernetes environment with CRDs.
// These unit tests focuses on individual functions with mocked dependencies.
})

45
go.mod
View File

@@ -3,7 +3,7 @@ module github.com/oam-dev/kubevela
go 1.23.8
require (
cuelang.org/go v0.14.1
cuelang.org/go v0.9.2
github.com/AlecAivazis/survey/v2 v2.1.1
github.com/FogDong/uitable v0.0.5
github.com/Masterminds/semver v1.5.0
@@ -37,7 +37,7 @@ require (
github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174
github.com/imdario/mergo v0.3.16
github.com/jeremywohl/flatten/v2 v2.0.0-20211013061545-07e4a09fb8e4
github.com/kubevela/pkg v1.9.3-0.20251015050342-14cd204ff6fc
github.com/kubevela/pkg v1.9.3-0.20250625225831-a2894a62a307
github.com/kubevela/workflow v0.6.3-0.20250717221743-56b80cee4121
github.com/kyokomi/emoji v2.2.4+incompatible
github.com/magiconair/properties v1.8.7
@@ -59,23 +59,23 @@ require (
github.com/prometheus/client_model v0.6.1
github.com/rivo/tview v0.0.0-20221128165837-db36428c92d9
github.com/sirupsen/logrus v1.9.3
github.com/spf13/cobra v1.9.1
github.com/spf13/pflag v1.0.7
github.com/spf13/cobra v1.8.1
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.10.0
github.com/tidwall/gjson v1.14.4
github.com/wercker/stern v0.0.0-20190705090245-4fa46dd6987f
github.com/xlab/treeprint v1.2.0
gitlab.com/gitlab-org/api/client-go v0.127.0
go.uber.org/multierr v1.11.0
go.yaml.in/yaml/v3 v3.0.4
golang.org/x/crypto v0.40.0
golang.org/x/mod v0.26.0
golang.org/x/oauth2 v0.30.0
golang.org/x/sync v0.16.0
golang.org/x/term v0.33.0
golang.org/x/text v0.27.0
golang.org/x/tools v0.35.0
golang.org/x/crypto v0.37.0
golang.org/x/mod v0.24.0
golang.org/x/oauth2 v0.29.0
golang.org/x/sync v0.13.0
golang.org/x/term v0.31.0
golang.org/x/text v0.24.0
golang.org/x/tools v0.31.0
gomodules.xyz/jsonpatch/v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
helm.sh/helm/v3 v3.14.4
k8s.io/api v0.31.10
k8s.io/apiextensions-apiserver v0.31.10
@@ -99,7 +99,7 @@ require (
)
require (
cuelabs.dev/go/oci/ociregistry v0.0.0-20250715075730-49cab49c8e9d // indirect
cuelabs.dev/go/oci/ociregistry v0.0.0-20240404174027-a39bec0462d2 // indirect
dario.cat/mergo v1.0.0 // indirect
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
@@ -128,13 +128,13 @@ require (
github.com/chai2010/gettext-go v1.0.2 // indirect
github.com/cloudflare/circl v1.6.1 // indirect
github.com/cockroachdb/apd/v3 v3.2.1 // indirect
github.com/containerd/containerd v1.7.29 // indirect
github.com/containerd/containerd v1.7.27 // indirect
github.com/containerd/errdefs v0.3.0 // indirect
github.com/containerd/platforms v0.2.1 // indirect
github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect
github.com/coreos/go-semver v0.3.1 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
github.com/creack/pty v1.1.18 // indirect
github.com/cyphar/filepath-securejoin v0.4.1 // indirect
github.com/distribution/reference v0.6.0 // indirect
@@ -145,7 +145,7 @@ require (
github.com/docker/go-connections v0.5.0 // indirect
github.com/docker/go-metrics v0.0.1 // indirect
github.com/emicklei/go-restful/v3 v3.12.0 // indirect
github.com/emicklei/proto v1.14.2 // indirect
github.com/emicklei/proto v1.10.0 // indirect
github.com/emirpasic/gods v1.18.1 // indirect
github.com/evanphx/json-patch v5.7.0+incompatible // indirect
github.com/evanphx/json-patch/v5 v5.9.0 // indirect
@@ -232,16 +232,15 @@ require (
github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037 // indirect
github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.1 // indirect
github.com/opencontainers/image-spec v1.1.0 // indirect
github.com/openshift/library-go v0.0.0-20230327085348-8477ec72b725 // indirect
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
github.com/perimeterx/marshmallow v1.1.5 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/pjbgf/sha1cd v0.3.2 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/common v0.55.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/protocolbuffers/txtpbfmt v0.0.0-20250627152318-f293424e46b5 // indirect
github.com/protocolbuffers/txtpbfmt v0.0.0-20230328191034-3462fbc510c0 // indirect
github.com/rivo/uniseg v0.4.3 // indirect
github.com/robfig/cron/v3 v3.0.1 // indirect
github.com/rogpeppe/go-internal v1.14.1 // indirect
@@ -277,10 +276,9 @@ require (
go.uber.org/automaxprocs v1.5.3 // indirect
go.uber.org/zap v1.26.0 // indirect
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
golang.org/x/net v0.42.0 // indirect
golang.org/x/sys v0.34.0 // indirect
golang.org/x/time v0.12.0 // indirect
golang.org/x/tools/go/expect v0.1.1-deprecated // indirect
golang.org/x/net v0.39.0 // indirect
golang.org/x/sys v0.32.0 // indirect
golang.org/x/time v0.10.0 // indirect
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect
@@ -294,7 +292,6 @@ require (
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/klog v1.0.0 // indirect
k8s.io/kms v0.31.10 // indirect
k8s.io/kube-openapi v0.0.0-20250610211856-8b98d1ed966a // indirect

88
go.sum
View File

@@ -1,9 +1,9 @@
cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
cuelabs.dev/go/oci/ociregistry v0.0.0-20250715075730-49cab49c8e9d h1:lX0EawyoAu4kgMJJfy7MmNkIHioBcdBGFRSKDZ+CWo0=
cuelabs.dev/go/oci/ociregistry v0.0.0-20250715075730-49cab49c8e9d/go.mod h1:4WWeZNxUO1vRoZWAHIG0KZOd6dA25ypyWuwD3ti0Tdc=
cuelang.org/go v0.14.1 h1:kxFAHr7bvrCikbtVps2chPIARazVdnRmlz65dAzKyWg=
cuelang.org/go v0.14.1/go.mod h1:aSP9UZUM5m2izHAHUvqtq0wTlWn5oLjuv2iBMQZBLLs=
cuelabs.dev/go/oci/ociregistry v0.0.0-20240404174027-a39bec0462d2 h1:BnG6pr9TTr6CYlrJznYUDj6V7xldD1W+1iXPum0wT/w=
cuelabs.dev/go/oci/ociregistry v0.0.0-20240404174027-a39bec0462d2/go.mod h1:pK23AUVXuNzzTpfMCA06sxZGeVQ/75FdVtW249de9Uo=
cuelang.org/go v0.9.2 h1:pfNiry2PdRBr02G/aKm5k2vhzmqbAOoaB4WurmEbWvs=
cuelang.org/go v0.9.2/go.mod h1:qpAYsLOf7gTM1YdEg6cxh553uZ4q9ZDWlPbtZr9q1Wk=
dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
@@ -127,8 +127,8 @@ github.com/cockroachdb/apd/v3 v3.2.1/go.mod h1:klXJcjp+FffLTHlhIG69tezTDvdP065na
github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
github.com/containerd/cgroups/v3 v3.0.2 h1:f5WFqIVSgo5IZmtTT3qVBo6TzI1ON6sycSBKkymb9L0=
github.com/containerd/cgroups/v3 v3.0.2/go.mod h1:JUgITrzdFqp42uI2ryGA+ge0ap/nxzYgkGmIcetmErE=
github.com/containerd/containerd v1.7.29 h1:90fWABQsaN9mJhGkoVnuzEY+o1XDPbg9BTC9QTAHnuE=
github.com/containerd/containerd v1.7.29/go.mod h1:azUkWcOvHrWvaiUjSQH0fjzuHIwSPg1WL5PshGP4Szs=
github.com/containerd/containerd v1.7.27 h1:yFyEyojddO3MIGVER2xJLWoCIn+Up4GaHFquP7hsFII=
github.com/containerd/containerd v1.7.27/go.mod h1:xZmPnl75Vc+BLGt4MIfu6bp+fy03gdHAn9bz+FreFR0=
github.com/containerd/continuity v0.4.4 h1:/fNVfTJ7wIl/YPMHjf+5H32uFhl63JucB34PlCpMKII=
github.com/containerd/continuity v0.4.4/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE=
github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4=
@@ -144,8 +144,8 @@ github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03V
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
@@ -185,8 +185,8 @@ github.com/elazarl/goproxy v1.7.2 h1:Y2o6urb7Eule09PjlhQRGNsqRfPmYI3KKQLFpCAV3+o
github.com/elazarl/goproxy v1.7.2/go.mod h1:82vkLNir0ALaW14Rc399OTTjyNREgmdL2cVoIbS6XaE=
github.com/emicklei/go-restful/v3 v3.12.0 h1:y2DdzBAURM29NFF94q6RaY4vjIH1rtwDapwQtU84iWk=
github.com/emicklei/go-restful/v3 v3.12.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/emicklei/proto v1.14.2 h1:wJPxPy2Xifja9cEMrcA/g08art5+7CGJNFNk35iXC1I=
github.com/emicklei/proto v1.14.2/go.mod h1:rn1FgRS/FANiZdD2djyH7TMA9jdRDcYQ9IEN9yvjX0A=
github.com/emicklei/proto v1.10.0 h1:pDGyFRVV5RvV+nkBK9iy3q67FBy9Xa7vwrOTE+g5aGw=
github.com/emicklei/proto v1.10.0/go.mod h1:rn1FgRS/FANiZdD2djyH7TMA9jdRDcYQ9IEN9yvjX0A=
github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
@@ -476,8 +476,8 @@ github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kubevela/pkg v1.9.3-0.20251015050342-14cd204ff6fc h1:nuXTUQRQDJORMopbRD1fV4iwKT43MWgvMERM9YnrSPk=
github.com/kubevela/pkg v1.9.3-0.20251015050342-14cd204ff6fc/go.mod h1:EmM4VIyU7KxDmPBq9hG4GpSZbGwiM76/W/8paLBk8wY=
github.com/kubevela/pkg v1.9.3-0.20250625225831-a2894a62a307 h1:6vebFO0h5vU/0gSol3l/9KlgZeuZzYhl3/DlDr0jI6E=
github.com/kubevela/pkg v1.9.3-0.20250625225831-a2894a62a307/go.mod h1:P1yK32LmSs+NRjGu3Wu45VeCeKgIXiRg4qItN1MbgA8=
github.com/kubevela/workflow v0.6.3-0.20250717221743-56b80cee4121 h1:clU2P7FyrhLm1l/xviiLO1Cen00ZI01oOfPxAOoMi0w=
github.com/kubevela/workflow v0.6.3-0.20250717221743-56b80cee4121/go.mod h1:79KSLzfgBnJboWgxy5P/1GCc2ZUOLEYlF+vS4xQ3FNo=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
@@ -596,8 +596,8 @@ github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
github.com/openkruise/kruise-api v1.4.0 h1:MDDXQIYvaCh0ioIJSRniF4kCKby9JI3/ec6pZHHw/Ao=
github.com/openkruise/kruise-api v1.4.0/go.mod h1:HyRlDV0MfW5Zm+3g36bx7u4CcWHcKBxL8g/c/2bjcd4=
github.com/openkruise/rollouts v0.3.0 h1:T02r9BxHJ02MRkbc7C4F12qMGgrziZVjgmukwz6k60s=
@@ -607,8 +607,6 @@ github.com/openshift/library-go v0.0.0-20230327085348-8477ec72b725/go.mod h1:Osp
github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s=
github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
@@ -657,8 +655,8 @@ github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/protocolbuffers/txtpbfmt v0.0.0-20250627152318-f293424e46b5 h1:WWs1ZFnGobK5ZXNu+N9If+8PDNVB9xAqrib/stUXsV4=
github.com/protocolbuffers/txtpbfmt v0.0.0-20250627152318-f293424e46b5/go.mod h1:BnHogPTyzYAReeQLZrOxyxzS739DaTNtTvohVdbENmA=
github.com/protocolbuffers/txtpbfmt v0.0.0-20230328191034-3462fbc510c0 h1:sadMIsgmHpEOGbUs6VtHBXRR1OHevnj7hLx9ZcdNGW4=
github.com/protocolbuffers/txtpbfmt v0.0.0-20230328191034-3462fbc510c0/go.mod h1:jgxiZysxFPM+iWKwQwPR+y+Jvo54ARd4EisXxKYpB5c=
github.com/rivo/tview v0.0.0-20221128165837-db36428c92d9 h1:ccTgRxA37ypj3q8zB8G4k3xGPfBbIaMwrf3Yw6k50NY=
github.com/rivo/tview v0.0.0-20221128165837-db36428c92d9/go.mod h1:YX2wUZOcJGOIycErz2s9KvDaP0jnWwRCirQMPLPpQ+Y=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
@@ -693,12 +691,10 @@ github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU
github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M=
github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -812,8 +808,6 @@ go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN8
go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw=
go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -823,8 +817,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
@@ -839,8 +833,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg=
golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ=
golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -867,8 +861,8 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -876,8 +870,8 @@ golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
golang.org/x/oauth2 v0.29.0 h1:WdYw2tdTK1S8olAzWHdgeqfy+Mtm9XNhv/xJsY65d98=
golang.org/x/oauth2 v0.29.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -888,8 +882,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -928,16 +922,16 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg=
golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0=
golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o=
golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -948,11 +942,11 @@ golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4=
golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/time v0.10.0 h1:3usCWA8tQn0L8+hFJQNgzpWbd89begxN66o1Ojdn5L4=
golang.org/x/time v0.10.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -966,12 +960,8 @@ golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0=
golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw=
golang.org/x/tools/go/expect v0.1.1-deprecated h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM=
golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY=
golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM=
golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated/go.mod h1:RVAQXBGNv1ib0J382/DPCRS/BPnsGebyM1Gj5VSDpG8=
golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU=
golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

View File

@@ -199,6 +199,18 @@ webhooks:
admissionReviewVersions: ["v1", "v1beta1"]
sideEffects: None
failurePolicy: Fail
- name: applications.core.oam.dev
clientConfig:
url: https://${HOST_IP}:9445/validating-core-oam-dev-v1beta1-applications
caBundle: ${CA_BUNDLE}
rules:
- apiGroups: ["core.oam.dev"]
apiVersions: ["v1beta1"]
resources: ["applications"]
operations: ["CREATE", "UPDATE"]
admissionReviewVersions: ["v1", "v1beta1"]
sideEffects: None
failurePolicy: Fail
EOF
kubectl apply -f /tmp/webhook-config.yaml

View File

@@ -153,8 +153,9 @@ func main() {
lines[idx] = ""
continue
}
// Don't remove .md extensions - Docusaurus handles them properly
// and keeping them provides better IDE support and compatibility
if strings.Contains(line, ".md") && strings.Contains(line, "](") {
lines[idx] = strings.Replace(line, ".md", "", -1)
}
}
newlines = append(newlines, lines...)

View File

@@ -42,12 +42,12 @@ var ComponentDefDirs = []string{"./vela-templates/definitions/internal/component
// CustomComponentHeaderEN .
var CustomComponentHeaderEN = `---
title: Built-in Components Type
title: Built-in ParsedComponents Type
---
This documentation will walk through all the built-in component types sorted alphabetically.
` + fmt.Sprintf("> It was generated automatically by [scripts](../../contributor/cli-ref-doc.md), please don't update manually, last updated at %s.\n\n", time.Now().Format(time.RFC3339))
` + fmt.Sprintf("> It was generated automatically by [scripts](../../contributor/cli-ref-doc), please don't update manually, last updated at %s.\n\n", time.Now().Format(time.RFC3339))
// CustomComponentHeaderZH .
var CustomComponentHeaderZH = `---
@@ -56,7 +56,7 @@ title: 内置组件列表
本文档将**按字典序**展示所有内置组件的参数列表。
` + fmt.Sprintf("> 本文档由[脚本](../../contributor/cli-ref-doc.md)自动生成,请勿手动修改,上次更新于 %s。\n\n", time.Now().Format(time.RFC3339))
` + fmt.Sprintf("> 本文档由[脚本](../../contributor/cli-ref-doc)自动生成,请勿手动修改,上次更新于 %s。\n\n", time.Now().Format(time.RFC3339))
// ComponentDef generate component def reference doc
func ComponentDef(ctx context.Context, c common.Args, opt Options) {

View File

@@ -47,7 +47,7 @@ title: Built-in Policy Type
This documentation will walk through all the built-in policy types sorted alphabetically.
` + fmt.Sprintf("> It was generated automatically by [scripts](../../contributor/cli-ref-doc.md), please don't update manually, last updated at %s.\n\n", time.Now().Format(time.RFC3339))
` + fmt.Sprintf("> It was generated automatically by [scripts](../../contributor/cli-ref-doc), please don't update manually, last updated at %s.\n\n", time.Now().Format(time.RFC3339))
// CustomPolicyHeaderZH .
var CustomPolicyHeaderZH = `---
@@ -56,7 +56,7 @@ title: 内置策略列表
本文档将**按字典序**展示所有内置策略的参数列表。
` + fmt.Sprintf("> 本文档由[脚本](../../contributor/cli-ref-doc.md)自动生成,请勿手动修改,上次更新于 %s。\n\n", time.Now().Format(time.RFC3339))
` + fmt.Sprintf("> 本文档由[脚本](../../contributor/cli-ref-doc)自动生成,请勿手动修改,上次更新于 %s。\n\n", time.Now().Format(time.RFC3339))
// PolicyDef generate policy def reference doc
func PolicyDef(ctx context.Context, c common.Args, opt Options) {

View File

@@ -47,7 +47,7 @@ title: Built-in Trait Type
This documentation will walk through all the built-in trait types sorted alphabetically.
` + fmt.Sprintf("> It was generated automatically by [scripts](../../contributor/cli-ref-doc.md), please don't update manually, last updated at %s.\n\n", time.Now().Format(time.RFC3339))
` + fmt.Sprintf("> It was generated automatically by [scripts](../../contributor/cli-ref-doc), please don't update manually, last updated at %s.\n\n", time.Now().Format(time.RFC3339))
// CustomTraitHeaderZH .
var CustomTraitHeaderZH = `---
@@ -56,7 +56,7 @@ title: 内置运维特征列表
本文档将**按字典序**展示所有内置运维特征的参数列表。
` + fmt.Sprintf("> 本文档由[脚本](../../contributor/cli-ref-doc.md)自动生成,请勿手动修改,上次更新于 %s。\n\n", time.Now().Format(time.RFC3339))
` + fmt.Sprintf("> 本文档由[脚本](../../contributor/cli-ref-doc)自动生成,请勿手动修改,上次更新于 %s。\n\n", time.Now().Format(time.RFC3339))
// TraitDef generate trait def reference doc
func TraitDef(ctx context.Context, c common.Args, opt Options) {

View File

@@ -47,7 +47,7 @@ title: Built-in WorkflowStep Type
This documentation will walk through all the built-in workflow step types sorted alphabetically.
` + fmt.Sprintf("> It was generated automatically by [scripts](../../contributor/cli-ref-doc.md), please don't update manually, last updated at %s.\n\n", time.Now().Format(time.RFC3339))
` + fmt.Sprintf("> It was generated automatically by [scripts](../../contributor/cli-ref-doc), please don't update manually, last updated at %s.\n\n", time.Now().Format(time.RFC3339))
// CustomWorkflowHeaderZH .
var CustomWorkflowHeaderZH = `---
@@ -56,7 +56,7 @@ title: 内置工作流步骤列表
本文档将**按字典序**展示所有内置工作流步骤的参数列表。
` + fmt.Sprintf("> 本文档由[脚本](../../contributor/cli-ref-doc.md)自动生成,请勿手动修改,上次更新于 %s。\n\n", time.Now().Format(time.RFC3339))
` + fmt.Sprintf("> 本文档由[脚本](../../contributor/cli-ref-doc)自动生成,请勿手动修改,上次更新于 %s。\n\n", time.Now().Format(time.RFC3339))
// WorkflowDef generate workflow def reference doc
func WorkflowDef(ctx context.Context, c common.Args, opt Options) {

View File

@@ -1,36 +1,36 @@
#!/usr/bin/env bash
set -euo pipefail
#! /bin/bash
DEF_PATH="charts/vela-core/templates/defwithtemplate"
function install_defs() {
local def_path="$1"
if [[ ! -d "$def_path" ]]; then
echo "Skip: path '$def_path' not found"
return 0
function check_install() {
res=`kubectl get namespace -A | grep vela-system`
if [ -n "$res" ];then
echo 'checking: vela-system namespace exist'
else
echo 'vela-system namespace do not exist'
echo 'creating vela-system namespace ...'
kubectl create namespace vela-system
fi
echo "applying definitions ..."
cd "$DEF_PATH"
echo "Applying definitions in '$def_path' ..."
cd "$def_path"
for file in *.yaml ;
do
echo "Info: changing "$DEF_PATH"/""$file"
sed -i.bak "s#namespace: {{ include \"systemDefinitionNamespace\" . }}#namespace: vela-system#g" "$file"
kubectl apply -f "$file"
rm "$file"
mv "$file"".bak" "$file"
done
shopt -s nullglob
for file in *.yaml; do
echo "Info: processing $def_path/$file"
sed -i.bak 's#namespace: {{ include "systemDefinitionNamespace" . }}#namespace: vela-system#g' "$file"
kubectl apply -f "$file" || { mv "$file.bak" "$file"; return 1; }
mv "$file.bak" "$file" # restore original
done
shopt -u nullglob
cd - >/dev/null
cd -
}
# Ensure vela-system namespace
if kubectl get namespace vela-system >/dev/null 2>&1; then
echo "Namespace vela-system exists"
else
echo "Creating namespace vela-system"
kubectl create namespace vela-system
fi
check_install
install_defs "charts/vela-core/templates/defwithtemplate"
install_defs "charts/vela-core/templates/definitions"
install_defs "charts/vela-core/templates/velaql"
DEF_PATH="charts/vela-core/templates/definitions"
check_install
DEF_PATH="charts/vela-core/templates/velaql"
check_install

View File

@@ -47,7 +47,4 @@ VELA_CLI_IMAGE ?= oamdev/vela-cli:latest
VELA_CORE_TEST_IMAGE ?= vela-core-test:$(GIT_COMMIT)
VELA_APISERVER_IMAGE ?= apiserver:latest
RUNTIME_CLUSTER_CONFIG ?= /tmp/worker.client.kubeconfig
RUNTIME_CLUSTER_NAME ?= worker
COMMON_CRD_FILES = \
core.oam.dev_workflows.yaml
RUNTIME_CLUSTER_NAME ?= worker

View File

@@ -50,7 +50,7 @@ else
GOIMPORTS=$(shell which goimports)
endif
CUE_VERSION ?= v0.14.1
CUE_VERSION ?= v0.9.2
.PHONY: installcue
installcue:
ifeq (, $(shell which cue))
@@ -98,17 +98,3 @@ endif
envtest: $(ENVTEST) ## Download envtest-setup locally if necessary.
$(ENVTEST): $(LOCALBIN)
GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest
.PHONY: tidy
tidy:
go mod tidy
.PHONY: sync-crds
PKG_MODULE = github.com/kubevela/pkg # fetch common crds from the pkg repo instead of generating locally
sync-crds: ## Copy CRD from pinned module version in go.mod
@moddir=$$(go list -m -f '{{.Dir}}' $(PKG_MODULE) 2>/dev/null); \
mkdir -p config/crd/base; \
for file in $(COMMON_CRD_FILES); do \
src="$$moddir/crds/$$file"; \
cp -f "$$src" "config/crd/base/"; \
done

View File

@@ -10,8 +10,6 @@ e2e-setup-core-post-hook:
go run ./e2e/addon/mock &
bin/vela addon enable ./e2e/addon/mock/testdata/fluxcd
bin/vela addon enable ./e2e/addon/mock/testdata/terraform
# Wait for webhook service endpoints to be ready before enabling addons that require webhook validation
kubectl wait --for=condition=Ready pod -l app.kubernetes.io/name=vela-core -n vela-system --timeout=180s
bin/vela addon enable ./e2e/addon/mock/testdata/terraform-alibaba ALICLOUD_ACCESS_KEY=xxx ALICLOUD_SECRET_KEY=yyy ALICLOUD_REGION=cn-beijing
timeout 600s bash -c -- 'while true; do kubectl get ns flux-system; if [ $$? -eq 0 ] ; then break; else sleep 5; fi;done'
@@ -114,74 +112,6 @@ e2e-test-local:
ginkgo -v ./test/e2e-test
@$(OK) tests pass
# Run main_e2e_test.go with k3d cluster and embedded test binary
.PHONY: e2e-test-main-local
e2e-test-main-local:
@echo "==> Setting up k3d cluster for main_e2e_test..."
# Delete existing cluster if it exists and recreate
@k3d cluster delete kubevela-e2e-main 2>/dev/null || true
@k3d cluster create kubevela-e2e-main --servers 1 --agents 1
@echo "==> Building test binary with Dockerfile.e2e..."
# Detect architecture for proper binary naming
$(eval ARCH := $(shell uname -m | sed 's/x86_64/amd64/; s/aarch64\|arm64/arm64/'))
@echo " Detected architecture: $(ARCH)"
# Build test image with embedded e2e test
# Note: Use 'make e2e-test-main-rebuild' if you get "manager-${ARCH}: not found" errors
docker build -t vela-core:e2e-main-test -f Dockerfile.e2e . \
--no-cache \
--build-arg=TARGETARCH=$(ARCH) \
--build-arg=VERSION=e2e-main-test \
--build-arg=GITVERSION=test
# Load image into k3d cluster
k3d image import vela-core:e2e-main-test -c kubevela-e2e-main
@echo "==> Modifying Helm charts to enable e2e test..."
# Backup original chart
@cp ./charts/vela-core/templates/kubevela-controller.yaml ./charts/vela-core/templates/kubevela-controller.yaml.bak || true
# Modify charts to add test flags
sh ./hack/e2e/modify_charts.sh
@echo "==> Deploying vela-core with embedded test..."
# Clean up any existing webhook configs
kubectl delete validatingwebhookconfiguration kubevela-vela-core-admission 2>/dev/null || true
# Deploy with test binary and flags
helm upgrade --install kubevela ./charts/vela-core \
--namespace vela-system --create-namespace \
--set image.repository=vela-core \
--set image.tag=e2e-main-test \
--set image.pullPolicy=IfNotPresent \
--set admissionWebhooks.enabled=false \
--set multicluster.enabled=false \
--set multicluster.clusterGateway.enabled=false \
--set featureGates.enableCueValidation=true \
--set featureGates.validateResourcesExist=true \
--set applicationRevisionLimit=5 \
--set controllerArgs.reSyncPeriod=1m \
--wait --timeout 3m
@echo "==> Waiting for test to complete..."
# Give the test time to run (it starts the server and runs tests)
@sleep 10
@echo "==> Checking test results from pod logs..."
# Get the pod name and check logs for test results
@kubectl logs -n vela-system -l app.kubernetes.io/name=vela-core --tail=100 | grep -E "PASS|FAIL|TestE2EMain" || true
@echo "==> Test coverage will be available at /workspace/data/e2e-profile.out in the pod"
# Optionally copy coverage data from pod
@POD=$$(kubectl get pod -n vela-system -l app.kubernetes.io/name=vela-core -o jsonpath='{.items[0].metadata.name}') && \
kubectl cp vela-system/$$POD:/workspace/data/e2e-profile.out ./e2e-main-coverage.out 2>/dev/null || \
echo "Coverage data not yet available or test still running"
# Restore original chart
@mv ./charts/vela-core/templates/kubevela-controller.yaml.bak ./charts/vela-core/templates/kubevela-controller.yaml 2>/dev/null || true
@echo "==> Done. Check pod logs for detailed test output:"
@echo " kubectl logs -n vela-system -l app.kubernetes.io/name=vela-core -f"
@$(OK) main_e2e_test setup complete
# Clean up k3d cluster used for main_e2e_test
.PHONY: e2e-test-main-clean
e2e-test-main-clean:
@echo "==> Cleaning up k3d cluster for main_e2e_test..."
k3d cluster delete kubevela-e2e-main || true
# Restore original chart if backup exists
@mv ./charts/vela-core/templates/kubevela-controller.yaml.bak ./charts/vela-core/templates/kubevela-controller.yaml 2>/dev/null || true
@echo "==> Cleanup complete"
.PHONY: e2e-addon-test
e2e-addon-test:
@@ -191,7 +121,7 @@ e2e-addon-test:
.PHONY: e2e-multicluster-test
e2e-multicluster-test:
cd ./test/e2e-multicluster-test && go test -timeout=30m -v -ginkgo.v -ginkgo.trace -coverpkg=./... -coverprofile=/tmp/e2e_multicluster_test.out
cd ./test/e2e-multicluster-test && go test -v -ginkgo.v -ginkgo.trace -coverpkg=./... -coverprofile=/tmp/e2e_multicluster_test.out
@$(OK) tests pass
.PHONY: e2e-cleanup

View File

@@ -26,7 +26,7 @@ import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/pkg/errors"
yaml3 "go.yaml.in/yaml/v3"
yaml3 "gopkg.in/yaml.v3"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

View File

@@ -20,8 +20,6 @@ import (
"strings"
"testing"
"github.com/google/go-github/v32/github"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
)
@@ -43,13 +41,3 @@ func TestGetAvailableVersion(t *testing.T) {
assert.NotEmpty(t, err)
assert.Equal(t, version, "")
}
func TestWrapErrRateLimit(t *testing.T) {
regularErr := errors.New("regular error")
wrappedErr := WrapErrRateLimit(regularErr)
assert.Equal(t, regularErr, wrappedErr)
rateLimitErr := &github.RateLimitError{}
wrappedErr = WrapErrRateLimit(rateLimitErr)
assert.Equal(t, ErrRateLimit, wrappedErr)
}

View File

@@ -0,0 +1,71 @@
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package addon
import (
"testing"
"github.com/stretchr/testify/assert"
"helm.sh/helm/v3/pkg/chart/loader"
)
var files = []*loader.BufferedFile{
{
Name: "metadata.yaml",
Data: []byte(`name: test-helm-addon
version: 1.0.0
description: This is a addon for test when install addon from helm repo
icon: https://www.terraform.io/assets/images/logo-text-8c3ba8a6.svg
url: https://terraform.io/
tags: []
deployTo:
control_plane: true
runtime_cluster: false
dependencies: []
invisible: false`),
},
{
Name: "/resources/parameter.cue",
Data: []byte(`parameter: {
// test wrong parameter
example: *"default"
}`),
},
}
func TestMemoryReader(t *testing.T) {
m := MemoryReader{
Name: "fluxcd",
Files: files,
}
meta, err := m.ListAddonMeta()
assert.NoError(t, err)
assert.Equal(t, len(meta["fluxcd"].Items), 2)
metaFile, err := m.ReadFile("metadata.yaml")
assert.NoError(t, err)
assert.NotEmpty(t, metaFile)
parameterData, err := m.ReadFile("/resources/parameter.cue")
assert.NoError(t, err)
assert.NotEmpty(t, parameterData)
}

View File

@@ -97,155 +97,3 @@ func TestGiteeReader(t *testing.T) {
assert.NoError(t, err)
}
func TestNewGiteeClient(t *testing.T) {
defaultURL, _ := url.Parse(DefaultGiteeURL)
testCases := map[string]struct {
httpClient *http.Client
baseURL *url.URL
wantClient *http.Client
wantURL *url.URL
}{
"Nil inputs": {
httpClient: nil,
baseURL: nil,
wantClient: &http.Client{},
wantURL: defaultURL,
},
"Custom inputs": {
httpClient: &http.Client{Timeout: 10},
baseURL: &url.URL{Host: "my-gitee.com"},
wantClient: &http.Client{Timeout: 10},
wantURL: &url.URL{Host: "my-gitee.com"},
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
client := NewGiteeClient(tc.httpClient, tc.baseURL)
assert.Equal(t, tc.wantClient.Timeout, client.Client.Timeout)
assert.Equal(t, tc.wantURL.Host, client.BaseURL.Host)
})
}
}
func TestGiteeReaderRelativePath(t *testing.T) {
testCases := map[string]struct {
basePath string
itemPath string
expectedPath string
}{
"No base path": {
basePath: "",
itemPath: "fluxcd/metadata.yaml",
expectedPath: "fluxcd/metadata.yaml",
},
"With base path": {
basePath: "addons",
itemPath: "addons/fluxcd/metadata.yaml",
expectedPath: "fluxcd/metadata.yaml",
},
"With deep base path": {
basePath: "official/addons",
itemPath: "official/addons/fluxcd/template.cue",
expectedPath: "fluxcd/template.cue",
},
"Item at root of base path": {
basePath: "addons",
itemPath: "addons/README.md",
expectedPath: "README.md",
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
gith := &giteeHelper{
Meta: &utils.Content{GiteeContent: utils.GiteeContent{
Path: tc.basePath,
}},
}
r := &giteeReader{h: gith}
item := &github.RepositoryContent{Path: &tc.itemPath}
result := r.RelativePath(item)
assert.Equal(t, tc.expectedPath, result)
})
}
}
func TestGiteeReader_ListAddonMeta(t *testing.T) {
client, mux, teardown := giteeSetup()
defer teardown()
giteePattern := "/repos/o/r/contents/"
mux.HandleFunc(giteePattern, func(rw http.ResponseWriter, req *http.Request) {
var contents []*github.RepositoryContent
queryPath := strings.TrimPrefix(req.URL.Path, giteePattern)
switch queryPath {
case "": // Root directory
contents = []*github.RepositoryContent{
{Type: String("dir"), Name: String("fluxcd"), Path: String("fluxcd")},
{Type: String("dir"), Name: String("velaux"), Path: String("velaux")},
{Type: String("file"), Name: String("README.md"), Path: String("README.md")},
}
case "fluxcd":
contents = []*github.RepositoryContent{
{Type: String("file"), Name: String("metadata.yaml"), Path: String("fluxcd/metadata.yaml")},
{Type: String("dir"), Name: String("resources"), Path: String("fluxcd/resources")},
}
case "fluxcd/resources":
contents = []*github.RepositoryContent{
{Type: String("file"), Name: String("parameter.cue"), Path: String("fluxcd/resources/parameter.cue")},
}
case "velaux":
contents = []*github.RepositoryContent{
{Type: String("file"), Name: String("metadata.yaml"), Path: String("velaux/metadata.yaml")},
}
default:
rw.WriteHeader(http.StatusNotFound)
return
}
res, _ := json.Marshal(contents)
rw.Write(res)
})
gith := &giteeHelper{
Client: client,
Meta: &utils.Content{GiteeContent: utils.GiteeContent{
Owner: "o",
Repo: "r",
}},
}
r := &giteeReader{h: gith}
meta, err := r.ListAddonMeta()
assert.NoError(t, err)
assert.NotNil(t, meta)
assert.Equal(t, 2, len(meta), "Expected to find 2 addons, root files should be ignored")
t.Run("fluxcd addon discovery", func(t *testing.T) {
addon, ok := meta["fluxcd"]
assert.True(t, ok, "fluxcd addon should be discovered")
assert.Equal(t, "fluxcd", addon.Name)
// Should find 2 items recursively: metadata.yaml and resources/parameter.cue
assert.Equal(t, 2, len(addon.Items), "fluxcd should contain 2 files")
foundPaths := make(map[string]bool)
for _, item := range addon.Items {
foundPaths[item.GetPath()] = true
}
assert.True(t, foundPaths["fluxcd/metadata.yaml"], "should find fluxcd/metadata.yaml")
assert.True(t, foundPaths["fluxcd/resources/parameter.cue"], "should find fluxcd/resources/parameter.cue")
})
t.Run("velaux addon discovery", func(t *testing.T) {
addon, ok := meta["velaux"]
assert.True(t, ok, "velaux addon should be discovered")
assert.Equal(t, "velaux", addon.Name)
assert.Equal(t, 1, len(addon.Items), "velaux should contain 1 file")
assert.Equal(t, "velaux/metadata.yaml", addon.Items[0].GetPath())
})
}

View File

@@ -64,10 +64,8 @@ func setup() (client *github.Client, mux *http.ServeMux, teardown func()) {
return client, mux, server.Close
}
func TestGitHubReader_ReadFile(t *testing.T) {
func TestGitHubReader(t *testing.T) {
client, mux, teardown := setup()
defer teardown()
githubPattern := "/repos/o/r/contents/"
mux.HandleFunc(githubPattern, func(rw http.ResponseWriter, req *http.Request) {
queryPath := strings.TrimPrefix(req.URL.Path, githubPattern)
@@ -78,7 +76,6 @@ func TestGitHubReader_ReadFile(t *testing.T) {
content := &github.RepositoryContent{Type: String("file"), Name: String(path.Base(queryPath)), Size: Int(len(file)), Encoding: String(""), Path: String(queryPath), Content: String(string(file))}
res, _ := json.Marshal(content)
rw.Write(res)
return
}
// otherwise, it could be directory
@@ -94,11 +91,11 @@ func TestGitHubReader_ReadFile(t *testing.T) {
}
dRes, _ := json.Marshal(contents)
rw.Write(dRes)
return
}
rw.WriteHeader(http.StatusNotFound)
rw.Write([]byte("invalid github query"))
})
defer teardown()
gith := &gitHelper{
Client: client,
@@ -110,95 +107,7 @@ func TestGitHubReader_ReadFile(t *testing.T) {
var r AsyncReader = &gitReader{gith}
_, err := r.ReadFile("example/metadata.yaml")
assert.NoError(t, err)
}
// TestGitReader_RelativePath checks that gitReader.RelativePath strips the
// configured repository base path from an item's full path, and leaves the
// path untouched when no base path is set.
func TestGitReader_RelativePath(t *testing.T) {
	cases := []struct {
		name string
		base string
		full string
		want string
	}{
		{name: "No base path", base: "", full: "fluxcd/metadata.yaml", want: "fluxcd/metadata.yaml"},
		{name: "With base path", base: "addons", full: "addons/fluxcd/metadata.yaml", want: "fluxcd/metadata.yaml"},
	}
	for _, c := range cases {
		c := c // pin for &c.full below (pre-Go1.22 loop-var semantics)
		t.Run(c.name, func(t *testing.T) {
			helper := &gitHelper{
				Meta: &utils.Content{GithubContent: utils.GithubContent{
					Path: c.base,
				}},
			}
			reader := &gitReader{h: helper}
			content := &github.RepositoryContent{Path: &c.full}
			assert.Equal(t, c.want, reader.RelativePath(content))
		})
	}
}
// TestGitReader_ListAddonMeta verifies that the GitHub-backed reader
// discovers addon directories recursively while ignoring loose files at the
// repository root.
func TestGitReader_ListAddonMeta(t *testing.T) {
	client, mux, teardown := setup()
	defer teardown()
	githubPattern := "/repos/o/r/contents/"
	// Fake GitHub contents API: one addon directory (fluxcd) with a nested
	// resources/ directory, plus a root-level README that must NOT be
	// reported as an addon.
	mux.HandleFunc(githubPattern, func(rw http.ResponseWriter, req *http.Request) {
		var contents []*github.RepositoryContent
		queryPath := strings.TrimPrefix(req.URL.Path, githubPattern)
		switch queryPath {
		case "": // Root directory
			contents = []*github.RepositoryContent{
				{Type: String("dir"), Name: String("fluxcd"), Path: String("fluxcd")},
				{Type: String("file"), Name: String("README.md"), Path: String("README.md")},
			}
		case "fluxcd":
			contents = []*github.RepositoryContent{
				{Type: String("file"), Name: String("metadata.yaml"), Path: String("fluxcd/metadata.yaml")},
				{Type: String("dir"), Name: String("resources"), Path: String("fluxcd/resources")},
				{Type: String("file"), Name: String("template.cue"), Path: String("fluxcd/template.cue")},
			}
		case "fluxcd/resources":
			contents = []*github.RepositoryContent{
				{Type: String("file"), Name: String("parameter.cue"), Path: String("fluxcd/resources/parameter.cue")},
			}
		default:
			rw.WriteHeader(http.StatusNotFound)
			return
		}
		res, _ := json.Marshal(contents)
		rw.Write(res)
	})
	gith := &gitHelper{
		Client: client,
		Meta: &utils.Content{GithubContent: utils.GithubContent{
			Owner: "o",
			Repo:  "r",
		}},
	}
	r := &gitReader{h: gith}
	meta, err := r.ListAddonMeta()
	assert.NoError(t, err)
	assert.NotNil(t, meta)
	assert.Equal(t, 1, len(meta), "Expected to find 1 addon, root files should be ignored")
	t.Run("fluxcd addon discovery", func(t *testing.T) {
		addon, ok := meta["fluxcd"]
		assert.True(t, ok, "fluxcd addon should be discovered")
		assert.Equal(t, "fluxcd", addon.Name)
		// Should find 3 items recursively: metadata.yaml, template.cue and
		// resources/parameter.cue.
		assert.Equal(t, 3, len(addon.Items), "fluxcd should contain 3 files")
		// Verify each discovered path explicitly (matching the gitee reader
		// test) instead of relying on the item count alone.
		foundPaths := make(map[string]bool)
		for _, item := range addon.Items {
			foundPaths[item.GetPath()] = true
		}
		assert.True(t, foundPaths["fluxcd/metadata.yaml"], "should find fluxcd/metadata.yaml")
		assert.True(t, foundPaths["fluxcd/template.cue"], "should find fluxcd/template.cue")
		assert.True(t, foundPaths["fluxcd/resources/parameter.cue"], "should find fluxcd/resources/parameter.cue")
	})
}
// Int is a helper routine that allocates a new int value

View File

@@ -52,9 +52,6 @@ func (g GitLabItem) GetType() string {
// GetPath returns the addon sub-item path relative to the configured base
// path; when no base path is set, the raw item path is returned unchanged.
func (g GitLabItem) GetPath() string {
	if base := g.basePath; base != "" {
		// Drop "<basePath>/" — the +1 skips the path separator.
		return g.path[len(base)+1:]
	}
	return g.path
}

Some files were not shown because too many files have changed in this diff Show More