mirror of
https://github.com/kubevela/kubevela.git
synced 2026-03-02 17:50:58 +00:00
Compare commits
11 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
066c448c1a | ||
|
|
8de80ebdb2 | ||
|
|
855cbfe3ec | ||
|
|
162534b611 | ||
|
|
6bd5d8e6e2 | ||
|
|
22079aacd3 | ||
|
|
b2329d548d | ||
|
|
9152c15a88 | ||
|
|
73b3d3106b | ||
|
|
237c71d94e | ||
|
|
2200d199f3 |
6
.github/workflows/e2e-multicluster-test.yml
vendored
6
.github/workflows/e2e-multicluster-test.yml
vendored
@@ -76,10 +76,16 @@ jobs:
|
||||
- name: Load Image to kind cluster (Hub)
|
||||
run: make kind-load
|
||||
|
||||
- name: Load Image to kind cluster (Worker)
|
||||
run: |
|
||||
make kind-load-runtime-cluster
|
||||
|
||||
- name: Cleanup for e2e tests
|
||||
run: |
|
||||
make e2e-cleanup
|
||||
make e2e-setup-core
|
||||
make
|
||||
make setup-runtime-e2e-cluster
|
||||
|
||||
- name: Run e2e multicluster tests
|
||||
run: make e2e-multicluster-test
|
||||
|
||||
5
.gitignore
vendored
5
.gitignore
vendored
@@ -45,4 +45,7 @@ charts/vela-core/crds/_.yaml
|
||||
.vela/
|
||||
|
||||
# check docs
|
||||
git-page/
|
||||
git-page/
|
||||
|
||||
# e2e rollout runtime image build
|
||||
runtime/rollout/e2e/tmp
|
||||
12
Makefile
12
Makefile
@@ -39,6 +39,9 @@ endif
|
||||
VELA_CORE_IMAGE ?= vela-core:latest
|
||||
VELA_CORE_TEST_IMAGE ?= vela-core-test:$(GIT_COMMIT)
|
||||
VELA_RUNTIME_ROLLOUT_IMAGE ?= vela-runtime-rollout:latest
|
||||
VELA_RUNTIME_ROLLOUT_TEST_IMAGE ?= vela-runtime-rollout-test:$(GIT_COMMIT)
|
||||
RUNTIME_CLUSTER_CONFIG ?= /tmp/worker.kubeconfig
|
||||
RUNTIME_CLUSTER_NAME ?= worker
|
||||
|
||||
all: build
|
||||
|
||||
@@ -143,6 +146,9 @@ e2e-setup-core:
|
||||
helm upgrade --install --create-namespace --namespace vela-system --set image.pullPolicy=IfNotPresent --set image.repository=vela-core-test --set applicationRevisionLimit=5 --set dependCheckWait=10s --set image.tag=$(GIT_COMMIT) --set multicluster.enabled=true --wait kubevela ./charts/vela-core
|
||||
kubectl wait --for=condition=Available deployment/kubevela-vela-core -n vela-system --timeout=180s
|
||||
|
||||
setup-runtime-e2e-cluster:
|
||||
helm upgrade --install --create-namespace --namespace vela-system --kubeconfig=$(RUNTIME_CLUSTER_CONFIG) --set image.pullPolicy=IfNotPresent --set image.repository=vela-runtime-rollout-test --set image.tag=$(GIT_COMMIT) --wait vela-rollout ./runtime/rollout/charts
|
||||
|
||||
e2e-setup:
|
||||
helm install kruise https://github.com/openkruise/kruise/releases/download/v0.9.0/kruise-chart.tgz --set featureGates="PreDownloadImageForInPlaceUpdate=true"
|
||||
sh ./hack/e2e/modify_charts.sh
|
||||
@@ -209,6 +215,12 @@ kind-load:
|
||||
docker build -t $(VELA_CORE_TEST_IMAGE) -f Dockerfile.e2e .
|
||||
kind load docker-image $(VELA_CORE_TEST_IMAGE) || { echo >&2 "kind not installed or error loading image: $(VELA_CORE_TEST_IMAGE)"; exit 1; }
|
||||
|
||||
kind-load-runtime-cluster:
|
||||
/bin/sh hack/e2e/build_runtime_rollout.sh
|
||||
docker build -t $(VELA_RUNTIME_ROLLOUT_TEST_IMAGE) -f runtime/rollout/e2e/Dockerfile.e2e runtime/rollout/e2e/
|
||||
rm -rf runtime/rollout/e2e/tmp
|
||||
kind load docker-image $(VELA_RUNTIME_ROLLOUT_TEST_IMAGE) --name=$(RUNTIME_CLUSTER_NAME) || { echo >&2 "kind not installed or error loading image: $(VELA_RUNTIME_ROLLOUT_TEST_IMAGE)"; exit 1; }
|
||||
|
||||
# Run tests
|
||||
core-test: fmt vet manifests
|
||||
go test ./pkg/... -coverprofile cover.out
|
||||
|
||||
@@ -38,7 +38,8 @@ type WorkflowStepDefinitionSpec struct {
|
||||
type WorkflowStepDefinitionStatus struct {
|
||||
// ConditionedStatus reflects the observed status of a resource
|
||||
condition.ConditionedStatus `json:",inline"`
|
||||
|
||||
// ConfigMapRef refer to a ConfigMap which contains OpenAPI V3 JSON schema of Component parameters.
|
||||
ConfigMapRef string `json:"configMapRef,omitempty"`
|
||||
// LatestRevision of the component definition
|
||||
// +optional
|
||||
LatestRevision *common.Revision `json:"latestRevision,omitempty"`
|
||||
|
||||
@@ -4399,6 +4399,10 @@ spec:
|
||||
- type
|
||||
type: object
|
||||
type: array
|
||||
configMapRef:
|
||||
description: ConfigMapRef refer to a ConfigMap which contains
|
||||
OpenAPI V3 JSON schema of Component parameters.
|
||||
type: string
|
||||
latestRevision:
|
||||
description: LatestRevision of the component definition
|
||||
properties:
|
||||
|
||||
@@ -1123,6 +1123,10 @@ spec:
|
||||
- type
|
||||
type: object
|
||||
type: array
|
||||
configMapRef:
|
||||
description: ConfigMapRef refer to a ConfigMap which contains
|
||||
OpenAPI V3 JSON schema of Component parameters.
|
||||
type: string
|
||||
latestRevision:
|
||||
description: LatestRevision of the component definition
|
||||
properties:
|
||||
|
||||
@@ -216,6 +216,10 @@ spec:
|
||||
- type
|
||||
type: object
|
||||
type: array
|
||||
configMapRef:
|
||||
description: ConfigMapRef refer to a ConfigMap which contains OpenAPI
|
||||
V3 JSON schema of Component parameters.
|
||||
type: string
|
||||
latestRevision:
|
||||
description: LatestRevision of the component definition
|
||||
properties:
|
||||
|
||||
@@ -8,7 +8,7 @@ data:
|
||||
addons.oam.dev/description: Kubernetes Terraform Controller for Alibaba Cloud
|
||||
addons.oam.dev/name: terraform/provider-alibaba
|
||||
name: terraform-provider-alibaba
|
||||
namespace: default
|
||||
namespace: vela-system
|
||||
spec:
|
||||
components:
|
||||
- name: alibaba-account-creds
|
||||
|
||||
@@ -8,7 +8,7 @@ data:
|
||||
addons.oam.dev/description: Kubernetes Terraform Controller for AWS
|
||||
addons.oam.dev/name: terraform/provider-aws
|
||||
name: terraform-provider-aws
|
||||
namespace: default
|
||||
namespace: vela-system
|
||||
spec:
|
||||
components:
|
||||
- name: aws-account-creds
|
||||
|
||||
@@ -8,7 +8,7 @@ data:
|
||||
addons.oam.dev/description: Kubernetes Terraform Controller for Azure
|
||||
addons.oam.dev/name: terraform/provider-azure
|
||||
name: terraform-provider-azure
|
||||
namespace: default
|
||||
namespace: vela-system
|
||||
spec:
|
||||
components:
|
||||
- name: azure-account-creds
|
||||
|
||||
@@ -24,8 +24,10 @@ spec:
|
||||
componentName: context.name
|
||||
rolloutPlan: {
|
||||
rolloutStrategy: "IncreaseFirst"
|
||||
rolloutBatches: parameter.rolloutBatches
|
||||
targetSize: parameter.targetSize
|
||||
if parameter.rolloutBatches != _|_ {
|
||||
rolloutBatches: parameter.rolloutBatches
|
||||
}
|
||||
targetSize: parameter.targetSize
|
||||
if parameter["batchPartition"] != _|_ {
|
||||
batchPartition: parameter.batchPartition
|
||||
}
|
||||
@@ -35,7 +37,7 @@ spec:
|
||||
parameter: {
|
||||
targetRevision: *context.revision | string
|
||||
targetSize: int
|
||||
rolloutBatches: [...rolloutBatch]
|
||||
rolloutBatches?: [...rolloutBatch]
|
||||
batchPartition?: int
|
||||
}
|
||||
rolloutBatch: replicas: int
|
||||
|
||||
@@ -97,17 +97,17 @@ spec:
|
||||
url?: string
|
||||
value?: string
|
||||
style?: string
|
||||
text?: text
|
||||
text?: textType
|
||||
confirm?: {
|
||||
title: text
|
||||
text: text
|
||||
confirm: text
|
||||
deny: text
|
||||
title: textType
|
||||
text: textType
|
||||
confirm: textType
|
||||
deny: textType
|
||||
style?: string
|
||||
}
|
||||
options?: [...option]
|
||||
initial_options?: [...option]
|
||||
placeholder?: text
|
||||
placeholder?: textType
|
||||
initial_date?: string
|
||||
image_url?: string
|
||||
alt_text?: string
|
||||
@@ -121,16 +121,16 @@ spec:
|
||||
initial_time?: string
|
||||
}]
|
||||
}
|
||||
text: {
|
||||
textType: {
|
||||
type: string
|
||||
text: string
|
||||
emoji?: bool
|
||||
verbatim?: bool
|
||||
}
|
||||
option: {
|
||||
text: text
|
||||
text: textType
|
||||
value: string
|
||||
description?: text
|
||||
description?: textType
|
||||
url?: string
|
||||
}
|
||||
// send webhook notification
|
||||
|
||||
@@ -4399,6 +4399,10 @@ spec:
|
||||
- type
|
||||
type: object
|
||||
type: array
|
||||
configMapRef:
|
||||
description: ConfigMapRef refer to a ConfigMap which contains
|
||||
OpenAPI V3 JSON schema of Component parameters.
|
||||
type: string
|
||||
latestRevision:
|
||||
description: LatestRevision of the component definition
|
||||
properties:
|
||||
|
||||
@@ -1123,6 +1123,10 @@ spec:
|
||||
- type
|
||||
type: object
|
||||
type: array
|
||||
configMapRef:
|
||||
description: ConfigMapRef refer to a ConfigMap which contains
|
||||
OpenAPI V3 JSON schema of Component parameters.
|
||||
type: string
|
||||
latestRevision:
|
||||
description: LatestRevision of the component definition
|
||||
properties:
|
||||
|
||||
@@ -216,6 +216,10 @@ spec:
|
||||
- type
|
||||
type: object
|
||||
type: array
|
||||
configMapRef:
|
||||
description: ConfigMapRef refer to a ConfigMap which contains OpenAPI
|
||||
V3 JSON schema of Component parameters.
|
||||
type: string
|
||||
latestRevision:
|
||||
description: LatestRevision of the component definition
|
||||
properties:
|
||||
|
||||
@@ -24,8 +24,10 @@ spec:
|
||||
componentName: context.name
|
||||
rolloutPlan: {
|
||||
rolloutStrategy: "IncreaseFirst"
|
||||
rolloutBatches: parameter.rolloutBatches
|
||||
targetSize: parameter.targetSize
|
||||
if parameter.rolloutBatches != _|_ {
|
||||
rolloutBatches: parameter.rolloutBatches
|
||||
}
|
||||
targetSize: parameter.targetSize
|
||||
if parameter["batchPartition"] != _|_ {
|
||||
batchPartition: parameter.batchPartition
|
||||
}
|
||||
@@ -35,7 +37,7 @@ spec:
|
||||
parameter: {
|
||||
targetRevision: *context.revision | string
|
||||
targetSize: int
|
||||
rolloutBatches: [...rolloutBatch]
|
||||
rolloutBatches?: [...rolloutBatch]
|
||||
batchPartition?: int
|
||||
}
|
||||
rolloutBatch: replicas: int
|
||||
|
||||
@@ -97,17 +97,17 @@ spec:
|
||||
url?: string
|
||||
value?: string
|
||||
style?: string
|
||||
text?: text
|
||||
text?: textType
|
||||
confirm?: {
|
||||
title: text
|
||||
text: text
|
||||
confirm: text
|
||||
deny: text
|
||||
title: textType
|
||||
text: textType
|
||||
confirm: textType
|
||||
deny: textType
|
||||
style?: string
|
||||
}
|
||||
options?: [...option]
|
||||
initial_options?: [...option]
|
||||
placeholder?: text
|
||||
placeholder?: textType
|
||||
initial_date?: string
|
||||
image_url?: string
|
||||
alt_text?: string
|
||||
@@ -121,16 +121,16 @@ spec:
|
||||
initial_time?: string
|
||||
}]
|
||||
}
|
||||
text: {
|
||||
textType: {
|
||||
type: string
|
||||
text: string
|
||||
emoji?: bool
|
||||
verbatim?: bool
|
||||
}
|
||||
option: {
|
||||
text: text
|
||||
text: textType
|
||||
value: string
|
||||
description?: text
|
||||
description?: textType
|
||||
url?: string
|
||||
}
|
||||
// send webhook notification
|
||||
|
||||
@@ -32,6 +32,7 @@ import (
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/klog/v2/klogr"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/healthz"
|
||||
@@ -192,7 +193,7 @@ func main() {
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
ctrl.SetLogger(klogr.New())
|
||||
mgr, err := ctrl.NewManager(restConfig, ctrl.Options{
|
||||
Scheme: scheme,
|
||||
MetricsBindAddress: metricsAddr,
|
||||
|
||||
130
docs/examples/obervability/application-observability.yaml
Normal file
130
docs/examples/obervability/application-observability.yaml
Normal file
@@ -0,0 +1,130 @@
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: observability
|
||||
spec: { }
|
||||
|
||||
---
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: Application
|
||||
metadata:
|
||||
annotations:
|
||||
addons.oam.dev/description: "An out of the box solution for KubeVela observability"
|
||||
name: grafana
|
||||
namespace: observability
|
||||
spec:
|
||||
components:
|
||||
# install grafana datasource registration chart
|
||||
- name: grafana-registration-release
|
||||
type: helm
|
||||
properties:
|
||||
repoType: git
|
||||
url: https://github.com/oam-dev/grafana-registration
|
||||
git:
|
||||
branch: master
|
||||
chart: ./chart
|
||||
targetNamespace: observability
|
||||
values:
|
||||
replicaCount: 1
|
||||
|
||||
# install Grafana
|
||||
- name: grafana
|
||||
properties:
|
||||
chart: grafana
|
||||
version: 6.14.1
|
||||
repoType: helm
|
||||
# original url: https://grafana.github.io/helm-charts
|
||||
url: https://charts.kubevela.net/addons
|
||||
targetNamespace: observability
|
||||
releaseName: grafana
|
||||
type: helm
|
||||
traits:
|
||||
- type: pure-ingress
|
||||
properties:
|
||||
domain: grafana.c58136db32cbc44cca364bf1cf7f90519.cn-hongkong.alicontainer.com
|
||||
http:
|
||||
"/": 80
|
||||
- type: import-grafana-dashboard
|
||||
properties:
|
||||
grafanaServiceName: grafana
|
||||
grafanaServiceNamespace: observability
|
||||
credentialSecret: grafana
|
||||
credentialSecretNamespace: observability
|
||||
urls:
|
||||
- "https://charts.kubevela.net/addons/dashboards/kubevela_core_logging.json"
|
||||
- "https://charts.kubevela.net/addons/dashboards/kubevela_core_monitoring.json"
|
||||
- "https://charts.kubevela.net/addons/dashboards/flux2/cluster.json"
|
||||
- "https://charts.kubevela.net/addons/dashboards/kubevela_application_logging.json"
|
||||
|
||||
# install loki
|
||||
- name: loki
|
||||
type: helm
|
||||
properties:
|
||||
chart: loki-stack
|
||||
version: 2.4.1
|
||||
repoType: helm
|
||||
# original url: https://grafana.github.io/helm-charts
|
||||
url: https://charts.kubevela.net/addons
|
||||
targetNamespace: observability
|
||||
releaseName: loki
|
||||
traits:
|
||||
- type: register-grafana-datasource # register loki datasource to Grafana
|
||||
properties:
|
||||
grafanaServiceName: grafana
|
||||
grafanaServiceNamespace: observability
|
||||
credentialSecret: grafana
|
||||
credentialSecretNamespace: observability
|
||||
name: loki
|
||||
service: loki
|
||||
namespace: observability
|
||||
type: loki
|
||||
access: proxy
|
||||
|
||||
# install Prometheus
|
||||
- name: prometheus-server
|
||||
type: helm
|
||||
properties:
|
||||
chart: prometheus
|
||||
version: 14.4.1
|
||||
repoType: helm
|
||||
# original url: https://prometheus-community.github.io/helm-charts
|
||||
url: https://charts.kubevela.net/addons
|
||||
targetNamespace: observability
|
||||
releaseName: prometheus
|
||||
values:
|
||||
alertmanager:
|
||||
persistentVolume:
|
||||
storageClass: "alicloud-disk-available"
|
||||
size: "20Gi"
|
||||
server:
|
||||
persistentVolume:
|
||||
storageClass: "alicloud-disk-available"
|
||||
size: "20Gi"
|
||||
|
||||
traits:
|
||||
- type: register-grafana-datasource # register Prometheus datasource to Grafana
|
||||
properties:
|
||||
grafanaServiceName: grafana
|
||||
grafanaServiceNamespace: observability
|
||||
credentialSecret: grafana
|
||||
credentialSecretNamespace: observability
|
||||
name: prometheus
|
||||
service: prometheus-server
|
||||
namespace: observability
|
||||
type: prometheus
|
||||
access: proxy
|
||||
|
||||
# install kube-state-metrics
|
||||
- name: kube-state-metrics
|
||||
type: helm
|
||||
properties:
|
||||
chart: kube-state-metrics
|
||||
version: 3.4.1
|
||||
repoType: helm
|
||||
# original url: https://prometheus-community.github.io/helm-charts
|
||||
url: https://charts.kubevela.net/addons
|
||||
targetNamespace: observability
|
||||
values:
|
||||
image:
|
||||
repository: oamdev/kube-state-metrics
|
||||
tag: v2.1.0
|
||||
@@ -0,0 +1,31 @@
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: TraitDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
definition.oam.dev/description: "Import dashboards to Grafana"
|
||||
name: import-grafana-dashboard
|
||||
namespace: vela-system
|
||||
spec:
|
||||
schematic:
|
||||
cue:
|
||||
template: |
|
||||
outputs: registerdatasource: {
|
||||
apiVersion: "grafana.extension.oam.dev/v1alpha1"
|
||||
kind: "ImportDashboard"
|
||||
spec: {
|
||||
grafana: {
|
||||
service: parameter.grafanaServiceName
|
||||
namespace: parameter.grafanaServiceNamespace
|
||||
credentialSecret: parameter.credentialSecret
|
||||
credentialSecretNamespace: parameter.credentialSecretNamespace
|
||||
}
|
||||
urls: parameter.urls
|
||||
}
|
||||
}
|
||||
parameter: {
|
||||
grafanaServiceName: string
|
||||
grafanaServiceNamespace: *"default" | string
|
||||
credentialSecret: string
|
||||
credentialSecretNamespace: *"default" | string
|
||||
urls: [...string]
|
||||
}
|
||||
@@ -0,0 +1,58 @@
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: TraitDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
definition.oam.dev/description: "Enable public web traffic for the component without creating a Service."
|
||||
name: pure-ingress
|
||||
namespace: vela-system
|
||||
spec:
|
||||
status:
|
||||
customStatus: |-
|
||||
let igs = context.outputs.ingress.status.loadBalancer.ingress
|
||||
if igs == _|_ {
|
||||
message: "No loadBalancer found, visiting by using 'vela port-forward " + context.appName + " --route'\n"
|
||||
}
|
||||
if len(igs) > 0 {
|
||||
if igs[0].ip != _|_ {
|
||||
message: "Visiting URL: " + context.outputs.ingress.spec.rules[0].host + ", IP: " + igs[0].ip
|
||||
}
|
||||
if igs[0].ip == _|_ {
|
||||
message: "Visiting URL: " + context.outputs.ingress.spec.rules[0].host
|
||||
}
|
||||
}
|
||||
healthPolicy: |
|
||||
isHealth: len(context.outputs.ingress.status.loadBalancer.ingress) > 0
|
||||
schematic:
|
||||
cue:
|
||||
template: |
|
||||
|
||||
outputs: ingress: {
|
||||
apiVersion: "networking.k8s.io/v1beta1"
|
||||
kind: "Ingress"
|
||||
metadata:
|
||||
name: context.name
|
||||
spec: {
|
||||
rules: [{
|
||||
host: parameter.domain
|
||||
http: {
|
||||
paths: [
|
||||
for k, v in parameter.http {
|
||||
path: k
|
||||
backend: {
|
||||
serviceName: context.name
|
||||
servicePort: v
|
||||
}
|
||||
},
|
||||
]
|
||||
}
|
||||
}]
|
||||
}
|
||||
}
|
||||
|
||||
parameter: {
|
||||
// +usage=Specify the domain you want to expose
|
||||
domain: string
|
||||
|
||||
// +usage=Specify the mapping relationship between the http path and the workload port
|
||||
http: [string]: int
|
||||
}
|
||||
@@ -0,0 +1,42 @@
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: TraitDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
definition.oam.dev/description: "Add a datasource to Grafana"
|
||||
name: register-grafana-datasource
|
||||
namespace: vela-system
|
||||
spec:
|
||||
schematic:
|
||||
cue:
|
||||
template: |
|
||||
outputs: registerdatasource: {
|
||||
apiVersion: "grafana.extension.oam.dev/v1alpha1"
|
||||
kind: "DatasourceRegistration"
|
||||
spec: {
|
||||
grafana: {
|
||||
service: parameter.grafanaServiceName
|
||||
namespace: parameter.grafanaServiceNamespace
|
||||
credentialSecret: parameter.credentialSecret
|
||||
credentialSecretNamespace: parameter.credentialSecretNamespace
|
||||
}
|
||||
datasource: {
|
||||
name: parameter.name
|
||||
type: parameter.type
|
||||
access: parameter.access
|
||||
service: parameter.service
|
||||
namespace: parameter.namespace
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
parameter: {
|
||||
grafanaServiceName: string
|
||||
grafanaServiceNamespace: *"default" | string
|
||||
credentialSecret: string
|
||||
credentialSecretNamespace: string
|
||||
name: string
|
||||
type: string
|
||||
access: *"proxy" | string
|
||||
service: string
|
||||
namespace: *"default" | string
|
||||
}
|
||||
@@ -1,159 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: observability
|
||||
spec: { }
|
||||
|
||||
---
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: Initializer
|
||||
metadata:
|
||||
name: grafana
|
||||
namespace: observability
|
||||
spec:
|
||||
appTemplate:
|
||||
spec:
|
||||
components:
|
||||
# install grafana datasource registration chart
|
||||
- name: grafana-registration-release
|
||||
properties:
|
||||
apiVersion: helm.toolkit.fluxcd.io/v2beta1
|
||||
kind: HelmRelease
|
||||
metadata:
|
||||
name: grafana-registration-release
|
||||
namespace: observability
|
||||
spec:
|
||||
chart:
|
||||
spec:
|
||||
chart: ./chart
|
||||
interval: 1m
|
||||
sourceRef:
|
||||
kind: GitRepository
|
||||
name: grafana-registration-repo
|
||||
namespace: observability
|
||||
interval: 5m
|
||||
values:
|
||||
replicaCount: 1
|
||||
type: raw
|
||||
- name: grafana-registration-repo
|
||||
properties:
|
||||
apiVersion: source.toolkit.fluxcd.io/v1beta1
|
||||
kind: GitRepository
|
||||
metadata:
|
||||
name: grafana-registration-repo
|
||||
namespace: observability
|
||||
spec:
|
||||
interval: 5m
|
||||
ref:
|
||||
branch: master
|
||||
url: https://github.com/oam-dev/grafana-registration
|
||||
type: raw
|
||||
|
||||
# install Grafana
|
||||
- name: grafana
|
||||
properties:
|
||||
chart: grafana
|
||||
version: 6.14.1
|
||||
repoType: helm
|
||||
# original url: https://grafana.github.io/helm-charts
|
||||
url: https://charts.kubevela.net/addons
|
||||
targetNamespace: observability
|
||||
releaseName: grafana
|
||||
type: helm
|
||||
traits:
|
||||
- type: pure-ingress
|
||||
properties:
|
||||
domain: grafana.cf7223b8abedc4691b7eccfe3c675850a.cn-hongkong.alicontainer.com
|
||||
http:
|
||||
"/": 80
|
||||
- type: import-grafana-dashboard
|
||||
properties:
|
||||
grafanaServiceName: grafana
|
||||
grafanaServiceNamespace: observability
|
||||
credentialSecret: grafana
|
||||
credentialSecretNamespace: observability
|
||||
urls:
|
||||
- "https://charts.kubevela.net/addons/dashboards/kubevela_core_logging.json"
|
||||
- "https://charts.kubevela.net/addons/dashboards/kubevela_core_monitoring.json"
|
||||
- "https://charts.kubevela.net/addons/dashboards/flux2/cluster.json"
|
||||
- "https://charts.kubevela.net/addons/dashboards/kubevela_application_logging.json"
|
||||
|
||||
# install loki
|
||||
- name: loki
|
||||
type: helm
|
||||
properties:
|
||||
chart: loki-stack
|
||||
version: 2.4.1
|
||||
repoType: helm
|
||||
# original url: https://grafana.github.io/helm-charts
|
||||
url: https://charts.kubevela.net/addons
|
||||
targetNamespace: observability
|
||||
releaseName: loki
|
||||
traits:
|
||||
- type: register-grafana-datasource # register loki datasource to Grafana
|
||||
properties:
|
||||
grafanaServiceName: grafana
|
||||
grafanaServiceNamespace: observability
|
||||
credentialSecret: grafana
|
||||
credentialSecretNamespace: observability
|
||||
name: loki
|
||||
service: loki
|
||||
namespace: observability
|
||||
type: loki
|
||||
access: proxy
|
||||
|
||||
# install Prometheus
|
||||
- name: prometheus-server
|
||||
type: helm
|
||||
properties:
|
||||
chart: prometheus
|
||||
version: 14.4.1
|
||||
repoType: helm
|
||||
# original url: https://prometheus-community.github.io/helm-charts
|
||||
url: https://charts.kubevela.net/addons
|
||||
targetNamespace: observability
|
||||
releaseName: prometheus
|
||||
values:
|
||||
alertmanager:
|
||||
persistentVolume:
|
||||
storageClass: "alicloud-disk-available"
|
||||
size: "20Gi"
|
||||
server:
|
||||
persistentVolume:
|
||||
storageClass: "alicloud-disk-available"
|
||||
size: "20Gi"
|
||||
|
||||
traits:
|
||||
- type: register-grafana-datasource # register Prometheus datasource to Grafana
|
||||
properties:
|
||||
grafanaServiceName: grafana
|
||||
grafanaServiceNamespace: observability
|
||||
credentialSecret: grafana
|
||||
credentialSecretNamespace: observability
|
||||
name: prometheus
|
||||
service: prometheus-server
|
||||
namespace: observability
|
||||
type: prometheus
|
||||
access: proxy
|
||||
|
||||
# install kube-state-metrics
|
||||
- name: kube-state-metrics
|
||||
type: helm
|
||||
properties:
|
||||
chart: kube-state-metrics
|
||||
version: 3.4.1
|
||||
repoType: helm
|
||||
# original url: https://prometheus-community.github.io/helm-charts
|
||||
url: https://charts.kubevela.net/addons
|
||||
targetNamespace: observability
|
||||
values:
|
||||
image:
|
||||
repository: oamdev/kube-state-metrics
|
||||
tag: v2.1.0
|
||||
|
||||
dependsOn:
|
||||
- ref:
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: Initializer
|
||||
name: fluxcd
|
||||
namespace: vela-system
|
||||
12
hack/e2e/build_runtime_rollout.sh
Executable file
12
hack/e2e/build_runtime_rollout.sh
Executable file
@@ -0,0 +1,12 @@
|
||||
#!/bin/sh
|
||||
|
||||
TEMP_DIR="./runtime/rollout/e2e/tmp/"
|
||||
|
||||
mkdir -p $TEMP_DIR
|
||||
cp -r go.mod $TEMP_DIR
|
||||
cp -r go.sum $TEMP_DIR
|
||||
cp -r entrypoint.sh $TEMP_DIR
|
||||
cp -r runtime/rollout/cmd/main.go $TEMP_DIR
|
||||
cp -r ./apis $TEMP_DIR
|
||||
cp -r ./pkg $TEMP_DIR
|
||||
cp -r ./version $TEMP_DIR
|
||||
@@ -4399,6 +4399,10 @@ spec:
|
||||
- type
|
||||
type: object
|
||||
type: array
|
||||
configMapRef:
|
||||
description: ConfigMapRef refer to a ConfigMap which contains
|
||||
OpenAPI V3 JSON schema of Component parameters.
|
||||
type: string
|
||||
latestRevision:
|
||||
description: LatestRevision of the component definition
|
||||
properties:
|
||||
|
||||
@@ -1123,6 +1123,10 @@ spec:
|
||||
- type
|
||||
type: object
|
||||
type: array
|
||||
configMapRef:
|
||||
description: ConfigMapRef refer to a ConfigMap which contains
|
||||
OpenAPI V3 JSON schema of Component parameters.
|
||||
type: string
|
||||
latestRevision:
|
||||
description: LatestRevision of the component definition
|
||||
properties:
|
||||
|
||||
@@ -216,6 +216,10 @@ spec:
|
||||
- type
|
||||
type: object
|
||||
type: array
|
||||
configMapRef:
|
||||
description: ConfigMapRef refer to a ConfigMap which contains OpenAPI
|
||||
V3 JSON schema of Component parameters.
|
||||
type: string
|
||||
latestRevision:
|
||||
description: LatestRevision of the component definition
|
||||
properties:
|
||||
|
||||
@@ -43,6 +43,7 @@ import (
|
||||
common2 "github.com/oam-dev/kubevela/pkg/controller/common"
|
||||
core "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev"
|
||||
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha1/envbinding"
|
||||
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/application/assemble"
|
||||
"github.com/oam-dev/kubevela/pkg/cue/packages"
|
||||
"github.com/oam-dev/kubevela/pkg/oam"
|
||||
"github.com/oam-dev/kubevela/pkg/oam/discoverymapper"
|
||||
@@ -223,7 +224,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
|
||||
return r.endWithNegativeCondition(ctx, app, condition.ErrorCondition("Render", err), common.ApplicationRendering)
|
||||
}
|
||||
|
||||
handler.handleCheckManageWorkloadTrait(handler.currentAppRev.Spec.TraitDefinitions, comps)
|
||||
assemble.HandleCheckManageWorkloadTrait(*handler.currentAppRev, comps)
|
||||
|
||||
if err := handler.HandleComponentsRevision(ctx, comps); err != nil {
|
||||
klog.ErrorS(err, "Failed to handle compoents revision", "application", klog.KObj(app))
|
||||
|
||||
@@ -37,7 +37,6 @@ import (
|
||||
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/applicationrollout"
|
||||
"github.com/oam-dev/kubevela/pkg/controller/utils"
|
||||
"github.com/oam-dev/kubevela/pkg/cue/process"
|
||||
"github.com/oam-dev/kubevela/pkg/oam"
|
||||
oamutil "github.com/oam-dev/kubevela/pkg/oam/util"
|
||||
)
|
||||
|
||||
@@ -304,26 +303,6 @@ func (h *AppHandler) aggregateHealthStatus(appFile *appfile.Appfile) ([]common.A
|
||||
return appStatus, healthy, nil
|
||||
}
|
||||
|
||||
func (h *AppHandler) handleCheckManageWorkloadTrait(traitDefs map[string]v1beta1.TraitDefinition, comps []*types.ComponentManifest) {
|
||||
manageWorkloadTrait := map[string]bool{}
|
||||
for traitName, definition := range traitDefs {
|
||||
if definition.Spec.ManageWorkload {
|
||||
manageWorkloadTrait[traitName] = true
|
||||
}
|
||||
}
|
||||
if len(manageWorkloadTrait) == 0 {
|
||||
return
|
||||
}
|
||||
for _, comp := range comps {
|
||||
for _, trait := range comp.Traits {
|
||||
traitType := trait.GetLabels()[oam.TraitTypeLabel]
|
||||
if manageWorkloadTrait[traitType] {
|
||||
trait.SetLabels(oamutil.MergeMapOverrideWithDst(trait.GetLabels(), map[string]string{oam.LabelManageWorkloadTrait: "true"}))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func generateScopeReference(scopes []appfile.Scope) []corev1.ObjectReference {
|
||||
var references []corev1.ObjectReference
|
||||
for _, scope := range scopes {
|
||||
|
||||
@@ -30,7 +30,6 @@ import (
|
||||
. "github.com/onsi/gomega"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
@@ -41,7 +40,6 @@ import (
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
|
||||
velatypes "github.com/oam-dev/kubevela/apis/types"
|
||||
"github.com/oam-dev/kubevela/pkg/appfile"
|
||||
"github.com/oam-dev/kubevela/pkg/oam"
|
||||
)
|
||||
|
||||
const workloadDefinition = `
|
||||
@@ -217,37 +215,3 @@ var _ = Describe("Test statusAggregate", func() {
|
||||
Expect(err).Should(BeNil())
|
||||
})
|
||||
})
|
||||
|
||||
var _ = Describe("Test handleCheckManageWorkloadTrait func", func() {
|
||||
It("Test every situation", func() {
|
||||
traitDefs := map[string]v1beta1.TraitDefinition{
|
||||
"rollout": v1beta1.TraitDefinition{
|
||||
Spec: v1beta1.TraitDefinitionSpec{
|
||||
ManageWorkload: true,
|
||||
},
|
||||
},
|
||||
"normal": v1beta1.TraitDefinition{
|
||||
Spec: v1beta1.TraitDefinitionSpec{},
|
||||
},
|
||||
}
|
||||
rolloutTrait := &unstructured.Unstructured{}
|
||||
rolloutTrait.SetLabels(map[string]string{oam.TraitTypeLabel: "rollout"})
|
||||
|
||||
normalTrait := &unstructured.Unstructured{}
|
||||
normalTrait.SetLabels(map[string]string{oam.TraitTypeLabel: "normal"})
|
||||
comps := []*velatypes.ComponentManifest{
|
||||
{
|
||||
Traits: []*unstructured.Unstructured{
|
||||
rolloutTrait,
|
||||
normalTrait,
|
||||
},
|
||||
},
|
||||
}
|
||||
h := AppHandler{}
|
||||
h.handleCheckManageWorkloadTrait(traitDefs, comps)
|
||||
Expect(len(rolloutTrait.GetLabels())).Should(BeEquivalentTo(2))
|
||||
Expect(rolloutTrait.GetLabels()[oam.LabelManageWorkloadTrait]).Should(BeEquivalentTo("true"))
|
||||
Expect(len(normalTrait.GetLabels())).Should(BeEquivalentTo(1))
|
||||
Expect(normalTrait.GetLabels()[oam.LabelManageWorkloadTrait]).Should(BeEquivalentTo(""))
|
||||
})
|
||||
})
|
||||
|
||||
@@ -232,6 +232,9 @@ func PrepareBeforeApply(comp *types.ComponentManifest, appRev *v1beta1.Applicati
|
||||
}
|
||||
|
||||
assembledTraits := make([]*unstructured.Unstructured, len(comp.Traits))
|
||||
|
||||
HandleCheckManageWorkloadTrait(*appRev, []*types.ComponentManifest{comp})
|
||||
|
||||
for i, trait := range comp.Traits {
|
||||
setTraitLabels(trait, additionalLabel)
|
||||
assembledTraits[i] = trait
|
||||
@@ -329,3 +332,25 @@ func setTraitLabels(trait *unstructured.Unstructured, additionalLabels map[strin
|
||||
// add more trait-specific labels here
|
||||
util.AddLabels(trait, additionalLabels)
|
||||
}
|
||||
|
||||
// HandleCheckManageWorkloadTrait will checkout every trait whether a manage-workload trait, if yes set label and annotation in trait
|
||||
func HandleCheckManageWorkloadTrait(appRev v1beta1.ApplicationRevision, comps []*types.ComponentManifest) {
|
||||
traitDefs := appRev.Spec.TraitDefinitions
|
||||
manageWorkloadTrait := map[string]bool{}
|
||||
for traitName, definition := range traitDefs {
|
||||
if definition.Spec.ManageWorkload {
|
||||
manageWorkloadTrait[traitName] = true
|
||||
}
|
||||
}
|
||||
if len(manageWorkloadTrait) == 0 {
|
||||
return
|
||||
}
|
||||
for _, comp := range comps {
|
||||
for _, trait := range comp.Traits {
|
||||
traitType := trait.GetLabels()[oam.TraitTypeLabel]
|
||||
if manageWorkloadTrait[traitType] {
|
||||
trait.SetLabels(util.MergeMapOverrideWithDst(trait.GetLabels(), map[string]string{oam.LabelManageWorkloadTrait: "true"}))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -26,6 +26,7 @@ import (
|
||||
"sigs.k8s.io/yaml"
|
||||
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
|
||||
velatypes "github.com/oam-dev/kubevela/apis/types"
|
||||
"github.com/oam-dev/kubevela/pkg/oam"
|
||||
)
|
||||
|
||||
@@ -203,3 +204,49 @@ var _ = Describe("Test Assemble Options", func() {
|
||||
Expect(wl.GetName()).Should(Equal(workloadName))
|
||||
})
|
||||
})
|
||||
|
||||
var _ = Describe("Test handleCheckManageWorkloadTrait func", func() {
|
||||
It("Test every situation", func() {
|
||||
traitDefs := map[string]v1beta1.TraitDefinition{
|
||||
"rollout": v1beta1.TraitDefinition{
|
||||
Spec: v1beta1.TraitDefinitionSpec{
|
||||
ManageWorkload: true,
|
||||
},
|
||||
},
|
||||
"normal": v1beta1.TraitDefinition{
|
||||
Spec: v1beta1.TraitDefinitionSpec{},
|
||||
},
|
||||
}
|
||||
appRev := v1beta1.ApplicationRevision{
|
||||
Spec: v1beta1.ApplicationRevisionSpec{
|
||||
TraitDefinitions: traitDefs,
|
||||
},
|
||||
}
|
||||
rolloutTrait := &unstructured.Unstructured{}
|
||||
rolloutTrait.SetLabels(map[string]string{oam.TraitTypeLabel: "rollout"})
|
||||
|
||||
normalTrait := &unstructured.Unstructured{}
|
||||
normalTrait.SetLabels(map[string]string{oam.TraitTypeLabel: "normal"})
|
||||
|
||||
workload := unstructured.Unstructured{}
|
||||
workload.SetLabels(map[string]string{
|
||||
oam.WorkloadTypeLabel: "webservice",
|
||||
})
|
||||
|
||||
comps := []*velatypes.ComponentManifest{
|
||||
{
|
||||
Traits: []*unstructured.Unstructured{
|
||||
rolloutTrait,
|
||||
normalTrait,
|
||||
},
|
||||
StandardWorkload: &workload,
|
||||
},
|
||||
}
|
||||
|
||||
HandleCheckManageWorkloadTrait(appRev, comps)
|
||||
Expect(len(rolloutTrait.GetLabels())).Should(BeEquivalentTo(2))
|
||||
Expect(rolloutTrait.GetLabels()[oam.LabelManageWorkloadTrait]).Should(BeEquivalentTo("true"))
|
||||
Expect(len(normalTrait.GetLabels())).Should(BeEquivalentTo(1))
|
||||
Expect(normalTrait.GetLabels()[oam.LabelManageWorkloadTrait]).Should(BeEquivalentTo(""))
|
||||
})
|
||||
})
|
||||
|
||||
@@ -89,13 +89,10 @@ func HandleReplicas(ctx context.Context, rolloutComp string, c client.Client) as
|
||||
pv := fieldpath.Pave(u.UnstructuredContent())
|
||||
|
||||
// we hard code here, but we can easily support more types of workload by add more cases logic in switch
|
||||
var replicasFieldPath string
|
||||
switch u.GetKind() {
|
||||
case reflect.TypeOf(v1alpha1.CloneSet{}).Name(), reflect.TypeOf(appsv1.Deployment{}).Name(), reflect.TypeOf(appsv1.StatefulSet{}).Name():
|
||||
replicasFieldPath = "spec.replicas"
|
||||
default:
|
||||
replicasFieldPath, err := GetWorkloadReplicasPath(*u)
|
||||
if err != nil {
|
||||
klog.Errorf("rollout meet a workload we cannot support yet", "Kind", u.GetKind(), "name", u.GetName())
|
||||
return fmt.Errorf("rollout meet a workload we cannot support yet Kind %s name %s", u.GetKind(), u.GetName())
|
||||
return err
|
||||
}
|
||||
|
||||
workload := u.DeepCopy()
|
||||
@@ -127,6 +124,16 @@ func HandleReplicas(ctx context.Context, rolloutComp string, c client.Client) as
|
||||
})
|
||||
}
|
||||
|
||||
// GetWorkloadReplicasPath get replicas path of workload
|
||||
func GetWorkloadReplicasPath(u unstructured.Unstructured) (string, error) {
|
||||
switch u.GetKind() {
|
||||
case reflect.TypeOf(v1alpha1.CloneSet{}).Name(), reflect.TypeOf(appsv1.Deployment{}).Name(), reflect.TypeOf(appsv1.StatefulSet{}).Name():
|
||||
return "spec.replicas", nil
|
||||
default:
|
||||
return "", fmt.Errorf("rollout meet a workload we cannot support yet Kind %s name %s", u.GetKind(), u.GetName())
|
||||
}
|
||||
}
|
||||
|
||||
// appRollout should take over updating workload, so disable previous controller owner(resourceTracker)
|
||||
func disableControllerOwner(workload *unstructured.Unstructured) {
|
||||
if workload == nil {
|
||||
|
||||
@@ -20,12 +20,13 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
|
||||
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
|
||||
oamstandard "github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
|
||||
"github.com/oam-dev/kubevela/pkg/oam/util"
|
||||
|
||||
"gotest.tools/assert"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/utils/pointer"
|
||||
@@ -192,3 +193,37 @@ func TestHandleTerminated(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetWorkloadReplicasPath(t *testing.T) {
|
||||
deploy := appsv1.Deployment{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: "appsv1",
|
||||
Kind: "Deployment",
|
||||
},
|
||||
}
|
||||
u, err := util.Object2Unstructured(deploy)
|
||||
if err != nil {
|
||||
t.Errorf("deployment shounld't meet an error %w", err)
|
||||
}
|
||||
pathStr, err := GetWorkloadReplicasPath(*u)
|
||||
if err != nil {
|
||||
t.Errorf("deployment should handle deployment")
|
||||
}
|
||||
if pathStr != "spec.replicas" {
|
||||
t.Errorf("deployPath error got %s", pathStr)
|
||||
}
|
||||
ds := appsv1.DaemonSet{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: "appsv1",
|
||||
Kind: "DaemonSet",
|
||||
},
|
||||
}
|
||||
u, err = util.Object2Unstructured(ds)
|
||||
if err != nil {
|
||||
t.Errorf("ds shounld't meet an error %w", err)
|
||||
}
|
||||
_, err = GetWorkloadReplicasPath(*u)
|
||||
if err == nil {
|
||||
t.Errorf("daemonset shouldn't support")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -29,6 +29,7 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
apps "k8s.io/api/apps/v1"
|
||||
core "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
@@ -370,7 +371,7 @@ func getAppConfigNameFromLabel(o metav1.Object) string {
|
||||
func getVersioningPeerWorkloadRefs(ctx context.Context, c client.Reader, wlRef core.ObjectReference, ns string) ([]core.ObjectReference, error) {
|
||||
o := &unstructured.Unstructured{}
|
||||
o.SetGroupVersionKind(wlRef.GroupVersionKind())
|
||||
if err := c.Get(ctx, client.ObjectKey{Namespace: ns, Name: wlRef.Name}, o); err != nil {
|
||||
if err := c.Get(ctx, client.ObjectKey{Namespace: ns, Name: wlRef.Name}, o); err != nil && !apierrors.IsNotFound(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
||||
@@ -18,6 +18,7 @@ package healthscope
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
@@ -615,12 +616,34 @@ func (r *Reconciler) createWorkloadRefs(ctx context.Context, appRef v1alpha2.App
|
||||
}, o); err != nil {
|
||||
continue
|
||||
}
|
||||
if labels := o.GetLabels(); labels != nil && labels[oam.WorkloadTypeLabel] != "" {
|
||||
wlRefs = append(wlRefs, WorkloadReference{
|
||||
ObjectReference: rs.ObjectReference,
|
||||
clusterName: rs.Cluster,
|
||||
envName: decisionsMap[rs.Cluster],
|
||||
})
|
||||
|
||||
if labels := o.GetLabels(); labels != nil {
|
||||
if labels[oam.WorkloadTypeLabel] != "" {
|
||||
wlRefs = append(wlRefs, WorkloadReference{
|
||||
ObjectReference: rs.ObjectReference,
|
||||
clusterName: rs.Cluster,
|
||||
envName: decisionsMap[rs.Cluster],
|
||||
})
|
||||
} else if labels[oam.TraitTypeLabel] != "" && labels[oam.LabelManageWorkloadTrait] == "true" {
|
||||
// this means this trait is a manage-Workload trait, get workload GVK and name for trait's annotation
|
||||
objectRef := corev1.ObjectReference{}
|
||||
err := json.Unmarshal([]byte(o.GetAnnotations()[oam.AnnotationWorkloadGVK]), &objectRef)
|
||||
if err != nil {
|
||||
// don't break whole check process due to this error
|
||||
continue
|
||||
}
|
||||
if o.GetAnnotations() != nil && len(o.GetAnnotations()[oam.AnnotationWorkloadName]) != 0 {
|
||||
objectRef.Name = o.GetAnnotations()[oam.AnnotationWorkloadName]
|
||||
} else {
|
||||
// use component name as default
|
||||
objectRef.Name = labels[oam.LabelAppComponent]
|
||||
}
|
||||
wlRefs = append(wlRefs, WorkloadReference{
|
||||
ObjectReference: objectRef,
|
||||
clusterName: rs.Cluster,
|
||||
envName: decisionsMap[rs.Cluster],
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -122,6 +122,28 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
|
||||
r.record.Event(&wfstepdefinition, event.Warning("failed to garbage collect DefinitionRevision of type WorkflowStepDefinition", err))
|
||||
}
|
||||
|
||||
def := utils.NewCapabilityStepDef(&wfstepdefinition)
|
||||
def.Name = req.NamespacedName.Name
|
||||
// Store the parameter of stepDefinition to configMap
|
||||
cmName, err := def.StoreOpenAPISchema(ctx, r.Client, r.pd, req.Namespace, req.Name, defRev.Name)
|
||||
if err != nil {
|
||||
klog.InfoS("Could not store capability in ConfigMap", "err", err)
|
||||
r.record.Event(&(wfstepdefinition), event.Warning("Could not store capability in ConfigMap", err))
|
||||
return ctrl.Result{}, util.PatchCondition(ctx, r, &wfstepdefinition,
|
||||
condition.ReconcileError(fmt.Errorf(util.ErrStoreCapabilityInConfigMap, wfstepdefinition.Name, err)))
|
||||
}
|
||||
|
||||
if wfstepdefinition.Status.ConfigMapRef != cmName {
|
||||
wfstepdefinition.Status.ConfigMapRef = cmName
|
||||
if err := r.UpdateStatus(ctx, &wfstepdefinition); err != nil {
|
||||
klog.ErrorS(err, "Could not update WorkflowStepDefinition Status", "workflowStepDefinition", klog.KRef(req.Namespace, req.Name))
|
||||
r.record.Event(&wfstepdefinition, event.Warning("Could not update WorkflowStepDefinition Status", err))
|
||||
return ctrl.Result{}, util.PatchCondition(ctx, r, &wfstepdefinition,
|
||||
condition.ReconcileError(fmt.Errorf(util.ErrUpdateWorkflowStepDefinition, wfstepdefinition.Name, err)))
|
||||
}
|
||||
klog.InfoS("Successfully updated the status.configMapRef of the WorkflowStepDefinition", "workflowStepDefinition",
|
||||
klog.KRef(req.Namespace, req.Name), "status.configMapRef", cmName)
|
||||
}
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -34,6 +34,7 @@ import (
|
||||
"k8s.io/utils/pointer"
|
||||
|
||||
"github.com/crossplane/crossplane-runtime/pkg/event"
|
||||
"github.com/crossplane/crossplane-runtime/pkg/fieldpath"
|
||||
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
|
||||
"github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
|
||||
@@ -240,6 +241,19 @@ func (h *handler) checkWorkloadNotExist(ctx context.Context) (bool, error) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func getWorkloadReplicasNum(u unstructured.Unstructured) (int32, error) {
|
||||
replicaPath, err := applicationrollout.GetWorkloadReplicasPath(u)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("get workload replicas path err %w", err)
|
||||
}
|
||||
wlpv := fieldpath.Pave(u.UnstructuredContent())
|
||||
replicas, err := wlpv.GetInteger(replicaPath)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("get workload replicas err %w", err)
|
||||
}
|
||||
return int32(replicas), nil
|
||||
}
|
||||
|
||||
// checkRollingTerminated check the rollout if have finished
|
||||
func checkRollingTerminated(rollout v1alpha1.Rollout) bool {
|
||||
// handle rollout completed
|
||||
|
||||
@@ -40,6 +40,7 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/utils/pointer"
|
||||
)
|
||||
|
||||
var _ = Describe("Test rollout related handler func", func() {
|
||||
@@ -513,6 +514,50 @@ var _ = Describe("Test rollout related handler func", func() {
|
||||
Expect(checkRt.Status.TrackedResources[0].Name).Should(BeEquivalentTo(u.GetName()))
|
||||
Expect(checkRt.Status.TrackedResources[0].UID).Should(BeEquivalentTo(u.GetUID()))
|
||||
})
|
||||
|
||||
It("TestGetWorkloadReplicasNum", func() {
|
||||
deployName := "test-workload-get"
|
||||
deploy := appsv1.Deployment{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Deployment",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: namespace,
|
||||
Name: deployName,
|
||||
},
|
||||
Spec: appsv1.DeploymentSpec{
|
||||
Replicas: pointer.Int32Ptr(3),
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"app": "test",
|
||||
},
|
||||
},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
"app": "test",
|
||||
},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "test-container",
|
||||
Image: "test-image",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
Expect(k8sClient.Create(ctx, &deploy)).Should(BeNil())
|
||||
u := unstructured.Unstructured{}
|
||||
u.SetAPIVersion("apps/v1")
|
||||
u.SetKind("Deployment")
|
||||
Expect(k8sClient.Get(ctx, types.NamespacedName{Name: deployName, Namespace: namespace}, &u)).Should(BeNil())
|
||||
rep, err := getWorkloadReplicasNum(u)
|
||||
Expect(err).Should(BeNil())
|
||||
Expect(rep).Should(BeEquivalentTo(3))
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
|
||||
@@ -18,6 +18,10 @@ package rollout
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"math"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
@@ -34,6 +38,8 @@ import (
|
||||
common2 "github.com/oam-dev/kubevela/pkg/controller/common"
|
||||
rolloutplan "github.com/oam-dev/kubevela/pkg/controller/common/rollout"
|
||||
oamctrl "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev"
|
||||
|
||||
"github.com/oam-dev/kubevela/pkg/oam"
|
||||
oamutil "github.com/oam-dev/kubevela/pkg/oam/util"
|
||||
"github.com/oam-dev/kubevela/pkg/utils/apply"
|
||||
)
|
||||
@@ -110,6 +116,33 @@ func (r *reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
if rollout.Status.RollingState == v1alpha1.LocatingTargetAppState {
|
||||
if rollout.GetAnnotations() == nil || rollout.GetAnnotations()[oam.AnnotationWorkloadName] != h.targetWorkload.GetName() {
|
||||
// this is a update operation, the target workload will change so modify annotation
|
||||
gvk := map[string]string{"apiVersion": h.targetWorkload.GetAPIVersion(), "kind": h.targetWorkload.GetKind()}
|
||||
gvkValue, _ := json.Marshal(gvk)
|
||||
rollout.SetAnnotations(oamutil.MergeMapOverrideWithDst(rollout.GetAnnotations(),
|
||||
map[string]string{oam.AnnotationWorkloadName: h.targetWorkload.GetName(), oam.AnnotationWorkloadGVK: string(gvkValue)}))
|
||||
klog.InfoS("rollout controller set targetWorkload ", h.targetWorkload.GetName(),
|
||||
"in annotation in rollout namespace: ", rollout.Namespace, " name", rollout.Name, "gvk", gvkValue)
|
||||
// exit current reconcile before create target workload, this reconcile don't update status just modify annotation
|
||||
// next round reconcile will create workload and pass `LocatingTargetAppState` phase
|
||||
return ctrl.Result{}, h.Update(ctx, rollout)
|
||||
}
|
||||
|
||||
// this is a scale operation, if user don't fill rolloutBatches, fill it with default value
|
||||
if len(h.sourceRevName) == 0 && len(rollout.Spec.RolloutPlan.RolloutBatches) == 0 {
|
||||
// logic reach here means cannot get an error, so ignore it
|
||||
replicas, _ := getWorkloadReplicasNum(*h.targetWorkload)
|
||||
rollout.Spec.RolloutPlan.RolloutBatches = []v1alpha1.RolloutBatch{{
|
||||
Replicas: intstr.FromInt(int(math.Abs(float64(*rollout.Spec.RolloutPlan.TargetSize - replicas))))},
|
||||
}
|
||||
klog.InfoS("rollout controller set default rollout batches ", h.rollout.GetName(),
|
||||
" namespace: ", rollout.Namespace, "targetSize", rollout.Spec.RolloutPlan.TargetSize)
|
||||
return ctrl.Result{}, h.Update(ctx, rollout)
|
||||
}
|
||||
}
|
||||
|
||||
switch rollout.Status.RollingState {
|
||||
case v1alpha1.RolloutDeletingState:
|
||||
removed, err := h.checkWorkloadNotExist(ctx)
|
||||
|
||||
@@ -351,6 +351,75 @@ func (def *CapabilityTraitDefinition) StoreOpenAPISchema(ctx context.Context, k8
|
||||
return cmName, nil
|
||||
}
|
||||
|
||||
// CapabilityStepDefinition is the Capability struct for WorkflowStepDefinition
|
||||
type CapabilityStepDefinition struct {
|
||||
Name string `json:"name"`
|
||||
StepDefinition v1beta1.WorkflowStepDefinition `json:"stepDefinition"`
|
||||
|
||||
CapabilityBaseDefinition
|
||||
}
|
||||
|
||||
// NewCapabilityStepDef will create a CapabilityStepDefinition
|
||||
func NewCapabilityStepDef(stepdefinition *v1beta1.WorkflowStepDefinition) CapabilityStepDefinition {
|
||||
var def CapabilityStepDefinition
|
||||
def.Name = stepdefinition.Name
|
||||
def.StepDefinition = *stepdefinition.DeepCopy()
|
||||
return def
|
||||
}
|
||||
|
||||
// GetOpenAPISchema gets OpenAPI v3 schema by StepDefinition name
|
||||
func (def *CapabilityStepDefinition) GetOpenAPISchema(pd *packages.PackageDiscover, name string) ([]byte, error) {
|
||||
capability, err := appfile.ConvertTemplateJSON2Object(name, nil, def.StepDefinition.Spec.Schematic)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to convert WorkflowStepDefinition to Capability Object")
|
||||
}
|
||||
return getOpenAPISchema(capability, pd)
|
||||
}
|
||||
|
||||
// StoreOpenAPISchema stores OpenAPI v3 schema from StepDefinition in ConfigMap
|
||||
func (def *CapabilityStepDefinition) StoreOpenAPISchema(ctx context.Context, k8sClient client.Client, pd *packages.PackageDiscover, namespace, name string, revName string) (string, error) {
|
||||
var jsonSchema []byte
|
||||
var err error
|
||||
|
||||
jsonSchema, err = def.GetOpenAPISchema(pd, name)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to generate OpenAPI v3 JSON schema for capability %s: %w", def.Name, err)
|
||||
}
|
||||
|
||||
stepDefinition := def.StepDefinition
|
||||
ownerReference := []metav1.OwnerReference{{
|
||||
APIVersion: stepDefinition.APIVersion,
|
||||
Kind: stepDefinition.Kind,
|
||||
Name: stepDefinition.Name,
|
||||
UID: stepDefinition.GetUID(),
|
||||
Controller: pointer.BoolPtr(true),
|
||||
BlockOwnerDeletion: pointer.BoolPtr(true),
|
||||
}}
|
||||
cmName, err := def.CreateOrUpdateConfigMap(ctx, k8sClient, namespace, stepDefinition.Name, jsonSchema, ownerReference)
|
||||
if err != nil {
|
||||
return cmName, err
|
||||
}
|
||||
|
||||
// Create a configmap to store parameter for each definitionRevision
|
||||
defRev := new(v1beta1.DefinitionRevision)
|
||||
if err = k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: revName}, defRev); err != nil {
|
||||
return "", err
|
||||
}
|
||||
ownerReference = []metav1.OwnerReference{{
|
||||
APIVersion: defRev.APIVersion,
|
||||
Kind: defRev.Kind,
|
||||
Name: defRev.Name,
|
||||
UID: defRev.GetUID(),
|
||||
Controller: pointer.BoolPtr(true),
|
||||
BlockOwnerDeletion: pointer.BoolPtr(true),
|
||||
}}
|
||||
_, err = def.CreateOrUpdateConfigMap(ctx, k8sClient, namespace, revName, jsonSchema, ownerReference)
|
||||
if err != nil {
|
||||
return cmName, err
|
||||
}
|
||||
return cmName, nil
|
||||
}
|
||||
|
||||
// CapabilityBaseDefinition is the base struct for CapabilityWorkloadDefinition and CapabilityTraitDefinition
|
||||
type CapabilityBaseDefinition struct {
|
||||
}
|
||||
@@ -399,7 +468,7 @@ func (def *CapabilityBaseDefinition) CreateOrUpdateConfigMap(ctx context.Context
|
||||
return cmName, nil
|
||||
}
|
||||
|
||||
// getDefinition is the main function for GetDefinition API
|
||||
// getOpenAPISchema is the main function for GetDefinition API
|
||||
func getOpenAPISchema(capability types.Capability, pd *packages.PackageDiscover) ([]byte, error) {
|
||||
openAPISchema, err := generateOpenAPISchemaFromCapabilityParameter(capability, pd)
|
||||
if err != nil {
|
||||
|
||||
@@ -231,10 +231,11 @@ func (wd *workloadDef) Status(ctx process.Context, cli client.Client, ns string,
|
||||
if err != nil {
|
||||
return "", errors.WithMessage(err, "get template context")
|
||||
}
|
||||
return getStatusMessage(templateContext, customStatusTemplate, parameter)
|
||||
return getStatusMessage(wd.pd, templateContext, customStatusTemplate, parameter)
|
||||
}
|
||||
|
||||
func getStatusMessage(templateContext map[string]interface{}, customStatusTemplate string, parameter interface{}) (string, error) {
|
||||
func getStatusMessage(pd *packages.PackageDiscover, templateContext map[string]interface{}, customStatusTemplate string, parameter interface{}) (string, error) {
|
||||
bi := build.NewContext().NewInstance("", nil)
|
||||
var ctxBuff string
|
||||
var paramBuff = "parameter: {}\n"
|
||||
|
||||
@@ -251,10 +252,12 @@ func getStatusMessage(templateContext map[string]interface{}, customStatusTempla
|
||||
if string(bt) != "null" {
|
||||
paramBuff = "parameter: " + string(bt) + "\n"
|
||||
}
|
||||
var buff = ctxBuff + paramBuff + customStatusTemplate
|
||||
var buff = customStatusTemplate + "\n" + ctxBuff + paramBuff
|
||||
if err := bi.AddFile("-", buff); err != nil {
|
||||
return "", errors.WithMessagef(err, "invalid cue template of customStatus")
|
||||
}
|
||||
|
||||
var r cue.Runtime
|
||||
inst, err := r.Compile("-", buff)
|
||||
inst, err := pd.ImportPackagesAndBuildInstance(bi)
|
||||
if err != nil {
|
||||
return "", errors.WithMessage(err, "compile customStatus template")
|
||||
}
|
||||
@@ -426,7 +429,7 @@ func (td *traitDef) Status(ctx process.Context, cli client.Client, ns string, cu
|
||||
if err != nil {
|
||||
return "", errors.WithMessage(err, "get template context")
|
||||
}
|
||||
return getStatusMessage(templateContext, customStatusTemplate, parameter)
|
||||
return getStatusMessage(td.pd, templateContext, customStatusTemplate, parameter)
|
||||
}
|
||||
|
||||
// HealthCheck address health check for trait
|
||||
|
||||
@@ -1245,9 +1245,36 @@ if len(context.outputs.ingress.status.loadBalancer.ingress) == 0 {
|
||||
statusTemp: `message: parameter.configInfo.name + ".type: " + context.outputs["\(parameter.configInfo.name)"].spec.type`,
|
||||
expMessage: "test-name.type: NodePort",
|
||||
},
|
||||
"import package in template": {
|
||||
tpContext: map[string]interface{}{
|
||||
"outputs": map[string]interface{}{
|
||||
"service": map[string]interface{}{
|
||||
"spec": map[string]interface{}{
|
||||
"type": "NodePort",
|
||||
"clusterIP": "10.0.0.1",
|
||||
"ports": []interface{}{
|
||||
map[string]interface{}{
|
||||
"port": 80,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"ingress": map[string]interface{}{
|
||||
"rules": []interface{}{
|
||||
map[string]interface{}{
|
||||
"host": "example.com",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
statusTemp: `import "strconv"
|
||||
message: "ports: " + strconv.FormatInt(context.outputs.service.spec.ports[0].port,10)`,
|
||||
expMessage: "ports: 80",
|
||||
},
|
||||
}
|
||||
for message, ca := range cases {
|
||||
gotMessage, err := getStatusMessage(ca.tpContext, ca.statusTemp, ca.parameter)
|
||||
gotMessage, err := getStatusMessage(&packages.PackageDiscover{}, ca.tpContext, ca.statusTemp, ca.parameter)
|
||||
assert.NoError(t, err, message)
|
||||
assert.Equal(t, ca.expMessage, gotMessage, message)
|
||||
}
|
||||
|
||||
@@ -35,6 +35,8 @@ import (
|
||||
"k8s.io/apimachinery/pkg/runtime/serializer"
|
||||
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/rest"
|
||||
|
||||
"github.com/oam-dev/kubevela/pkg/stdlib"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -107,6 +109,9 @@ func (pd *PackageDiscover) ImportBuiltinPackagesFor(bi *build.Instance) {
|
||||
// ImportPackagesAndBuildInstance Combine import built-in packages and build cue template together to avoid data race
|
||||
func (pd *PackageDiscover) ImportPackagesAndBuildInstance(bi *build.Instance) (inst *cue.Instance, err error) {
|
||||
pd.ImportBuiltinPackagesFor(bi)
|
||||
if err := stdlib.AddImportsFor(bi, ""); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var r cue.Runtime
|
||||
pd.mutex.Lock()
|
||||
defer pd.mutex.Unlock()
|
||||
|
||||
@@ -197,7 +197,7 @@ func (ctx *templateContext) BaseContextFile() string {
|
||||
if len(ctx.auxiliaries) > 0 {
|
||||
var auxLines []string
|
||||
for _, auxiliary := range ctx.auxiliaries {
|
||||
auxLines = append(auxLines, fmt.Sprintf("%s: %s", auxiliary.Name, structMarshal(auxiliary.Ins.String())))
|
||||
auxLines = append(auxLines, fmt.Sprintf("\"%s\": %s", auxiliary.Name, structMarshal(auxiliary.Ins.String())))
|
||||
}
|
||||
if len(auxLines) > 0 {
|
||||
buff += fmt.Sprintf(model.OutputsFieldName+": {%s}\n", strings.Join(auxLines, "\n"))
|
||||
|
||||
@@ -64,6 +64,11 @@ image: "myserver"
|
||||
Name: "service",
|
||||
}
|
||||
|
||||
svcAuxWithAbnormalName := Auxiliary{
|
||||
Ins: svcIns,
|
||||
Name: "service-1",
|
||||
}
|
||||
|
||||
targetParams := map[string]interface{}{
|
||||
"parameter1": "string",
|
||||
"parameter2": map[string]string{
|
||||
@@ -98,6 +103,7 @@ image: "myserver"
|
||||
ctx := NewContext("myns", "mycomp", "myapp", "myapp-v1")
|
||||
ctx.SetBase(base)
|
||||
ctx.AppendAuxiliaries(svcAux)
|
||||
ctx.AppendAuxiliaries(svcAuxWithAbnormalName)
|
||||
ctx.SetParameters(targetParams)
|
||||
ctx.PushData(model.ContextDataArtifacts, targetData)
|
||||
ctx.PushData("arbitraryData", targetArbitraryData)
|
||||
@@ -132,6 +138,10 @@ image: "myserver"
|
||||
assert.Equal(t, nil, err)
|
||||
assert.Equal(t, "{\"apiVersion\":\"v1\",\"kind\":\"ConfigMap\"}", string(outputsJs))
|
||||
|
||||
outputsJs, err = ctxInst.Lookup("context", model.OutputsFieldName, "service-1").MarshalJSON()
|
||||
assert.Equal(t, nil, err)
|
||||
assert.Equal(t, "{\"apiVersion\":\"v1\",\"kind\":\"ConfigMap\"}", string(outputsJs))
|
||||
|
||||
ns, err := ctxInst.Lookup("context", model.ContextNamespace).String()
|
||||
assert.Equal(t, nil, err)
|
||||
assert.Equal(t, "myns", ns)
|
||||
|
||||
@@ -125,4 +125,10 @@ const (
|
||||
|
||||
// AnnotationLastAppliedConfiguration is kubectl annotations for 3-way merge
|
||||
AnnotationLastAppliedConfiguration = "kubectl.kubernetes.io/last-applied-configuration"
|
||||
|
||||
// AnnotationWorkloadGVK indicates the managed workload's GVK by trait
|
||||
AnnotationWorkloadGVK = "trait.oam.dev/workload-gvk"
|
||||
|
||||
// AnnotationWorkloadName indicates the managed workload's name by trait
|
||||
AnnotationWorkloadName = "trait.oam.dev/workload-name"
|
||||
)
|
||||
|
||||
@@ -100,6 +100,8 @@ const (
|
||||
ErrUpdateComponentDefinition = "cannot update ComponentDefinition %s: %v"
|
||||
// ErrUpdateTraitDefinition is the error while update TraitDefinition
|
||||
ErrUpdateTraitDefinition = "cannot update TraitDefinition %s: %v"
|
||||
// ErrUpdateStepDefinition is the error while update WorkflowStepDefinition
|
||||
ErrUpdateStepDefinition = "cannot update WorkflowStepDefinition %s: %v"
|
||||
// ErrUpdatePolicyDefinition is the error while update PolicyDefinition
|
||||
ErrUpdatePolicyDefinition = "cannot update PolicyDefinition %s: %v"
|
||||
// ErrUpdateWorkflowStepDefinition is the error while update WorkflowStepDefinition
|
||||
|
||||
@@ -27,8 +27,7 @@ import (
|
||||
|
||||
var (
|
||||
//go:embed pkgs op.cue
|
||||
fs embed.FS
|
||||
pkgContent string
|
||||
fs embed.FS
|
||||
)
|
||||
|
||||
// GetPackages Get Stdlib packages
|
||||
@@ -44,7 +43,7 @@ func GetPackages(tagTempl string) (map[string]string, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pkgContent = string(opBytes) + "\n"
|
||||
pkgContent := string(opBytes) + "\n"
|
||||
for _, file := range files {
|
||||
body, err := fs.ReadFile("pkgs/" + file.Name())
|
||||
if err != nil {
|
||||
|
||||
@@ -18,17 +18,17 @@
|
||||
url?: string
|
||||
value?: string
|
||||
style?: string
|
||||
text?: #text
|
||||
text?: #textType
|
||||
confirm?: {
|
||||
title: #text
|
||||
text: #text
|
||||
confirm: #text
|
||||
deny: #text
|
||||
title: #textType
|
||||
text: #textType
|
||||
confirm: #textType
|
||||
deny: #textType
|
||||
style?: string
|
||||
}
|
||||
options?: [...#option]
|
||||
initial_options?: [...#option]
|
||||
placeholder?: #text
|
||||
placeholder?: #textType
|
||||
initial_date?: string
|
||||
image_url?: string
|
||||
alt_text?: string
|
||||
@@ -45,7 +45,7 @@
|
||||
}]
|
||||
}
|
||||
|
||||
#text: {
|
||||
#textType: {
|
||||
type: string
|
||||
text: string
|
||||
emoji?: bool
|
||||
@@ -53,8 +53,8 @@
|
||||
}
|
||||
|
||||
#option: {
|
||||
text: text
|
||||
text: #textType
|
||||
value: string
|
||||
description?: text
|
||||
description?: #textType
|
||||
url?: string
|
||||
}
|
||||
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/util/flowcontrol"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/config"
|
||||
|
||||
@@ -43,6 +44,7 @@ func (a *Args) SetConfig() error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
restConf.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(100, 200)
|
||||
a.Config = restConf
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -44,6 +44,7 @@ import (
|
||||
k8sruntime "k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/util/flowcontrol"
|
||||
apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
|
||||
ocmclusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1"
|
||||
ocmworkv1 "open-cluster-management.io/api/work/v1"
|
||||
@@ -52,6 +53,7 @@ import (
|
||||
"sigs.k8s.io/yaml"
|
||||
|
||||
oamcore "github.com/oam-dev/kubevela/apis/core.oam.dev"
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
|
||||
oamstandard "github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
|
||||
velacue "github.com/oam-dev/kubevela/pkg/cue"
|
||||
"github.com/oam-dev/kubevela/pkg/cue/model"
|
||||
@@ -83,8 +85,10 @@ func InitBaseRestConfig() (Args, error) {
|
||||
if err != nil && os.Getenv("IGNORE_KUBE_CONFIG") != "true" {
|
||||
fmt.Println("get kubeConfig err", err)
|
||||
os.Exit(1)
|
||||
} else if err != nil {
|
||||
return Args{}, err
|
||||
}
|
||||
|
||||
restConf.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(100, 200)
|
||||
return Args{
|
||||
Config: restConf,
|
||||
Schema: Scheme,
|
||||
@@ -236,6 +240,51 @@ func RealtimePrintCommandOutput(cmd *exec.Cmd, logFile string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ClusterObject2Map convert ClusterObjectReference to a readable map
|
||||
func ClusterObject2Map(refs []common.ClusterObjectReference) map[string]string {
|
||||
clusterResourceRefTmpl := "Cluster: %s | Namespace: %s | GVK: %s/%s | Name: %s"
|
||||
objs := make(map[string]string, len(refs))
|
||||
for _, r := range refs {
|
||||
if r.Cluster == "" {
|
||||
r.Cluster = "local"
|
||||
}
|
||||
objs[r.Name] = fmt.Sprintf(clusterResourceRefTmpl, r.Cluster, r.Namespace, r.APIVersion, r.ResourceVersion, r.Name)
|
||||
}
|
||||
return objs
|
||||
}
|
||||
|
||||
// AskToChooseOneAppliedResource will ask users to select one applied resource of the application if more than one
|
||||
// resources is a map for component to applied resources
|
||||
// return the selected ClusterObjectReference
|
||||
func AskToChooseOneAppliedResource(resources []common.ClusterObjectReference) (*common.ClusterObjectReference, error) {
|
||||
if len(resources) == 0 {
|
||||
return nil, fmt.Errorf("no applied resources exist in the application")
|
||||
}
|
||||
if len(resources) == 1 {
|
||||
return &resources[0], nil
|
||||
}
|
||||
opMap := ClusterObject2Map(resources)
|
||||
var ops []string
|
||||
for _, r := range opMap {
|
||||
ops = append(ops, r)
|
||||
}
|
||||
prompt := &survey.Select{
|
||||
Message: "You have multiple applied resources in your app. Please choose one:",
|
||||
Options: ops,
|
||||
}
|
||||
var selectedRsc string
|
||||
err := survey.AskOne(prompt, &selectedRsc)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("choosing resource err %w", err)
|
||||
}
|
||||
for k, resource := range ops {
|
||||
if selectedRsc == resource {
|
||||
return &resources[k], nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("choosing resource err %w", err)
|
||||
}
|
||||
|
||||
// AskToChooseOneService will ask users to select one service of the application if more than one exidi
|
||||
func AskToChooseOneService(svcNames []string) (string, error) {
|
||||
if len(svcNames) == 0 {
|
||||
|
||||
@@ -26,8 +26,12 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
kerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/utils/pointer"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
|
||||
"github.com/oam-dev/kubevela/pkg/cue/model"
|
||||
"github.com/oam-dev/kubevela/pkg/cue/model/value"
|
||||
"github.com/oam-dev/kubevela/pkg/oam/util"
|
||||
@@ -256,35 +260,9 @@ func (comp *ComponentManifest) unmarshal(v string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewContext new workflow context.
|
||||
func NewContext(cli client.Client, ns, rev string) (Context, error) {
|
||||
|
||||
var (
|
||||
ctx = context.Background()
|
||||
manifestCm corev1.ConfigMap
|
||||
)
|
||||
|
||||
if err := cli.Get(ctx, client.ObjectKey{
|
||||
Namespace: ns,
|
||||
Name: rev,
|
||||
}, &manifestCm); err != nil {
|
||||
return nil, errors.WithMessagef(err, "Get manifest ConfigMap %s/%s ", ns, rev)
|
||||
}
|
||||
|
||||
wfCtx, err := newContext(cli, ns, rev)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := wfCtx.LoadFromConfigMap(manifestCm); err != nil {
|
||||
return nil, errors.WithMessagef(err, "load from ConfigMap %s/%s", ns, rev)
|
||||
}
|
||||
|
||||
return wfCtx, wfCtx.Commit()
|
||||
}
|
||||
|
||||
// NewEmptyContext new workflow context without initialize data.
|
||||
func NewEmptyContext(cli client.Client, ns, app string) (Context, error) {
|
||||
wfCtx, err := newContext(cli, ns, app)
|
||||
// NewContext new workflow context without initialize data.
|
||||
func NewContext(cli client.Client, ns, app string, appUID types.UID) (Context, error) {
|
||||
wfCtx, err := newContext(cli, ns, app, appUID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -292,13 +270,22 @@ func NewEmptyContext(cli client.Client, ns, app string) (Context, error) {
|
||||
return wfCtx, wfCtx.Commit()
|
||||
}
|
||||
|
||||
func newContext(cli client.Client, ns, app string) (*WorkflowContext, error) {
|
||||
func newContext(cli client.Client, ns, app string, appUID types.UID) (*WorkflowContext, error) {
|
||||
var (
|
||||
ctx = context.Background()
|
||||
store corev1.ConfigMap
|
||||
)
|
||||
store.Name = generateStoreName(app)
|
||||
store.Namespace = ns
|
||||
store.SetOwnerReferences([]metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: v1beta1.SchemeGroupVersion.String(),
|
||||
Kind: v1beta1.ApplicationKind,
|
||||
Name: app,
|
||||
UID: appUID,
|
||||
Controller: pointer.BoolPtr(true),
|
||||
},
|
||||
})
|
||||
if err := cli.Get(ctx, client.ObjectKey{Name: store.Name, Namespace: store.Namespace}, &store); err != nil {
|
||||
if kerrors.IsNotFound(err) {
|
||||
if err := cli.Create(ctx, &store); err != nil {
|
||||
|
||||
@@ -259,14 +259,11 @@ func TestContext(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
wfCtx, err := NewContext(cli, "default", "app-v1")
|
||||
wfCtx, err := NewContext(cli, "default", "app-v1", "testuid")
|
||||
assert.NilError(t, err)
|
||||
err = wfCtx.Commit()
|
||||
assert.NilError(t, err)
|
||||
|
||||
_, err = NewContext(cli, "default", "app-not-found")
|
||||
assert.Equal(t, err != nil, true)
|
||||
|
||||
wfCtx, err = LoadContext(cli, "default", "app-v1")
|
||||
assert.NilError(t, err)
|
||||
err = wfCtx.Commit()
|
||||
@@ -276,7 +273,7 @@ func TestContext(t *testing.T) {
|
||||
_, err = LoadContext(cli, "default", "app-v1")
|
||||
assert.Equal(t, err != nil, true)
|
||||
|
||||
wfCtx, err = NewEmptyContext(cli, "default", "app-v1")
|
||||
wfCtx, err = NewContext(cli, "default", "app-v1", "testuid")
|
||||
assert.NilError(t, err)
|
||||
assert.Equal(t, len(wfCtx.GetComponents()), 0)
|
||||
_, err = wfCtx.GetComponent("server")
|
||||
|
||||
@@ -106,7 +106,7 @@ func mockContext(t *testing.T) wfContext.Context {
|
||||
return nil
|
||||
},
|
||||
}
|
||||
wfCtx, err := wfContext.NewEmptyContext(cli, "default", "v1")
|
||||
wfCtx, err := wfContext.NewContext(cli, "default", "v1", "testuid")
|
||||
require.NoError(t, err)
|
||||
return wfCtx
|
||||
}
|
||||
|
||||
@@ -144,7 +144,7 @@ func (w *workflow) makeContext(appName string) (wfCtx wfContext.Context, err err
|
||||
return
|
||||
}
|
||||
|
||||
wfCtx, err = wfContext.NewEmptyContext(w.cli, w.app.Namespace, appName)
|
||||
wfCtx, err = wfContext.NewContext(w.cli, w.app.Namespace, appName, w.app.GetUID())
|
||||
|
||||
if err != nil {
|
||||
err = errors.WithMessage(err, "new context")
|
||||
|
||||
@@ -363,6 +363,7 @@ var _ = Describe("Test Workflow", func() {
|
||||
|
||||
func makeTestCase(steps []oamcore.WorkflowStep) (*oamcore.Application, []wfTypes.TaskRunner) {
|
||||
app := &oamcore.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{UID: "test-uid"},
|
||||
Spec: oamcore.ApplicationSpec{
|
||||
Workflow: &oamcore.Workflow{
|
||||
Steps: steps,
|
||||
|
||||
@@ -84,7 +84,7 @@ func LoadApplication(namespace, appName string, c common.Args) (*v1beta1.Applica
|
||||
return app, nil
|
||||
}
|
||||
|
||||
// GetComponents will get oam components from Appfile.
|
||||
// GetComponents will get oam components from v1beta1.Application.
|
||||
func GetComponents(app *v1beta1.Application) []string {
|
||||
var components []string
|
||||
for _, cmp := range app.Spec.Components {
|
||||
|
||||
@@ -24,6 +24,8 @@ import (
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
"github.com/oam-dev/kubevela/pkg/multicluster"
|
||||
|
||||
"github.com/fatih/color"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
@@ -40,73 +42,72 @@ import (
|
||||
|
||||
// NewLogsCommand creates `logs` command to tail logs of application
|
||||
func NewLogsCommand(c common.Args, ioStreams util.IOStreams) *cobra.Command {
|
||||
largs := &Args{C: c}
|
||||
cmd := &cobra.Command{}
|
||||
cmd.Use = "logs"
|
||||
cmd.Short = "Tail logs for application"
|
||||
cmd.Long = "Tail logs for application"
|
||||
cmd.PersistentPreRunE = func(cmd *cobra.Command, args []string) error {
|
||||
if err := c.SetConfig(); err != nil {
|
||||
return err
|
||||
}
|
||||
largs.C = c
|
||||
return nil
|
||||
}
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) < 1 {
|
||||
ioStreams.Errorf("please specify app name")
|
||||
largs := &Args{Args: c}
|
||||
cmd := &cobra.Command{
|
||||
Use: "logs <appName>",
|
||||
Short: "Tail logs for application in multicluster",
|
||||
Long: "Tail logs for application in multicluster",
|
||||
Args: cobra.ExactArgs(1),
|
||||
PreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
if err := c.SetConfig(); err != nil {
|
||||
return err
|
||||
}
|
||||
largs.Args = c
|
||||
largs.Args.Config.Wrap(multicluster.NewSecretModeMultiClusterRoundTripper)
|
||||
return nil
|
||||
}
|
||||
env, err := GetFlagEnvOrCurrent(cmd, c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
app, err := appfile.LoadApplication(env.Namespace, args[0], c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
largs.App = app
|
||||
largs.Env = env
|
||||
ctx := context.Background()
|
||||
if err := largs.Run(ctx, ioStreams); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
cmd.Annotations = map[string]string{
|
||||
types.TagCommandType: types.TypeApp,
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
app, err := appfile.LoadApplication(largs.Namespace, args[0], c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
largs.App = app
|
||||
ctx := context.Background()
|
||||
if err := largs.Run(ctx, ioStreams); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
types.TagCommandType: types.TypeApp,
|
||||
},
|
||||
}
|
||||
cmd.Flags().StringVarP(&largs.Output, "output", "o", "default", "output format for logs, support: [default, raw, json]")
|
||||
cmd.Flags().StringVarP(&largs.Namespace, "namespace", "n", "default", "application namespace")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// Args creates arguments for `logs` command
|
||||
type Args struct {
|
||||
Output string
|
||||
Env *types.EnvMeta
|
||||
C common.Args
|
||||
App *v1beta1.Application
|
||||
Output string
|
||||
Args common.Args
|
||||
Namespace string
|
||||
App *v1beta1.Application
|
||||
}
|
||||
|
||||
// Run refer to the implementation at https://github.com/oam-dev/stern/blob/master/stern/main.go
|
||||
func (l *Args) Run(ctx context.Context, ioStreams util.IOStreams) error {
|
||||
|
||||
clientSet, err := kubernetes.NewForConfig(l.C.Config)
|
||||
clientSet, err := kubernetes.NewForConfig(l.Args.Config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
compName, err := common.AskToChooseOneService(appfile.GetComponents(l.App))
|
||||
appliedResources := l.App.Status.AppliedResources
|
||||
|
||||
selectedRes, err := common.AskToChooseOneAppliedResource(appliedResources)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ctx = multicluster.ContextWithClusterName(ctx, selectedRes.Cluster)
|
||||
// TODO(wonderflow): we could get labels from service to narrow the pods scope selected
|
||||
labelSelector := labels.Everything()
|
||||
pod, err := regexp.Compile(compName + "-.*")
|
||||
pod, err := regexp.Compile(selectedRes.Name + "-.*")
|
||||
if err != nil {
|
||||
return fmt.Errorf("fail to compile '%s' for logs query", compName+".*")
|
||||
return fmt.Errorf("fail to compile '%s' for logs query", selectedRes.Name+".*")
|
||||
}
|
||||
container := regexp.MustCompile(".*")
|
||||
namespace := l.Env.Namespace
|
||||
namespace := selectedRes.Namespace
|
||||
added, removed, err := stern.Watch(ctx, clientSet.CoreV1().Pods(namespace), pod, container, nil, stern.RUNNING, labelSelector)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@@ -66,7 +66,7 @@ func main() {
|
||||
"Determines the namespace in which the leader election configmap will be created.")
|
||||
flag.BoolVar(&enableLeaderElection, "enable-leader-election", false,
|
||||
"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
|
||||
flag.StringVar(&healthAddr, "health-addr", ":9440", "The address the health endpoint binds to.")
|
||||
flag.StringVar(&healthAddr, "health-addr", ":19440", "The address the health endpoint binds to.")
|
||||
flag.Parse()
|
||||
|
||||
// setup logging
|
||||
|
||||
44
runtime/rollout/e2e/Dockerfile.e2e
Normal file
44
runtime/rollout/e2e/Dockerfile.e2e
Normal file
@@ -0,0 +1,44 @@
|
||||
# Build the manager binary.
# The build context is staged under ./tmp by the e2e image build (see .gitignore:
# "e2e rollout runtime image build" runtime/rollout/e2e/tmp).
FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.16-alpine as builder

WORKDIR /workspace
# Copy the Go Modules manifests
COPY ./tmp/go.mod go.mod
COPY ./tmp/go.sum go.sum
# cache deps before building and copying source so that we don't need to re-download as much
# and so that source changes don't invalidate our downloaded layer
RUN go mod download

# Copy the go source
COPY ./tmp/main.go main.go
COPY ./tmp/apis apis/
COPY ./tmp/pkg pkg/
COPY ./tmp/version version/

# Build a static binary for the requested architecture, embedding version
# information via -ldflags (VERSION/GITVERSION default to "undefined").
ARG TARGETARCH
ARG VERSION
ARG GITVERSION
RUN GO111MODULE=on CGO_ENABLED=0 GOOS=linux GOARCH=${TARGETARCH} \
    go build -a -ldflags "-s -w -X github.com/oam-dev/kubevela/version.VelaVersion=${VERSION:-undefined} -X github.com/oam-dev/kubevela/version.GitRevision=${GITVERSION:-undefined}" \
    -o manager-${TARGETARCH} main.go

# Use alpine as base image due to the discussion in issue #1448
# You can replace distroless as minimal base image to package the manager binary
# Refer to https://github.com/GoogleContainerTools/distroless for more details
# Overwrite `BASE_IMAGE` by passing `--build-arg=BASE_IMAGE=gcr.io/distroless/static:nonroot`
ARG BASE_IMAGE
FROM ${BASE_IMAGE:-alpine:latest}
# This is required by the daemon connecting with cri
RUN apk add --no-cache ca-certificates bash

WORKDIR /

ARG TARGETARCH
COPY --from=builder /workspace/manager-${TARGETARCH} /usr/local/bin/manager

COPY ./tmp/entrypoint.sh /usr/local/bin/

ENTRYPOINT ["entrypoint.sh"]

CMD ["manager"]
||||
213
test/e2e-multicluster-test/multicluster_rollout_test.go
Normal file
213
test/e2e-multicluster-test/multicluster_rollout_test.go
Normal file
@@ -0,0 +1,213 @@
|
||||
/*
|
||||
Copyright 2021 The KubeVela Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_multicluster_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
|
||||
"github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
|
||||
"github.com/oam-dev/kubevela/pkg/oam/util"
|
||||
|
||||
"sigs.k8s.io/yaml"
|
||||
)
|
||||
|
||||
var _ = Describe("Test MultiClustet Rollout", func() {
|
||||
Context("Test Runtime Cluster Rollout", func() {
|
||||
var namespace string
|
||||
var hubCtx context.Context
|
||||
var workerCtx context.Context
|
||||
var rollout v1alpha1.Rollout
|
||||
var componentName string
|
||||
var targetDeploy appsv1.Deployment
|
||||
var sourceDeploy appsv1.Deployment
|
||||
|
||||
BeforeEach(func() {
|
||||
hubCtx, workerCtx, namespace = initializeContextAndNamespace()
|
||||
componentName = "hello-world-server"
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
cleanUpNamespace(hubCtx, workerCtx, namespace)
|
||||
ns := v1.Namespace{}
|
||||
Eventually(func() error { return k8sClient.Get(hubCtx, types.NamespacedName{Name: namespace}, &ns) }, 300*time.Second, 300*time.Millisecond).Should(util.NotFoundMatcher{})
|
||||
})
|
||||
|
||||
verifySucceed := func(componentRevision string) {
|
||||
By("check rollout status have succeed")
|
||||
Eventually(func() error {
|
||||
rolloutKey := types.NamespacedName{Namespace: namespace, Name: componentName}
|
||||
if err := k8sClient.Get(workerCtx, rolloutKey, &rollout); err != nil {
|
||||
return err
|
||||
}
|
||||
if rollout.Spec.TargetRevisionName != componentRevision {
|
||||
return fmt.Errorf("rollout have not point to right targetRevision")
|
||||
}
|
||||
if rollout.Status.RollingState != v1alpha1.RolloutSucceedState {
|
||||
return fmt.Errorf("error rollout status state %s", rollout.Status.RollingState)
|
||||
}
|
||||
compRevName := rollout.Spec.TargetRevisionName
|
||||
deployKey := types.NamespacedName{Namespace: namespace, Name: compRevName}
|
||||
if err := k8sClient.Get(workerCtx, deployKey, &targetDeploy); err != nil {
|
||||
return err
|
||||
}
|
||||
if *targetDeploy.Spec.Replicas != *rollout.Spec.RolloutPlan.TargetSize {
|
||||
return fmt.Errorf("targetDeploy replicas missMatch %d, %d", targetDeploy.Spec.Replicas, rollout.Spec.RolloutPlan.TargetSize)
|
||||
}
|
||||
if targetDeploy.Status.UpdatedReplicas != *targetDeploy.Spec.Replicas {
|
||||
return fmt.Errorf("update not finish")
|
||||
}
|
||||
if len(targetDeploy.OwnerReferences) != 1 {
|
||||
return fmt.Errorf("workload ownerReference missMatch")
|
||||
}
|
||||
// guarantee rollout's owners and workload's owners are same
|
||||
if targetDeploy.OwnerReferences[0].Kind != rollout.OwnerReferences[0].Kind ||
|
||||
targetDeploy.OwnerReferences[0].Name != rollout.OwnerReferences[0].Name {
|
||||
return fmt.Errorf("workload ownerReference missMatch")
|
||||
}
|
||||
if rollout.Status.LastSourceRevision == "" {
|
||||
return nil
|
||||
}
|
||||
deployKey = types.NamespacedName{Namespace: namespace, Name: rollout.Status.LastSourceRevision}
|
||||
if err := k8sClient.Get(workerCtx, deployKey, &sourceDeploy); err == nil || !apierrors.IsNotFound(err) {
|
||||
return fmt.Errorf("source deploy still exist")
|
||||
}
|
||||
return nil
|
||||
}, time.Second*360, 300*time.Millisecond).Should(BeNil())
|
||||
}
|
||||
|
||||
It("Test Rollout whole feature in runtime cluster ", func() {
|
||||
app := &v1beta1.Application{}
|
||||
appYaml, err := ioutil.ReadFile("./testdata/app/app-rollout-envbinding.yaml")
|
||||
Expect(err).Should(Succeed())
|
||||
Expect(yaml.Unmarshal([]byte(appYaml), app)).Should(Succeed())
|
||||
app.SetNamespace(namespace)
|
||||
err = k8sClient.Create(hubCtx, app)
|
||||
Expect(err).Should(Succeed())
|
||||
verifySucceed(componentName + "-v1")
|
||||
|
||||
By("update application to v2")
|
||||
checkApp := &v1beta1.Application{}
|
||||
Eventually(func() error {
|
||||
if err := k8sClient.Get(hubCtx, types.NamespacedName{Namespace: namespace, Name: app.Name}, checkApp); err != nil {
|
||||
return err
|
||||
}
|
||||
checkApp.Spec.Components[0].Properties.Raw = []byte(`{"image": "stefanprodan/podinfo:5.0.2"}`)
|
||||
if err := k8sClient.Update(hubCtx, checkApp); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}, 500*time.Millisecond, 30*time.Second).Should(BeNil())
|
||||
verifySucceed(componentName + "-v2")
|
||||
|
||||
By("revert to v1, should guarantee compRev v1 still exist")
|
||||
appYaml, err = ioutil.ReadFile("./testdata/app/revert-app-envbinding.yaml")
|
||||
Expect(err).Should(Succeed())
|
||||
|
||||
Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: namespace, Name: app.Name}, checkApp)).Should(BeNil())
|
||||
revertApp := &v1beta1.Application{}
|
||||
Expect(yaml.Unmarshal([]byte(appYaml), revertApp)).Should(Succeed())
|
||||
revertApp.SetNamespace(namespace)
|
||||
revertApp.SetResourceVersion(checkApp.ResourceVersion)
|
||||
|
||||
Eventually(func() error {
|
||||
if err := k8sClient.Update(hubCtx, revertApp); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}, 500*time.Millisecond, 30*time.Second).Should(BeNil())
|
||||
verifySucceed(componentName + "-v1")
|
||||
})
|
||||
|
||||
It("Test Rollout with health check policy, guarantee health scope controller work ", func() {
|
||||
app := &v1beta1.Application{}
|
||||
appYaml, err := ioutil.ReadFile("./testdata/app/multi-cluster-health-policy.yaml")
|
||||
Expect(err).Should(Succeed())
|
||||
Expect(yaml.Unmarshal([]byte(appYaml), app)).Should(Succeed())
|
||||
app.SetNamespace(namespace)
|
||||
err = k8sClient.Create(hubCtx, app)
|
||||
Expect(err).Should(Succeed())
|
||||
verifySucceed(componentName + "-v1")
|
||||
Eventually(func() error {
|
||||
checkApp := v1beta1.Application{}
|
||||
if err := k8sClient.Get(hubCtx, types.NamespacedName{Namespace: namespace, Name: app.Name}, &checkApp); err != nil {
|
||||
return err
|
||||
}
|
||||
if len(checkApp.Status.Services) == 0 {
|
||||
return fmt.Errorf("app status service haven't write back")
|
||||
}
|
||||
compStatus := checkApp.Status.Services[0]
|
||||
if compStatus.Env != "staging" {
|
||||
return fmt.Errorf("comp status env miss-match")
|
||||
}
|
||||
if !compStatus.Healthy {
|
||||
return fmt.Errorf("comp status not healthy")
|
||||
}
|
||||
if !strings.Contains(compStatus.Message, "Ready:2/2") {
|
||||
return fmt.Errorf("comp status workload check don't work")
|
||||
}
|
||||
return nil
|
||||
}, 300*time.Millisecond, 30*time.Second).Should(BeNil())
|
||||
By("update application to v2")
|
||||
checkApp := &v1beta1.Application{}
|
||||
Eventually(func() error {
|
||||
if err := k8sClient.Get(hubCtx, types.NamespacedName{Namespace: namespace, Name: app.Name}, checkApp); err != nil {
|
||||
return err
|
||||
}
|
||||
checkApp.Spec.Components[0].Properties.Raw = []byte(`{"image": "stefanprodan/podinfo:5.0.2"}`)
|
||||
if err := k8sClient.Update(hubCtx, checkApp); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}, 500*time.Millisecond, 30*time.Second).Should(BeNil())
|
||||
verifySucceed(componentName + "-v2")
|
||||
Eventually(func() error {
|
||||
// Note: KubeVela will only check the workload of the target revision
|
||||
checkApp := v1beta1.Application{}
|
||||
if err := k8sClient.Get(hubCtx, types.NamespacedName{Namespace: namespace, Name: app.Name}, &checkApp); err != nil {
|
||||
return err
|
||||
}
|
||||
if len(checkApp.Status.Services) == 0 {
|
||||
return fmt.Errorf("app status service haven't write back")
|
||||
}
|
||||
compStatus := checkApp.Status.Services[0]
|
||||
if compStatus.Env != "staging" {
|
||||
return fmt.Errorf("comp status env miss-match")
|
||||
}
|
||||
if !compStatus.Healthy {
|
||||
return fmt.Errorf("comp status not healthy")
|
||||
}
|
||||
if !strings.Contains(compStatus.Message, "Ready:2/2") {
|
||||
return fmt.Errorf("comp status workload check don't work")
|
||||
}
|
||||
return nil
|
||||
}, 300*time.Millisecond, 30*time.Second).Should(BeNil())
|
||||
})
|
||||
})
|
||||
})
|
||||
37
test/e2e-multicluster-test/testdata/app/app-rollout-envbinding.yaml
vendored
Normal file
37
test/e2e-multicluster-test/testdata/app/app-rollout-envbinding.yaml
vendored
Normal file
@@ -0,0 +1,37 @@
|
||||
# Test fixture: an Application whose webservice component carries a rollout
# trait (2 replicas in 2 batches) and is placed onto the worker cluster via
# the env-binding policy and the deploy2env workflow step.
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
  name: example-app
  namespace: default
spec:
  components:
    - name: hello-world-server
      type: webservice
      properties:
        image: stefanprodan/podinfo:4.0.3
      traits:
        - type: rollout
          properties:
            targetSize: 2
            rolloutBatches:
              - replicas: 1
              - replicas: 1

  policies:
    - name: example-multi-env-policy
      type: env-binding
      properties:
        envs:
          - name: staging
            placement: # selecting the cluster to deploy to
              clusterSelector:
                name: cluster-worker

  workflow:
    steps:
      # deploy to staging env
      - name: deploy-staging
        type: deploy2env
        properties:
          policy: example-multi-env-policy
          env: staging
||||
48
test/e2e-multicluster-test/testdata/app/multi-cluster-health-policy.yaml
vendored
Normal file
48
test/e2e-multicluster-test/testdata/app/multi-cluster-health-policy.yaml
vendored
Normal file
@@ -0,0 +1,48 @@
|
||||
# Test fixture: an Application with a rollout trait plus a health policy, used
# to verify that the health scope controller reports component status for a
# workload rolled out to the worker cluster.
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
  name: example-app-rollout
  namespace: default
spec:
  components:
    - name: hello-world-server
      type: webservice
      properties:
        image: crccheck/hello-world
        port: 8000
      traits:
        - type: rollout
          properties:
            targetSize: 2
            rolloutBatches:
              - replicas: 1
              - replicas: 1

  policies:
    - name: example-multi-env-policy
      type: env-binding
      properties:
        envs:
          - name: staging
            placement: # select the cluster to deploy to, using the default rollout strategy
              clusterSelector:
                name: cluster-worker

    - name: health-policy-demo
      type: health
      properties:
        probeInterval: 5
        probeTimeout: 10

  workflow:
    steps:
      # deploy to the staging environment
      - name: deploy-staging
        type: deploy2env
        properties:
          policy: example-multi-env-policy
          env: staging
|
||||
38
test/e2e-multicluster-test/testdata/app/revert-app-envbinding.yaml
vendored
Normal file
38
test/e2e-multicluster-test/testdata/app/revert-app-envbinding.yaml
vendored
Normal file
@@ -0,0 +1,38 @@
|
||||
# Test fixture: same Application as app-rollout-envbinding.yaml but with the
# rollout trait pinned to targetRevision hello-world-server-v1; used to verify
# that a rolled-out component can be reverted to its first revision.
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
  name: example-app
  namespace: default
spec:
  components:
    - name: hello-world-server
      type: webservice
      properties:
        image: stefanprodan/podinfo:5.0.2
      traits:
        - type: rollout
          properties:
            targetRevision: hello-world-server-v1
            targetSize: 2
            rolloutBatches:
              - replicas: 1
              - replicas: 1

  policies:
    - name: example-multi-env-policy
      type: env-binding
      properties:
        envs:
          - name: staging
            placement: # selecting the cluster to deploy to
              clusterSelector:
                name: cluster-worker

  workflow:
    steps:
      # deploy to staging env
      - name: deploy-staging
        type: deploy2env
        properties:
          policy: example-multi-env-policy
          env: staging
|
||||
@@ -18,18 +18,20 @@ package controllers_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"sigs.k8s.io/yaml"
|
||||
|
||||
v1 "k8s.io/api/apps/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/yaml"
|
||||
|
||||
"github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
|
||||
"github.com/oam-dev/kubevela/pkg/oam"
|
||||
"github.com/oam-dev/kubevela/pkg/oam/util"
|
||||
"github.com/oam-dev/kubevela/pkg/utils/common"
|
||||
|
||||
@@ -113,10 +115,22 @@ var _ = Describe("rollout related e2e-test,rollout trait test", func() {
|
||||
return fmt.Errorf("error rollout status state %s", rollout.Status.RollingState)
|
||||
}
|
||||
compRevName = rollout.Spec.TargetRevisionName
|
||||
if rollout.GetAnnotations() == nil || rollout.GetAnnotations()[oam.AnnotationWorkloadName] != componentRevision {
|
||||
return fmt.Errorf("target workload name annotation missmatch want %s acctually %s",
|
||||
rollout.GetAnnotations()[oam.AnnotationWorkloadName], componentRevision)
|
||||
}
|
||||
deployKey := types.NamespacedName{Namespace: namespaceName, Name: compRevName}
|
||||
if err := k8sClient.Get(ctx, deployKey, &targerDeploy); err != nil {
|
||||
return err
|
||||
}
|
||||
gvkStr := rollout.GetAnnotations()[oam.AnnotationWorkloadGVK]
|
||||
gvk := map[string]string{}
|
||||
if err := json.Unmarshal([]byte(gvkStr), &gvk); err != nil {
|
||||
return err
|
||||
}
|
||||
if gvk["apiVersion"] != "apps/v1" || gvk["kind"] != "Deployment" {
|
||||
return fmt.Errorf("error targetWorkload gvk")
|
||||
}
|
||||
if *targerDeploy.Spec.Replicas != *rollout.Spec.RolloutPlan.TargetSize {
|
||||
return fmt.Errorf("targetDeploy replicas missMatch %d, %d", targerDeploy.Spec.Replicas, rollout.Spec.RolloutPlan.TargetSize)
|
||||
}
|
||||
@@ -139,12 +153,13 @@ var _ = Describe("rollout related e2e-test,rollout trait test", func() {
|
||||
return fmt.Errorf("source deploy still exist")
|
||||
}
|
||||
return nil
|
||||
}, time.Second*360, 300*time.Millisecond).Should(BeNil())
|
||||
}, time.Second*60, 300*time.Millisecond).Should(BeNil())
|
||||
}
|
||||
|
||||
BeforeEach(func() {
|
||||
By("Start to run a test, init whole env")
|
||||
namespaceName = randomNamespaceName("rollout-trait-e2e-test")
|
||||
app = v1beta1.Application{}
|
||||
createNamespace()
|
||||
createAllDef()
|
||||
componentName = "express-server"
|
||||
@@ -202,7 +217,7 @@ var _ = Describe("rollout related e2e-test,rollout trait test", func() {
|
||||
if err = k8sClient.Get(ctx, appKey, checkApp); err != nil {
|
||||
return err
|
||||
}
|
||||
checkApp.Spec.Components[0].Traits[0].Properties.Raw = []byte(`{"targetRevision":"express-server-v2"}`)
|
||||
checkApp.Spec.Components[0].Traits[0].Properties.Raw = []byte(`{"targetRevision":"express-server-v2","firstBatchReplicas":1,"secondBatchReplicas":1}`)
|
||||
if err = k8sClient.Update(ctx, checkApp); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -305,6 +320,95 @@ var _ = Describe("rollout related e2e-test,rollout trait test", func() {
|
||||
return nil
|
||||
}, 30*time.Second, 300*time.Millisecond).Should(BeNil())
|
||||
})
|
||||
|
||||
It("rollout scale up adnd down without rollout batches", func() {
|
||||
By("first scale operation")
|
||||
Expect(common.ReadYamlToObject("testdata/rollout/deployment/application.yaml", &app)).Should(BeNil())
|
||||
app.Namespace = namespaceName
|
||||
Expect(k8sClient.Create(ctx, &app)).Should(BeNil())
|
||||
|
||||
verifySuccess("express-server-v1")
|
||||
By("scale again to targetSize 4")
|
||||
appKey := types.NamespacedName{Namespace: namespaceName, Name: app.Name}
|
||||
checkApp := &v1beta1.Application{}
|
||||
Eventually(func() error {
|
||||
if err = k8sClient.Get(ctx, appKey, checkApp); err != nil {
|
||||
return err
|
||||
}
|
||||
// scale up without rollout batches, test rollout controller will fill default batches
|
||||
checkApp.Spec.Components[0].Traits[0].Properties.Raw =
|
||||
[]byte(`{"targetSize":4}`)
|
||||
if err = k8sClient.Update(ctx, checkApp); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}, 30*time.Second, 300*time.Millisecond).Should(BeNil())
|
||||
Eventually(func() error {
|
||||
checkRollout := v1alpha1.Rollout{}
|
||||
if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespaceName, Name: componentName}, &checkRollout); err != nil {
|
||||
return err
|
||||
}
|
||||
if *checkRollout.Spec.RolloutPlan.TargetSize != 4 {
|
||||
return fmt.Errorf("rollout targetSize haven't update")
|
||||
}
|
||||
if len(checkRollout.Spec.RolloutPlan.RolloutBatches) != 1 {
|
||||
return fmt.Errorf("fail to fill rollout batches")
|
||||
}
|
||||
if checkRollout.Spec.RolloutPlan.RolloutBatches[0].Replicas != intstr.FromInt(2) {
|
||||
return fmt.Errorf("fill rollout batches missmatch")
|
||||
}
|
||||
return nil
|
||||
}, 30*time.Second, 300*time.Millisecond).Should(BeNil())
|
||||
verifySuccess("express-server-v1")
|
||||
checkApp = &v1beta1.Application{}
|
||||
By("update application upgrade to v2")
|
||||
Eventually(func() error {
|
||||
if err = k8sClient.Get(ctx, appKey, checkApp); err != nil {
|
||||
return err
|
||||
}
|
||||
checkApp.Spec.Components[0].Properties.Raw = []byte(`{"image":"stefanprodan/podinfo:4.0.3","cpu":"0.1"}`)
|
||||
checkApp.Spec.Components[0].Traits[0].Properties.Raw =
|
||||
[]byte(`{"firstBatchReplicas":2,"secondBatchReplicas":2,"targetSize":4}`)
|
||||
if err = k8sClient.Update(ctx, checkApp); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}, 30*time.Second, 300*time.Millisecond).Should(BeNil())
|
||||
verifySuccess("express-server-v2")
|
||||
|
||||
By("scale down to targetSize 2")
|
||||
appKey = types.NamespacedName{Namespace: namespaceName, Name: app.Name}
|
||||
checkApp = &v1beta1.Application{}
|
||||
Eventually(func() error {
|
||||
if err = k8sClient.Get(ctx, appKey, checkApp); err != nil {
|
||||
return err
|
||||
}
|
||||
// scale down without rollout batches, test rollout controller will fill default batches
|
||||
checkApp.Spec.Components[0].Traits[0].Properties.Raw =
|
||||
[]byte(`{"targetSize":2}`)
|
||||
if err = k8sClient.Update(ctx, checkApp); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}, 30*time.Second, 300*time.Millisecond).Should(BeNil())
|
||||
Eventually(func() error {
|
||||
checkRollout := v1alpha1.Rollout{}
|
||||
if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespaceName, Name: componentName}, &checkRollout); err != nil {
|
||||
return err
|
||||
}
|
||||
if *checkRollout.Spec.RolloutPlan.TargetSize != 2 {
|
||||
return fmt.Errorf("rollout targetSize haven't update")
|
||||
}
|
||||
if len(checkRollout.Spec.RolloutPlan.RolloutBatches) != 1 {
|
||||
return fmt.Errorf("fail to fill rollout batches")
|
||||
}
|
||||
if checkRollout.Spec.RolloutPlan.RolloutBatches[0].Replicas != intstr.FromInt(2) {
|
||||
return fmt.Errorf("fill rollout batches missmatch")
|
||||
}
|
||||
return nil
|
||||
}, 30*time.Second, 300*time.Millisecond).Should(BeNil())
|
||||
verifySuccess("express-server-v2")
|
||||
})
|
||||
})
|
||||
|
||||
const (
|
||||
@@ -381,9 +485,11 @@ spec:
|
||||
componentName: context.name
|
||||
rolloutPlan: {
|
||||
rolloutStrategy: "DecreaseFirst"
|
||||
rolloutBatches:[
|
||||
if parameter.firstBatchReplicas != _|_ && parameter.secondBatchReplicas != _|_ {
|
||||
rolloutBatches:[
|
||||
{ replicas: parameter.firstBatchReplicas},
|
||||
{ replicas: parameter.secondBatchReplicas}]
|
||||
}
|
||||
targetSize: parameter.targetSize
|
||||
if parameter["batchPartition"] != _|_ {
|
||||
batchPartition: parameter.batchPartition
|
||||
@@ -395,8 +501,8 @@ spec:
|
||||
parameter: {
|
||||
targetRevision: *context.revision|string
|
||||
targetSize: *2|int
|
||||
firstBatchReplicas: *1|int
|
||||
secondBatchReplicas: *1|int
|
||||
firstBatchReplicas?: int
|
||||
secondBatchReplicas?: int
|
||||
batchPartition?: int
|
||||
}`
|
||||
)
|
||||
|
||||
@@ -11,4 +11,6 @@ spec:
|
||||
traits:
|
||||
- type: rollout
|
||||
properties:
|
||||
targetSize: 2
|
||||
targetSize: 2
|
||||
firstBatchReplicas: 1
|
||||
secondBatchReplicas: 1
|
||||
@@ -5,7 +5,7 @@ metadata:
|
||||
addons.oam.dev/description: Kubernetes Terraform Controller for Alibaba Cloud
|
||||
addons.oam.dev/name: terraform/provider-alibaba
|
||||
name: terraform-provider-alibaba
|
||||
namespace: default
|
||||
namespace: vela-system
|
||||
spec:
|
||||
components:
|
||||
- name: alibaba-account-creds
|
||||
|
||||
@@ -5,7 +5,7 @@ metadata:
|
||||
addons.oam.dev/description: Kubernetes Terraform Controller for AWS
|
||||
addons.oam.dev/name: terraform/provider-aws
|
||||
name: terraform-provider-aws
|
||||
namespace: default
|
||||
namespace: vela-system
|
||||
spec:
|
||||
components:
|
||||
- name: aws-account-creds
|
||||
|
||||
@@ -5,7 +5,7 @@ metadata:
|
||||
addons.oam.dev/description: Kubernetes Terraform Controller for Azure
|
||||
addons.oam.dev/name: terraform/provider-azure
|
||||
name: terraform-provider-azure
|
||||
namespace: default
|
||||
namespace: vela-system
|
||||
spec:
|
||||
components:
|
||||
- name: azure-account-creds
|
||||
|
||||
@@ -5,7 +5,7 @@ metadata:
|
||||
addons.oam.dev/description: Kubernetes Terraform Controller for Alibaba Cloud
|
||||
addons.oam.dev/name: terraform/provider-alibaba
|
||||
name: terraform-provider-alibaba
|
||||
namespace: default
|
||||
namespace: vela-system
|
||||
spec:
|
||||
components:
|
||||
- name: alibaba-account-creds
|
||||
|
||||
@@ -5,7 +5,7 @@ metadata:
|
||||
addons.oam.dev/description: Kubernetes Terraform Controller for AWS
|
||||
addons.oam.dev/name: terraform/provider-aws
|
||||
name: terraform-provider-aws
|
||||
namespace: default
|
||||
namespace: vela-system
|
||||
spec:
|
||||
components:
|
||||
- name: aws-account-creds
|
||||
|
||||
@@ -5,7 +5,7 @@ metadata:
|
||||
addons.oam.dev/description: Kubernetes Terraform Controller for Azure
|
||||
addons.oam.dev/name: terraform/provider-azure
|
||||
name: terraform-provider-azure
|
||||
namespace: default
|
||||
namespace: vela-system
|
||||
spec:
|
||||
components:
|
||||
- name: azure-account-creds
|
||||
|
||||
@@ -20,8 +20,10 @@ template: {
|
||||
componentName: context.name
|
||||
rolloutPlan: {
|
||||
rolloutStrategy: "IncreaseFirst"
|
||||
rolloutBatches: parameter.rolloutBatches
|
||||
targetSize: parameter.targetSize
|
||||
if parameter.rolloutBatches != _|_ {
|
||||
rolloutBatches: parameter.rolloutBatches
|
||||
}
|
||||
targetSize: parameter.targetSize
|
||||
if parameter["batchPartition"] != _|_ {
|
||||
batchPartition: parameter.batchPartition
|
||||
}
|
||||
@@ -32,7 +34,7 @@ template: {
|
||||
parameter: {
|
||||
targetRevision: *context.revision | string
|
||||
targetSize: int
|
||||
rolloutBatches: [...rolloutBatch]
|
||||
rolloutBatches?: [...rolloutBatch]
|
||||
batchPartition?: int
|
||||
}
|
||||
|
||||
|
||||
@@ -93,17 +93,17 @@ template: {
|
||||
url?: string
|
||||
value?: string
|
||||
style?: string
|
||||
text?: text
|
||||
text?: textType
|
||||
confirm?: {
|
||||
title: text
|
||||
text: text
|
||||
confirm: text
|
||||
deny: text
|
||||
title: textType
|
||||
text: textType
|
||||
confirm: textType
|
||||
deny: textType
|
||||
style?: string
|
||||
}
|
||||
options?: [...option]
|
||||
initial_options?: [...option]
|
||||
placeholder?: text
|
||||
placeholder?: textType
|
||||
initial_date?: string
|
||||
image_url?: string
|
||||
alt_text?: string
|
||||
@@ -120,7 +120,7 @@ template: {
|
||||
}]
|
||||
}
|
||||
|
||||
text: {
|
||||
textType: {
|
||||
type: string
|
||||
text: string
|
||||
emoji?: bool
|
||||
@@ -128,9 +128,9 @@ template: {
|
||||
}
|
||||
|
||||
option: {
|
||||
text: text
|
||||
text: textType
|
||||
value: string
|
||||
description?: text
|
||||
description?: textType
|
||||
url?: string
|
||||
}
|
||||
|
||||
|
||||
Reference in New Issue
Block a user