move health check and status out of extension
@@ -64,12 +64,26 @@ type WorkloadDefinitionSpec struct {
	// +optional
	PodSpecPath string `json:"podSpecPath,omitempty"`

	// Status defines the custom health policy and status message for workload
	// +optional
	Status *Status `json:"status,omitempty"`

	// Extension is used for extension needs by OAM platform builders
	// +optional
	// +kubebuilder:pruning:PreserveUnknownFields
	Extension *runtime.RawExtension `json:"extension,omitempty"`
}

// Status defines the loop back status of the abstraction by using CUE template
type Status struct {
	// CustomStatus defines the custom status message that could display to user
	// +optional
	CustomStatus string `json:"customStatus,omitempty"`
	// HealthPolicy defines the health check policy for the abstraction
	// +optional
	HealthPolicy string `json:"healthPolicy,omitempty"`
}

// +kubebuilder:object:root=true

// A WorkloadDefinition registers a kind of Kubernetes custom resource as a

@@ -126,6 +140,10 @@ type TraitDefinitionSpec struct {
	// +optional
	ConflictsWith []string `json:"conflictsWith,omitempty"`

	// Status defines the custom health policy and status message for trait
	// +optional
	Status *Status `json:"status,omitempty"`

	// Extension is used for extension needs by OAM platform builders
	// +optional
	// +kubebuilder:pruning:PreserveUnknownFields
@@ -1649,6 +1649,21 @@ func (in *SecretKeySelector) DeepCopy() *SecretKeySelector {
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Status) DeepCopyInto(out *Status) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Status.
func (in *Status) DeepCopy() *Status {
	if in == nil {
		return nil
	}
	out := new(Status)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TCPSocketProbe) DeepCopyInto(out *TCPSocketProbe) {
	*out = *in

@@ -1736,6 +1751,11 @@ func (in *TraitDefinitionSpec) DeepCopyInto(out *TraitDefinitionSpec) {
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Status != nil {
		in, out := &in.Status, &out.Status
		*out = new(Status)
		**out = **in
	}
	if in.Extension != nil {
		in, out := &in.Extension, &out.Extension
		*out = new(runtime.RawExtension)

@@ -1884,6 +1904,11 @@ func (in *WorkloadDefinitionSpec) DeepCopyInto(out *WorkloadDefinitionSpec) {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Status != nil {
		in, out := &in.Status, &out.Status
		*out = new(Status)
		**out = **in
	}
	if in.Extension != nil {
		in, out := &in.Extension, &out.Extension
		*out = new(runtime.RawExtension)
@@ -68,6 +68,16 @@ spec:
            revisionEnabled:
              description: Revision indicates whether a trait is aware of component revision
              type: boolean
            status:
              description: Status defines the custom health policy and status message for trait
              properties:
                customStatus:
                  description: CustomStatus defines the custom status message that could display to user
                  type: string
                healthPolicy:
                  description: HealthPolicy defines the health check policy for the abstraction
                  type: string
              type: object
            workloadRefPath:
              description: WorkloadRefPath indicates where/if a trait accepts a workloadRef object
              type: string

@@ -82,6 +82,16 @@ spec:
            revisionLabel:
              description: RevisionLabel indicates which label for underlying resources(e.g. pods) of this workload can be used by trait to create resource selectors(e.g. label selector for pods).
              type: string
            status:
              description: Status defines the custom health policy and status message for workload
              properties:
                customStatus:
                  description: CustomStatus defines the custom status message that could display to user
                  type: string
                healthPolicy:
                  description: HealthPolicy defines the health check policy for the abstraction
                  type: string
              type: object
          required:
          - definitionRef
          type: object
@@ -7,6 +7,16 @@ metadata:
      Please use route trait in cap center for advanced usage."
  name: ingress
spec:
  status:
    customStatus: |-
      if len(context.outputs.ingress.status.loadBalancer.ingress) > 0 {
        message: "Visiting URL: " + context.outputs.ingress.spec.rules[0].host + ", IP: " + context.outputs.ingress.status.loadBalancer.ingress[0].ip
      }
      if len(context.outputs.ingress.status.loadBalancer.ingress) == 0 {
        message: "No loadBalancer found, visiting by using 'vela port-forward " + context.appName + " --route'\n"
      }
    healthPolicy: |
      isHealth: len(context.outputs.service.spec.clusterIP) > 0
  appliesToWorkloads:
    - webservice
    - worker
@@ -1,4 +1,3 @@
# Code generated by KubeVela templates. DO NOT EDIT.
apiVersion: core.oam.dev/v1alpha2
kind: WorkloadDefinition
metadata:

@@ -8,11 +7,12 @@ metadata:
spec:
  definitionRef:
    name: deployments.apps
  extension:
  status:
    healthPolicy: |
      isHealth: (context.output.status.readyReplicas > 0) && (context.output.status.readyReplicas == context.output.status.replicas)
    customStatus: |-
      message: "type: " + context.output.spec.template.spec.containers[0].image + ",\t enemies:" + context.outputs.gameconfig.data.enemies
  extension:
    template: |
      output: {
        apiVersion: "apps/v1"
@@ -73,11 +73,12 @@ kind: TraitDefinition
metadata:
  name: ingress
spec:
  extension:
  status:
    customStatus: |-
      message: "type: "+ context.outputs.service.spec.type +",\t clusterIP:"+ context.outputs.service.spec.clusterIP+",\t ports:"+ "\(context.outputs.service.spec.ports[0].port)"+",\t domain"+context.outputs.ingress.spec.rules[0].host
    healthPolicy: |
      isHealth: len(context.outputs.service.spec.clusterIP) > 0
  extension:
    template: |
      parameter: {
        domain: string
@@ -46,8 +46,7 @@ Services:
    Created at: ...
    Updated at: ...
    Traits:
      - ✅ ingress: domain=testsvc.example.com
            http=map[/:8000]
      - ✅ ingress: Visiting URL: testsvc.example.com, IP: <your IP address>
```

**In [kind cluster setup](./install.md#kind)**, you can visit the service via localhost. In other setups, replace localhost with ingress address accordingly.
@@ -6,6 +6,16 @@ metadata:
      Please use route trait in cap center for advanced usage."
  name: ingress
spec:
  status:
    customStatus: |-
      if len(context.outputs.ingress.status.loadBalancer.ingress) > 0 {
        message: "Visiting URL: " + context.outputs.ingress.spec.rules[0].host + ", IP: " + context.outputs.ingress.status.loadBalancer.ingress[0].ip
      }
      if len(context.outputs.ingress.status.loadBalancer.ingress) == 0 {
        message: "No loadBalancer found, visiting by using 'vela port-forward " + context.appName + " --route'\n"
      }
    healthPolicy: |
      isHealth: len(context.outputs.service.spec.clusterIP) > 0
  appliesToWorkloads:
    - webservice
    - worker
@@ -67,6 +67,16 @@ spec:
            revisionEnabled:
              description: Revision indicates whether a trait is aware of component revision
              type: boolean
            status:
              description: Status defines the custom health policy and status message for trait
              properties:
                customStatus:
                  description: CustomStatus defines the custom status message that could display to user
                  type: string
                healthPolicy:
                  description: HealthPolicy defines the health check policy for the abstraction
                  type: string
              type: object
            workloadRefPath:
              description: WorkloadRefPath indicates where/if a trait accepts a workloadRef object
              type: string

@@ -81,6 +81,16 @@ spec:
            revisionLabel:
              description: RevisionLabel indicates which label for underlying resources(e.g. pods) of this workload can be used by trait to create resource selectors(e.g. label selector for pods).
              type: string
            status:
              description: Status defines the custom health policy and status message for workload
              properties:
                customStatus:
                  description: CustomStatus defines the custom status message that could display to user
                  type: string
                healthPolicy:
                  description: HealthPolicy defines the health check policy for the abstraction
                  type: string
              type: object
          required:
          - definitionRef
          type: object
@@ -7,8 +7,6 @@ import (
	"strings"
	"time"

	"github.com/oam-dev/kubevela/pkg/oam/util"

	"github.com/fatih/color"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"

@@ -19,6 +17,7 @@ import (
	"github.com/oam-dev/kubevela/pkg/appfile"
	"github.com/oam-dev/kubevela/pkg/appfile/api"
	cmdutil "github.com/oam-dev/kubevela/pkg/commands/util"
	"github.com/oam-dev/kubevela/pkg/oam/util"
)

// HealthStatus represents health status strings.

@@ -274,7 +273,7 @@ func TrackDeployStatus(ctx context.Context, c client.Client, compName, appName s

// trackHealthCheckingStatus will check health status from health scope
func trackHealthCheckingStatus(ctx context.Context, c client.Client, compName, appName string, env *types.EnvMeta) (CompStatus, HealthStatus, string, error) {
	app, err := loadRemoteApplication(c, appName, env.Namespace)
	app, err := loadRemoteApplication(c, env.Namespace, appName)
	if err != nil {
		return compStatusUnknown, HealthStatusNotDiagnosed, "", err
	}
@@ -110,7 +110,7 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
	}

	app.Status.SetConditions(readyCondition("Built"))
	applog.Info("apply applicationconfig & component to the cluster")
	applog.Info("apply appConfig & component to the cluster")
	// apply appConfig & component to the cluster
	if err := handler.apply(ctx, ac, comps); err != nil {
		handler.l.Error(err, "[Handle apply]")
@@ -825,54 +825,110 @@ var _ = Describe("Test Application Controller", func() {

	It("app with health policy and custom status for workload", func() {
		By("change workload and trait definition with health policy")
		nwd, owd := &v1alpha2.WorkloadDefinition{}, &v1alpha2.WorkloadDefinition{}
		wDDefJson, _ := yaml.YAMLToJSON([]byte(wDDefWithHealthYaml))
		nwd := &v1alpha2.WorkloadDefinition{}
		wDDefJson, _ := yaml.YAMLToJSON([]byte(wdDefWithHealthStatusYaml))
		Expect(json.Unmarshal(wDDefJson, nwd)).Should(BeNil())
		Expect(k8sClient.Get(ctx, client.ObjectKey{Name: "worker"}, owd)).Should(BeNil())
		nwd.ResourceVersion = owd.ResourceVersion
		Expect(k8sClient.Update(ctx, nwd)).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
		ntd, otd := &v1alpha2.TraitDefinition{}, &v1alpha2.TraitDefinition{}
		tDDefJson, _ := yaml.YAMLToJSON([]byte(tDDefWithHealthYaml))
		Expect(k8sClient.Create(ctx, nwd)).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
		ntd := &v1alpha2.TraitDefinition{}
		tDDefJson, _ := yaml.YAMLToJSON([]byte(tDDefWithHealthStatusYaml))
		Expect(json.Unmarshal(tDDefJson, ntd)).Should(BeNil())
		Expect(k8sClient.Get(ctx, client.ObjectKey{Name: "scaler"}, otd)).Should(BeNil())
		ntd.ResourceVersion = otd.ResourceVersion
		Expect(k8sClient.Update(ctx, ntd)).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
		compName := "myweb-health"
		expDeployment := getExpDeployment(compName, appWithTrait.Name)
		Expect(k8sClient.Create(ctx, ntd)).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
		compName := "myweb-health-status"
		appWithTraitHealthStatus := appWithTrait.DeepCopy()
		appWithTraitHealthStatus.Name = "app-trait-health-status"
		expDeployment := getExpDeployment(compName, appWithTraitHealthStatus.Name)

		By("create the new namespace")
		ns := &corev1.Namespace{
			ObjectMeta: metav1.ObjectMeta{
				Name: "vela-test-with-health",
				Name: "vela-test-with-health-status",
			},
		}
		appWithTrait.SetNamespace(ns.Name)
		appWithTraitHealthStatus.SetNamespace(ns.Name)
		Expect(k8sClient.Create(ctx, ns)).Should(BeNil())

		app := appWithTrait.DeepCopy()
		app := appWithTraitHealthStatus.DeepCopy()
		app.Spec.Components[0].Name = compName
		app.Spec.Components[0].WorkloadType = "nworker"
		app.Spec.Components[0].Settings = runtime.RawExtension{Raw: []byte(`{"cmd":["sleep","1000"],"image":"busybox3","lives":"3","enemies":"alain"}`)}
		app.Spec.Components[0].Traits[0].Name = "ingress"
		app.Spec.Components[0].Traits[0].Properties = runtime.RawExtension{Raw: []byte(`{"domain":"example.com","http":{"/":80}}`)}

		expDeployment.Name = app.Name
		expDeployment.Namespace = ns.Name
		expDeployment.Labels[oam.LabelAppName] = app.Name
		expDeployment.Labels[oam.LabelAppComponent] = compName
		expDeployment.Labels["app.oam.dev/resourceType"] = "WORKLOAD"
		Expect(k8sClient.Create(ctx, expDeployment)).Should(BeNil())
		expTrait := expectScalerTrait(compName, app.Name)
		expTrait.SetName(app.Name)

		expWorkloadTrait := unstructured.Unstructured{Object: map[string]interface{}{
			"apiVersion": "v1",
			"kind": "ConfigMap",
			"metadata": map[string]interface{}{
				"labels": map[string]interface{}{
					"trait.oam.dev/type": "AuxiliaryWorkload",
					"app.oam.dev/component": compName,
					"app.oam.dev/name": app.Name,
					"trait.oam.dev/resource": "gameconfig",
				},
			},
			"data": map[string]interface{}{
				"enemies": "alien",
				"lives": "3",
			},
		}}
		expWorkloadTrait.SetName("myweb-health-statusgame-config")
		expWorkloadTrait.SetNamespace(app.Namespace)
		Expect(k8sClient.Create(ctx, &expWorkloadTrait)).Should(BeNil())

		expTrait := unstructured.Unstructured{Object: map[string]interface{}{
			"apiVersion": "networking.k8s.io/v1beta1",
			"kind": "Ingress",
			"metadata": map[string]interface{}{
				"labels": map[string]interface{}{
					"trait.oam.dev/type": "ingress",
					"trait.oam.dev/resource": "ingress",
					"app.oam.dev/component": compName,
					"app.oam.dev/name": app.Name,
				},
			},
			"spec": map[string]interface{}{
				"rules": []interface{}{
					map[string]interface{}{
						"host": "example.com",
					},
				},
			},
		}}
		expTrait.SetName(compName)
		expTrait.SetNamespace(app.Namespace)
		expTrait.SetLabels(map[string]string{
			oam.LabelAppName: app.Name,
			"trait.oam.dev/type": "scaler",
			"app.oam.dev/component": "myweb-health",
		})
		(expTrait.Object["spec"].(map[string]interface{}))["workloadRef"] = map[string]interface{}{
			"apiVersion": "apps/v1",
			"kind": "Deployment",
			"name": app.Name,
		}
		Expect(k8sClient.Create(ctx, &expTrait)).Should(BeNil())

		By("enrich the status of deployment and scaler trait")
		expTrait2 := unstructured.Unstructured{Object: map[string]interface{}{
			"apiVersion": "v1",
			"kind": "Service",
			"metadata": map[string]interface{}{
				"labels": map[string]interface{}{
					"trait.oam.dev/type": "ingress",
					"trait.oam.dev/resource": "service",
					"app.oam.dev/component": compName,
					"app.oam.dev/name": app.Name,
				},
			},
			"spec": map[string]interface{}{
				"clusterIP": "10.0.0.4",
				"ports": []interface{}{
					map[string]interface{}{
						"port": 80,
					},
				},
			},
		}}
		expTrait2.SetName(app.Name)
		expTrait2.SetNamespace(app.Namespace)
		Expect(k8sClient.Create(ctx, &expTrait2)).Should(BeNil())

		By("enrich the status of deployment and ingress trait")
		expDeployment.Status.Replicas = 1
		expDeployment.Status.ReadyReplicas = 1
		Expect(k8sClient.Status().Update(ctx, expDeployment)).Should(BeNil())
@@ -881,20 +937,6 @@ var _ = Describe("Test Application Controller", func() {
			Namespace: app.Namespace,
			Name: app.Name,
		}, got)).Should(BeNil())
		expTrait.Object["status"] = v1alpha1.ConditionedStatus{
			Conditions: []v1alpha1.Condition{{
				Status: corev1.ConditionTrue,
				LastTransitionTime: metav1.Now(),
			}},
		}
		Expect(k8sClient.Status().Update(ctx, &expTrait)).Should(BeNil())
		tGot := &unstructured.Unstructured{}
		tGot.SetAPIVersion("core.oam.dev/v1alpha2")
		tGot.SetKind("ManualScalerTrait")
		Expect(k8sClient.Get(ctx, client.ObjectKey{
			Namespace: app.Namespace,
			Name: app.Name,
		}, tGot)).Should(BeNil())

		By("apply appfile")
		Expect(k8sClient.Create(ctx, app)).Should(BeNil())
@@ -905,13 +947,12 @@ var _ = Describe("Test Application Controller", func() {
		reconcileRetry(reconciler, reconcile.Request{NamespacedName: appKey})

		By("Check App running successfully")

		checkApp := &v1alpha2.Application{}
		Eventually(func() string {
			_, err := reconciler.Reconcile(reconcile.Request{NamespacedName: appKey})
			if err != nil {
				return err.Error()
			}
			checkApp := &v1alpha2.Application{}
			err = k8sClient.Get(ctx, appKey, checkApp)
			if err != nil {
				return err.Error()
@@ -921,7 +962,20 @@ var _ = Describe("Test Application Controller", func() {
			}
			return string(checkApp.Status.Phase)
		}(), 5*time.Second, time.Second).Should(BeEquivalentTo(v1alpha2.ApplicationRunning))

		Expect(checkApp.Status.Services).Should(BeEquivalentTo([]v1alpha2.ApplicationComponentStatus{
			{
				Name: compName,
				Healthy: true,
				Message: "type: busybox,\t enemies:alien",
				Traits: []v1alpha2.ApplicationTraitStatus{
					{
						Type: "ingress",
						Healthy: true,
						Message: "type: ClusterIP,\t clusterIP:10.0.0.4,\t ports:80,\t domainexample.com",
					},
				},
			},
		}))
		Expect(k8sClient.Delete(ctx, app)).Should(BeNil())
	})
})
@@ -1158,6 +1212,73 @@ spec:

        cmd?: [...string]
      }
`
	wdDefWithHealthStatusYaml = `apiVersion: core.oam.dev/v1alpha2
kind: WorkloadDefinition
metadata:
  name: nworker
  annotations:
    definition.oam.dev/description: "Describes long-running, scalable, containerized services that running at backend. They do NOT have network endpoint to receive external network traffic."
spec:
  definitionRef:
    name: deployments.apps
  status:
    healthPolicy: |
      isHealth: (context.output.status.readyReplicas > 0) && (context.output.status.readyReplicas == context.output.status.replicas)
    customStatus: |-
      message: "type: " + context.output.spec.template.spec.containers[0].image + ",\t enemies:" + context.outputs.gameconfig.data.enemies
  extension:
    template: |
      output: {
        apiVersion: "apps/v1"
        kind: "Deployment"
        spec: {
          selector: matchLabels: {
            "app.oam.dev/component": context.name
          }

          template: {
            metadata: labels: {
              "app.oam.dev/component": context.name
            }

            spec: {
              containers: [{
                name: context.name
                image: parameter.image
                envFrom: [{
                  configMapRef: name: context.name + "game-config"
                }]
                if parameter["cmd"] != _|_ {
                  command: parameter.cmd
                }
              }]
            }
          }
        }
      }

      outputs: gameconfig: {
        apiVersion: "v1"
        kind: "ConfigMap"
        metadata: {
          name: context.name + "game-config"
        }
        data: {
          enemies: parameter.enemies
          lives: parameter.lives
        }
      }

      parameter: {
        // +usage=Which image would you like to use for your service
        // +short=i
        image: string
        // +usage=Commands to run in the container
        cmd?: [...string]
        lives: string
        enemies: string
      }
`
	tDDefYaml = `
apiVersion: core.oam.dev/v1alpha2
@@ -1262,6 +1383,60 @@ spec:
        replicas: *1 | int
      }
`

	tDDefWithHealthStatusYaml = `apiVersion: core.oam.dev/v1alpha2
kind: TraitDefinition
metadata:
  name: ingress
spec:
  status:
    customStatus: |-
      message: "type: "+ context.outputs.service.spec.type +",\t clusterIP:"+ context.outputs.service.spec.clusterIP+",\t ports:"+ "\(context.outputs.service.spec.ports[0].port)"+",\t domain"+context.outputs.ingress.spec.rules[0].host
    healthPolicy: |
      isHealth: len(context.outputs.service.spec.clusterIP) > 0
  extension:
    template: |
      parameter: {
        domain: string
        http: [string]: int
      }
      // trait template can have multiple outputs in one trait
      outputs: service: {
        apiVersion: "v1"
        kind: "Service"
        spec: {
          selector:
            app: context.name
          ports: [
            for k, v in parameter.http {
              port: v
              targetPort: v
            }
          ]
        }
      }
      outputs: ingress: {
        apiVersion: "networking.k8s.io/v1beta1"
        kind: "Ingress"
        metadata:
          name: context.name
        spec: {
          rules: [{
            host: parameter.domain
            http: {
              paths: [
                for k, v in parameter.http {
                  path: k
                  backend: {
                    serviceName: context.name
                    servicePort: v
                  }
                }
              ]
            }
          }]
        }
      }`
)

func NewMock() *httptest.Server {
@@ -337,6 +337,37 @@ func TestGetStatus(t *testing.T) {
			statusTemp: `message: "type: " + context.outputs.service.spec.type + " clusterIP:" + context.outputs.service.spec.clusterIP + " ports:" + "\(context.outputs.service.spec.ports[0].port)" + " domain:" + context.outputs.ingress.rules[0].host`,
			expMessage: "type: NodePort clusterIP:10.0.0.1 ports:80 domain:example.com",
		},
		"complex status": {
			tpContext: map[string]interface{}{
				"outputs": map[string]interface{}{
					"ingress": map[string]interface{}{
						"spec": map[string]interface{}{
							"rules": []interface{}{
								map[string]interface{}{
									"host": "example.com",
								},
							},
						},
						"status": map[string]interface{}{
							"loadBalancer": map[string]interface{}{
								"ingress": []interface{}{
									map[string]interface{}{
										"ip": "10.0.0.1",
									},
								},
							},
						},
					},
				},
			},
			statusTemp: `if len(context.outputs.ingress.status.loadBalancer.ingress) > 0 {
      message: "Visiting URL: " + context.outputs.ingress.spec.rules[0].host + ", IP: " + context.outputs.ingress.status.loadBalancer.ingress[0].ip
}
if len(context.outputs.ingress.status.loadBalancer.ingress) == 0 {
      message: "No loadBalancer found, visiting by using 'vela port-forward " + context.appName + " --route'\n"
}`,
			expMessage: "Visiting URL: example.com, IP: 10.0.0.1",
		},
	}
	for message, ca := range cases {
		gotMessage, err := getStatusMessage(ca.tpContext, ca.statusTemp)
@@ -6,6 +6,7 @@ import (
	"fmt"

	"github.com/pkg/errors"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/client"

@@ -47,7 +48,7 @@ func LoadTemplate(cli client.Reader, key string, kd types.CapType) (*Template, e
		if wd.Annotations["type"] == string(types.TerraformCategory) {
			capabilityCategory = types.TerraformCategory
		}
		tmpl, err := getTemplate(wd.Spec.Extension.Raw)
		tmpl, err := NewTemplate(wd.Spec.Extension, wd.Spec.Status)
		if err != nil {
			return nil, errors.WithMessagef(err, "LoadTemplate [%s] ", key)
		}

@@ -66,7 +67,7 @@ func LoadTemplate(cli client.Reader, key string, kd types.CapType) (*Template, e
		if td.Annotations["type"] == string(types.TerraformCategory) {
			capabilityCategory = types.TerraformCategory
		}
		tmpl, err := getTemplate(td.Spec.Extension.Raw)
		tmpl, err := NewTemplate(td.Spec.Extension, td.Spec.Status)
		if err != nil {
			return nil, errors.WithMessagef(err, "LoadTemplate [%s] ", key)
		}

@@ -82,24 +83,18 @@ func LoadTemplate(cli client.Reader, key string, kd types.CapType) (*Template, e
	return nil, fmt.Errorf("kind(%s) of %s not supported", kd, key)
}

func getTemplate(raw []byte) (*Template, error) {
	_tmp := map[string]interface{}{}
	if err := json.Unmarshal(raw, &_tmp); err != nil {
// NewTemplate will create CUE template for inner AbstractEngine using.
func NewTemplate(raw *runtime.RawExtension, status *v1alpha2.Status) (*Template, error) {
	extension := map[string]interface{}{}
	if err := json.Unmarshal(raw.Raw, &extension); err != nil {
		return nil, err
	}
	var (
		health string
		status string
	)
	if _, ok := _tmp["healthPolicy"]; ok {
		health = fmt.Sprint(_tmp["healthPolicy"])
	tmp := &Template{
		TemplateStr: fmt.Sprint(extension["template"]),
	}
	if _, ok := _tmp["customStatus"]; ok {
		status = fmt.Sprint(_tmp["customStatus"])
	if status != nil {
		tmp.CustomStatus = status.CustomStatus
		tmp.Health = status.HealthPolicy
	}
	return &Template{
		TemplateStr: fmt.Sprint(_tmp["template"]),
		Health: health,
		CustomStatus: status,
	}, nil
	return tmp, nil
}