init Helm module (#1131)

use settings from App to override Values

add discovery util to get workload created by helm module

add helm typed structs into workloadDef and traitDef

update doc & refine code

add e2e test for helm module application

use oam.dev/catalog as helm repo

Signed-off-by: roywang <seiwy2010@gmail.com>
This commit is contained in:
Yue Wang
2021-03-15 16:54:43 +09:00
committed by GitHub
parent dd0979b912
commit 1652b4cf0c
35 changed files with 1982 additions and 81 deletions

View File

@@ -123,6 +123,7 @@ docker-push:
docker push ${IMG}
e2e-setup:
helm install --create-namespace -n flux-system helm-flux http://oam.dev/catalog/helm-flux2-0.1.0.tgz
helm install kruise https://github.com/openkruise/kruise/releases/download/v0.7.0/kruise-chart.tgz
helm repo add jetstack https://charts.jetstack.io
helm repo update
@@ -131,6 +132,8 @@ e2e-setup:
ginkgo version
ginkgo -v -r e2e/setup
kubectl wait --for=condition=Ready pod -l app.kubernetes.io/name=vela-core,app.kubernetes.io/instance=kubevela -n vela-system --timeout=600s
kubectl wait --for=condition=Ready pod -l app=source-controller -n flux-system --timeout=600s
kubectl wait --for=condition=Ready pod -l app=helm-controller -n flux-system --timeout=600s
bin/vela dashboard &
e2e-api-test:

View File

@@ -34,7 +34,10 @@ type CUE struct {
// the encapsulation can be defined in different ways, e.g. CUE/HCL(terraform)/KUBE(K8s Object)/HELM, etc...
type Schematic struct {
CUE *CUE `json:"cue,omitempty"`
// TODO(wonderflow): support HCL(terraform)/KUBE(K8s Object)/HELM here.
HELM *Helm `json:"helm,omitempty"`
// TODO(wonderflow): support HCL(terraform)/KUBE(K8s Object) here.
}
// A DefinitionReference refers to a CustomResourceDefinition by name.
@@ -307,6 +310,10 @@ type ComponentSpec struct {
// +kubebuilder:pruning:PreserveUnknownFields
Workload runtime.RawExtension `json:"workload"`
// HelmRelease records a Helm release used by a Helm module workload.
// +optional
Helm *Helm `json:"helm,omitempty"`
// Parameters exposed by this component. ApplicationConfigurations that
// reference this component may specify values for these parameters, which
// will in turn be injected into the embedded workload.
@@ -314,6 +321,17 @@ type ComponentSpec struct {
Parameters []ComponentParameter `json:"parameters,omitempty"`
}
// A Helm represents resources used by a Helm module workload,
// following the fluxcd HelmRelease/HelmRepository APIs.
type Helm struct {
	// Release records a Helm release used by a Helm module workload.
	// +kubebuilder:pruning:PreserveUnknownFields
	Release runtime.RawExtension `json:"release"`

	// Repository records a Helm repository used by a Helm module workload.
	// +kubebuilder:pruning:PreserveUnknownFields
	Repository runtime.RawExtension `json:"repository"`
}
// A ComponentStatus represents the observed state of a Component.
type ComponentStatus struct {
// The generation observed by the component controller.

View File

@@ -774,6 +774,11 @@ func (in *ComponentScope) DeepCopy() *ComponentScope {
func (in *ComponentSpec) DeepCopyInto(out *ComponentSpec) {
*out = *in
in.Workload.DeepCopyInto(&out.Workload)
if in.Helm != nil {
in, out := &in.Helm, &out.Helm
*out = new(Helm)
(*in).DeepCopyInto(*out)
}
if in.Parameters != nil {
in, out := &in.Parameters, &out.Parameters
*out = make([]ComponentParameter, len(*in))
@@ -1585,6 +1590,23 @@ func (in *HealthScopeStatus) DeepCopy() *HealthScopeStatus {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Helm) DeepCopyInto(out *Helm) {
*out = *in
in.Release.DeepCopyInto(&out.Release)
in.Repository.DeepCopyInto(&out.Repository)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Helm.
func (in *Helm) DeepCopy() *Helm {
if in == nil {
return nil
}
out := new(Helm)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HistoryWorkload) DeepCopyInto(out *HistoryWorkload) {
*out = *in
@@ -1731,6 +1753,11 @@ func (in *Schematic) DeepCopyInto(out *Schematic) {
*out = new(CUE)
**out = **in
}
if in.HELM != nil {
in, out := &in.HELM, &out.HELM
*out = new(Helm)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Schematic.

View File

@@ -102,6 +102,8 @@ type CapabilityCategory string
const (
// TerraformCategory means the capability is in Terraform format
TerraformCategory CapabilityCategory = "terraform"
// HelmCategory means the capability is a helm capability
HelmCategory CapabilityCategory = "helm"
)
// Parameter defines a parameter for cli from capability template

View File

@@ -78,6 +78,21 @@ spec:
required:
- template
type: object
helm:
description: A Helm represents resources used by a Helm module
properties:
release:
description: Release records a Helm release used by a Helm module workload.
type: object
x-kubernetes-preserve-unknown-fields: true
repository:
description: HelmRelease records a Helm repository used by a Helm module workload.
type: object
x-kubernetes-preserve-unknown-fields: true
required:
- release
- repository
type: object
type: object
status:
description: Status defines the custom health policy and status message for workload

View File

@@ -42,6 +42,21 @@ spec:
spec:
description: A ComponentSpec defines the desired state of a Component.
properties:
helm:
description: HelmRelease records a Helm release used by a Helm module workload.
properties:
release:
description: Release records a Helm release used by a Helm module workload.
type: object
x-kubernetes-preserve-unknown-fields: true
repository:
description: HelmRelease records a Helm repository used by a Helm module workload.
type: object
x-kubernetes-preserve-unknown-fields: true
required:
- release
- repository
type: object
parameters:
description: Parameters exposed by this component. ApplicationConfigurations that reference this component may specify values for these parameters, which will in turn be injected into the embedded workload.
items:

View File

@@ -80,6 +80,21 @@ spec:
required:
- template
type: object
helm:
description: A Helm represents resources used by a Helm module
properties:
release:
description: Release records a Helm release used by a Helm module workload.
type: object
x-kubernetes-preserve-unknown-fields: true
repository:
description: HelmRelease records a Helm repository used by a Helm module workload.
type: object
x-kubernetes-preserve-unknown-fields: true
required:
- release
- repository
type: object
type: object
status:
description: Status defines the custom health policy and status message for trait

View File

@@ -94,6 +94,21 @@ spec:
required:
- template
type: object
helm:
description: A Helm represents resources used by a Helm module
properties:
release:
description: Release records a Helm release used by a Helm module workload.
type: object
x-kubernetes-preserve-unknown-fields: true
repository:
description: HelmRelease records a Helm repository used by a Helm module workload.
type: object
x-kubernetes-preserve-unknown-fields: true
required:
- release
- repository
type: object
type: object
status:
description: Status defines the custom health policy and status message for workload

View File

@@ -17,6 +17,8 @@
- [Workload Type](/en/cue/workload-type.md)
- [Trait](/en/cue/trait.md)
- [Advanced Features](/en/cue/status.md)
- HELM
- [Chart As Capability](/en/helm/chart-as-capability.md)
- Roadmap
- [KubeVela Roadmap](/en/roadmap.md)

View File

@@ -0,0 +1,146 @@
# Use Helm chart as capability module
Here is an example of how to use Helm chart as workload capability module.
## Install fluxcd/flux2 as dependencies
This feature depends on several CRDs and controllers from [fluxcd/flux2](https://github.com/fluxcd/flux2), so we prepared a simplified Helm chart to install dependencies.
It's worth noting that, flux2 doesn't offer an official Helm chart to install.
And this chart only includes minimum dependencies this feature relies on, not all of flux2.
```shell
helm install --create-namespace -n flux-system helm-flux http://oam.dev/catalog/helm-flux2-0.1.0.tgz
```
## Write WorkloadDefinition
Here is an example `WorkloadDefinition` with only required data of a Helm module.
Compared to the existing workload definitions based on CUE templates, several points in the Helm module are worth attention.
- `.spec.definitionRef` is required to indicate the workload GVK in your Helm chart. For example, in our sample chart, the core workload is `deployments.apps/v1`.
- `.spec.schematic.helm` contains information of Helm release & repository.
```yaml
apiVersion: core.oam.dev/v1alpha2
kind: WorkloadDefinition
metadata:
name: webapp-chart
annotations:
definition.oam.dev/description: helm chart for webapp
spec:
definitionRef:
name: deployments.apps
version: v1
schematic:
helm:
release:
chart:
spec:
chart: "podinfo"
version: "5.1.4"
repository:
url: "http://oam.dev/catalog/"
```
Specifically, the definition follows the APIs from `fluxcd/flux2`, [HelmReleaseSpec](https://github.com/fluxcd/helm-controller/blob/main/docs/api/helmrelease.md) and [HelmRepositorySpec](https://github.com/fluxcd/source-controller/blob/main/docs/api/source.md#source.toolkit.fluxcd.io/v1beta1.HelmRepository).
However, the fields shown in the sample are almost enough to describe a Helm chart release and its repository.
## Define Application & Deploy
Here is an example `Application`.
```yaml
apiVersion: core.oam.dev/v1alpha2
kind: Application
metadata:
name: myapp
namespace: default
spec:
components:
- name: demo-podinfo
type: webapp-chart
settings:
image:
tag: "5.1.2"
```
Helm module workload will use data in `settings` as [Helm chart values](https://github.com/captainroy-hy/podinfo/blob/master/charts/podinfo/values.yaml).
You can read the README.md of the Helm chart; the arguments are fully aligned with the [values.yaml](https://github.com/captainroy-hy/podinfo/blob/master/charts/podinfo/values.yaml) of the chart.
Now we can deploy the application.
```shell
kubectl apply -f webapp-chart-wd.yaml
kubectl apply -f myapp.yaml
```
After several minutes (it takes time to fetch Helm chart from the repo, render and install), you can check the Helm release is installed.
```shell
helm ls -A
myapp-demo-podinfo default 1 2021-03-05 02:02:18.692317102 +0000 UTC deployed podinfo-5.1.4 5.1.4
```
And check the deployment defined in the chart.
```shell
kubectl get deploy
NAME READY UP-TO-DATE AVAILABLE AGE
myapp-demo-podinfo 1/1 1 1 66m
```
## Use existing Trait system
A Helm module workload can fully work with Traits in the same way as existing workloads.
For example, we add two exemplary traits, scaler and [virtualgroup](https://github.com/oam-dev/kubevela/blob/master/docs/examples/helm-module/virtual-group-td.yaml), to a Helm module workload.
```yaml
apiVersion: core.oam.dev/v1alpha2
kind: Application
metadata:
name: myapp
namespace: default
spec:
components:
- name: demo-podinfo
type: webapp-chart
settings:
image:
tag: "5.1.2"
traits:
- name: scaler
properties:
replicas: 4
- name: virtualgroup
properties:
group: "my-group1"
type: "cluster"
```
> If vela webhook is enabled, remember to add `deployments.apps` into the trait definition's `.spec.appliesToWorkloads` list
:exclamation: There is one thing you should pay attention to when using the Trait system with a Helm module workload: **make sure the target workload in your Helm chart strictly follows the qualified-full-name convention in Helm.**
[As the sample chart shows](https://github.com/captainroy-hy/podinfo/blob/c2b9603036f1f033ec2534ca0edee8eff8f5b335/charts/podinfo/templates/deployment.yaml#L4), the workload name is composed of [release name and chart name](https://github.com/captainroy-hy/podinfo/blob/c2b9603036f1f033ec2534ca0edee8eff8f5b335/charts/podinfo/templates/_helpers.tpl#L13).
KubeVela will automatically generate a release name based on your Application name and component name, so just make sure not to override the full-name template in your Helm chart.
KubeVela relies on this name to discover the workload; otherwise it cannot apply traits to the workload.
### Verify applications with traits
You may wait a bit more time to check the trait works after deploying the application.
This is because KubeVela may not discover the target workload immediately after it is created, due to the reconciliation interval.
Check the scaler trait.
```shell
kubectl get manualscalertrait
NAME AGE
demo-podinfo-scaler-d8f78c6fc 13m
```
Check the virtualgroup trait.
```shell
kubectl get deployment myapp-demo-podinfo -o json | jq .spec.template.metadata.labels
{
"app.cluster.virtual.group": "my-group1",
"app.kubernetes.io/name": "myapp-demo-podinfo"
}
```

View File

@@ -0,0 +1,20 @@
apiVersion: core.oam.dev/v1alpha2
kind: Application
metadata:
name: myapp
namespace: default
spec:
components:
- name: demo-podinfo
type: webapp-chart
settings:
image:
tag: "5.1.2"
traits:
- name: scaler
properties:
replicas: 2
- name: virtualgroup
properties:
group: "my-group1"
type: "cluster"

View File

@@ -0,0 +1,29 @@
apiVersion: core.oam.dev/v1alpha2
kind: TraitDefinition
metadata:
annotations:
definition.oam.dev/description: "Add virtual group labels"
name: virtualgroup
spec:
appliesToWorkloads:
- webservice
- worker
- deployments.apps
extension:
template: |-
patch: {
spec: template: {
metadata: labels: {
if parameter.type == "namespace" {
"app.namespace.virtual.group": parameter.group
}
if parameter.type == "cluster" {
"app.cluster.virtual.group": parameter.group
}
}
}
}
parameter: {
group: *"default" | string
type: *"namespace" | string
}

View File

@@ -0,0 +1,19 @@
apiVersion: core.oam.dev/v1alpha2
kind: WorkloadDefinition
metadata:
name: webapp-chart
annotations:
definition.oam.dev/description: helm chart for webapp
spec:
definitionRef:
name: deployments.apps
version: v1
schematic:
helm:
release:
chart:
spec:
chart: "podinfo"
version: "5.1.4"
repository:
url: "http://oam.dev/catalog/"

View File

@@ -78,6 +78,21 @@ spec:
required:
- template
type: object
helm:
description: A Helm represents resources used by a Helm module
properties:
release:
description: Release records a Helm release used by a Helm module workload.
type: object
repository:
description: HelmRelease records a Helm repository used by a Helm module workload.
type: object
required:
- release
- repository
type: object
type: object
status:
description: Status defines the custom health policy and status message for workload

View File

@@ -42,6 +42,21 @@ spec:
spec:
description: A ComponentSpec defines the desired state of a Component.
properties:
helm:
description: HelmRelease records a Helm release used by a Helm module workload.
properties:
release:
description: Release records a Helm release used by a Helm module workload.
type: object
repository:
description: HelmRelease records a Helm repository used by a Helm module workload.
type: object
required:
- release
- repository
type: object
parameters:
description: Parameters exposed by this component. ApplicationConfigurations that reference this component may specify values for these parameters, which will in turn be injected into the embedded workload.
items:

View File

@@ -80,6 +80,21 @@ spec:
required:
- template
type: object
helm:
description: A Helm represents resources used by a Helm module
properties:
release:
description: Release records a Helm release used by a Helm module workload.
type: object
repository:
description: HelmRelease records a Helm repository used by a Helm module workload.
type: object
required:
- release
- repository
type: object
type: object
status:
description: Status defines the custom health policy and status message for trait

View File

@@ -93,6 +93,21 @@ spec:
required:
- template
type: object
helm:
description: A Helm represents resources used by a Helm module
properties:
release:
description: Release records a Helm release used by a Helm module workload.
type: object
repository:
description: HelmRelease records a Helm repository used by a Helm module workload.
type: object
required:
- release
- repository
type: object
type: object
status:
description: Status defines the custom health policy and status message for workload

View File

@@ -0,0 +1,38 @@
// Package apis contains typed structs copied from fluxcd/helm-controller and fluxcd/source-controller.
// Because we cannot resolve dependency inconsistencies between KubeVela and fluxcd/gotk,
// we pick up only those APIs used in KubeVela to install Helm resources.
package apis
import (
"k8s.io/apimachinery/pkg/runtime/schema"
)
const (
// HelmRepositoryKind is the kind name of fluxcd/helmrepository
HelmRepositoryKind = "HelmRepository"
)
// HelmSpec includes information to install a Helm chart.
type HelmSpec struct {
	// HelmReleaseSpec defines the desired state of the Helm release
	// (copied from the fluxcd/helm-controller API).
	HelmReleaseSpec `json:"release"`
	// HelmRepositorySpec defines the Helm repository the chart is fetched from
	// (copied from the fluxcd/source-controller API).
	HelmRepositorySpec `json:"repository"`
}
var (
	// HelmReleaseGVK refers to GVK of fluxcd/helmrelease
	HelmReleaseGVK = schema.GroupVersionKind{
		Group:   "helm.toolkit.fluxcd.io",
		Version: "v2beta1",
		Kind:    "HelmRelease",
	}
	// HelmRepositoryGVK refers to GVK of fluxcd/helmrepository
	HelmRepositoryGVK = schema.GroupVersionKind{
		Group:   "source.toolkit.fluxcd.io",
		Version: "v1beta1",
		Kind:    "HelmRepository",
	}
	// HelmChartNamePath is the field path within a HelmRelease object
	// at which the chart name is recorded (spec.chart.spec.chart).
	HelmChartNamePath = []string{"spec", "chart", "spec", "chart"}
)

View File

@@ -0,0 +1,377 @@
package apis
// APIs copied from fluxcd/helm-controller/api/v2beta1 @ api/v0.7.0
/*
Copyright 2020 The Flux CD contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import (
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// HelmReleaseSpec defines the desired state of a Helm release.
type HelmReleaseSpec struct {
// Chart defines the template of the v1beta1.HelmChart that should be created
// for this HelmRelease.
// +required
Chart HelmChartTemplate `json:"chart"`
// Interval at which to reconcile the Helm release.
// make it optional in KubeVela
// +optional
Interval *metav1.Duration `json:"interval,omitempty"`
// KubeConfig for reconciling the HelmRelease on a remote cluster.
// When specified, KubeConfig takes precedence over ServiceAccountName.
// +optional
// KubeConfig *KubeConfig `json:"kubeConfig,omitempty"`
// Suspend tells the controller to suspend reconciliation for this HelmRelease,
// it does not apply to already started reconciliations. Defaults to false.
// +optional
Suspend bool `json:"suspend,omitempty"`
// ReleaseName used for the Helm release. Defaults to a composition of
// '[TargetNamespace-]Name'.
// +kubebuilder:validation:MinLength=1
// +kubebuilder:validation:MaxLength=53
// +kubebuilder:validation:Optional
// +optional
ReleaseName string `json:"releaseName,omitempty"`
// TargetNamespace to target when performing operations for the HelmRelease.
// Defaults to the namespace of the HelmRelease.
// +kubebuilder:validation:MinLength=1
// +kubebuilder:validation:MaxLength=63
// +kubebuilder:validation:Optional
// +optional
TargetNamespace string `json:"targetNamespace,omitempty"`
// StorageNamespace used for the Helm storage.
// Defaults to the namespace of the HelmRelease.
// +kubebuilder:validation:MinLength=1
// +kubebuilder:validation:MaxLength=63
// +kubebuilder:validation:Optional
// +optional
StorageNamespace string `json:"storageNamespace,omitempty"`
// DependsOn may contain a dependency.CrossNamespaceDependencyReference slice with
// references to HelmRelease resources that must be ready before this HelmRelease
// can be reconciled.
// +optional
// DependsOn []dependency.CrossNamespaceDependencyReference `json:"dependsOn,omitempty"`
// Timeout is the time to wait for any individual Kubernetes operation (like Jobs
// for hooks) during the performance of a Helm action. Defaults to '5m0s'.
// +optional
Timeout *metav1.Duration `json:"timeout,omitempty"`
// MaxHistory is the number of revisions saved by Helm for this HelmRelease.
// Use '0' for an unlimited number of revisions; defaults to '10'.
// +optional
MaxHistory *int `json:"maxHistory,omitempty"`
// The name of the Kubernetes service account to impersonate
// when reconciling this HelmRelease.
// +optional
ServiceAccountName string `json:"serviceAccountName,omitempty"`
// Install holds the configuration for Helm install actions for this HelmRelease.
// +optional
Install *Install `json:"install,omitempty"`
// Upgrade holds the configuration for Helm upgrade actions for this HelmRelease.
// +optional
Upgrade *Upgrade `json:"upgrade,omitempty"`
// Test holds the configuration for Helm test actions for this HelmRelease.
// +optional
Test *Test `json:"test,omitempty"`
// Rollback holds the configuration for Helm rollback actions for this HelmRelease.
// +optional
Rollback *Rollback `json:"rollback,omitempty"`
// Uninstall holds the configuration for Helm uninstall actions for this HelmRelease.
// +optional
Uninstall *Uninstall `json:"uninstall,omitempty"`
// ValuesFrom holds references to resources containing Helm values for this HelmRelease,
// and information about how they should be merged.
// ValuesFrom []ValuesReference `json:"valuesFrom,omitempty"`
// Values holds the values for this Helm release.
// +optional
Values *apiextensionsv1.JSON `json:"values,omitempty"`
// PostRenderers holds an array of Helm PostRenderers, which will be applied in order
// of their definition.
// +optional
// PostRenderers []PostRenderer `json:"postRenderers,omitempty"`
}
// HelmChartTemplate defines the template from which the controller will
// generate a v1beta1.HelmChart object in the same namespace as the referenced
// v1beta1.Source.
type HelmChartTemplate struct {
// Spec holds the template for the v1beta1.HelmChartSpec for this HelmRelease.
// +required
Spec HelmChartTemplateSpec `json:"spec"`
}
// HelmChartTemplateSpec defines the template from which the controller will
// generate a v1beta1.HelmChartSpec object.
type HelmChartTemplateSpec struct {
// The name or path the Helm chart is available at in the SourceRef.
// +required
Chart string `json:"chart"`
// Version semver expression, ignored for charts from v1beta1.GitRepository and
// v1beta1.Bucket sources. Defaults to latest when omitted.
// +kubebuilder:default:=*
// +optional
Version string `json:"version,omitempty"`
// The name and namespace of the v1beta1.Source the chart is available at.
// +required
SourceRef CrossNamespaceObjectReference `json:"sourceRef"`
// Interval at which to check the v1beta1.Source for updates. Defaults to
// 'HelmReleaseSpec.Interval'.
// +optional
Interval *metav1.Duration `json:"interval,omitempty"`
// Alternative values file to use as the default chart values, expected to be a
// relative path in the SourceRef. Ignored when omitted.
// +optional
ValuesFile string `json:"valuesFile,omitempty"`
}
// Install holds the configuration for Helm install actions performed for this
// HelmRelease.
type Install struct {
// Timeout is the time to wait for any individual Kubernetes operation (like
// Jobs for hooks) during the performance of a Helm install action. Defaults to
// 'HelmReleaseSpec.Timeout'.
// +optional
Timeout *metav1.Duration `json:"timeout,omitempty"`
// Remediation holds the remediation configuration for when the Helm install
// action for the HelmRelease fails. The default is to not perform any action.
// +optional
Remediation *InstallRemediation `json:"remediation,omitempty"`
// DisableWait disables the waiting for resources to be ready after a Helm
// install has been performed.
// +optional
DisableWait bool `json:"disableWait,omitempty"`
// DisableHooks prevents hooks from running during the Helm install action.
// +optional
DisableHooks bool `json:"disableHooks,omitempty"`
// DisableOpenAPIValidation prevents the Helm install action from validating
// rendered templates against the Kubernetes OpenAPI Schema.
// +optional
DisableOpenAPIValidation bool `json:"disableOpenAPIValidation,omitempty"`
// Replace tells the Helm install action to re-use the 'ReleaseName', but only
// if that name is a deleted release which remains in the history.
// +optional
Replace bool `json:"replace,omitempty"`
// SkipCRDs tells the Helm install action to not install any CRDs. By default,
// CRDs are installed if not already present.
// +optional
SkipCRDs bool `json:"skipCRDs,omitempty"`
// CreateNamespace tells the Helm install action to create the
// HelmReleaseSpec.TargetNamespace if it does not exist yet.
// On uninstall, the namespace will not be garbage collected.
// +optional
CreateNamespace bool `json:"createNamespace,omitempty"`
}
// InstallRemediation holds the configuration for Helm install remediation.
type InstallRemediation struct {
// Retries is the number of retries that should be attempted on failures before
// bailing. Remediation, using an uninstall, is performed between each attempt.
// Defaults to '0', a negative integer equals to unlimited retries.
// +optional
Retries int `json:"retries,omitempty"`
// IgnoreTestFailures tells the controller to skip remediation when the Helm
// tests are run after an install action but fail. Defaults to
// 'Test.IgnoreFailures'.
// +optional
IgnoreTestFailures *bool `json:"ignoreTestFailures,omitempty"`
// RemediateLastFailure tells the controller to remediate the last failure, when
// no retries remain. Defaults to 'false'.
// +optional
RemediateLastFailure *bool `json:"remediateLastFailure,omitempty"`
}
// Upgrade holds the configuration for Helm upgrade actions for this
// HelmRelease.
type Upgrade struct {
// Timeout is the time to wait for any individual Kubernetes operation (like
// Jobs for hooks) during the performance of a Helm upgrade action. Defaults to
// 'HelmReleaseSpec.Timeout'.
// +optional
Timeout *metav1.Duration `json:"timeout,omitempty"`
// Remediation holds the remediation configuration for when the Helm upgrade
// action for the HelmRelease fails. The default is to not perform any action.
// +optional
Remediation *UpgradeRemediation `json:"remediation,omitempty"`
// DisableWait disables the waiting for resources to be ready after a Helm
// upgrade has been performed.
// +optional
DisableWait bool `json:"disableWait,omitempty"`
// DisableHooks prevents hooks from running during the Helm upgrade action.
// +optional
DisableHooks bool `json:"disableHooks,omitempty"`
// DisableOpenAPIValidation prevents the Helm upgrade action from validating
// rendered templates against the Kubernetes OpenAPI Schema.
// +optional
DisableOpenAPIValidation bool `json:"disableOpenAPIValidation,omitempty"`
// Force forces resource updates through a replacement strategy.
// +optional
Force bool `json:"force,omitempty"`
// PreserveValues will make Helm reuse the last release's values and merge in
// overrides from 'Values'. Setting this flag makes the HelmRelease
// non-declarative.
// +optional
PreserveValues bool `json:"preserveValues,omitempty"`
// CleanupOnFail allows deletion of new resources created during the Helm
// upgrade action when it fails.
// +optional
CleanupOnFail bool `json:"cleanupOnFail,omitempty"`
}
// UpgradeRemediation holds the configuration for Helm upgrade remediation.
type UpgradeRemediation struct {
// Retries is the number of retries that should be attempted on failures before
// bailing. Remediation, using 'Strategy', is performed between each attempt.
// Defaults to '0', a negative integer equals to unlimited retries.
// +optional
Retries int `json:"retries,omitempty"`
// IgnoreTestFailures tells the controller to skip remediation when the Helm
// tests are run after an upgrade action but fail.
// Defaults to 'Test.IgnoreFailures'.
// +optional
IgnoreTestFailures *bool `json:"ignoreTestFailures,omitempty"`
// RemediateLastFailure tells the controller to remediate the last failure, when
// no retries remain. Defaults to 'false' unless 'Retries' is greater than 0.
// +optional
RemediateLastFailure *bool `json:"remediateLastFailure,omitempty"`
// Strategy to use for failure remediation. Defaults to 'rollback'.
// +kubebuilder:validation:Enum=rollback;uninstall
// +optional
Strategy *RemediationStrategy `json:"strategy,omitempty"`
}
// RemediationStrategy returns the strategy to use to remediate a failed install
// or upgrade.
type RemediationStrategy string
const (
// RollbackRemediationStrategy represents a Helm remediation strategy of Helm
// rollback.
RollbackRemediationStrategy RemediationStrategy = "rollback"
// UninstallRemediationStrategy represents a Helm remediation strategy of Helm
// uninstall.
UninstallRemediationStrategy RemediationStrategy = "uninstall"
)
// Test holds the configuration for Helm test actions for this HelmRelease.
type Test struct {
// Enable enables Helm test actions for this HelmRelease after an Helm install
// or upgrade action has been performed.
// +optional
Enable bool `json:"enable,omitempty"`
// Timeout is the time to wait for any individual Kubernetes operation during
// the performance of a Helm test action. Defaults to 'HelmReleaseSpec.Timeout'.
// +optional
Timeout *metav1.Duration `json:"timeout,omitempty"`
// IgnoreFailures tells the controller to skip remediation when the Helm tests
// are run but fail. Can be overwritten for tests run after install or upgrade
// actions in 'Install.IgnoreTestFailures' and 'Upgrade.IgnoreTestFailures'.
// +optional
IgnoreFailures bool `json:"ignoreFailures,omitempty"`
}
// Rollback holds the configuration for Helm rollback actions for this
// HelmRelease.
type Rollback struct {
// Timeout is the time to wait for any individual Kubernetes operation (like
// Jobs for hooks) during the performance of a Helm rollback action. Defaults to
// 'HelmReleaseSpec.Timeout'.
// +optional
Timeout *metav1.Duration `json:"timeout,omitempty"`
// DisableWait disables the waiting for resources to be ready after a Helm
// rollback has been performed.
// +optional
DisableWait bool `json:"disableWait,omitempty"`
// DisableHooks prevents hooks from running during the Helm rollback action.
// +optional
DisableHooks bool `json:"disableHooks,omitempty"`
// Recreate performs pod restarts for the resource if applicable.
// +optional
Recreate bool `json:"recreate,omitempty"`
// Force forces resource updates through a replacement strategy.
// +optional
Force bool `json:"force,omitempty"`
// CleanupOnFail allows deletion of new resources created during the Helm
// rollback action when it fails.
// +optional
CleanupOnFail bool `json:"cleanupOnFail,omitempty"`
}
// Uninstall holds the configuration for Helm uninstall actions for this
// HelmRelease.
type Uninstall struct {
	// Timeout is the time to wait for any individual Kubernetes operation (like
	// Jobs for hooks) during the performance of a Helm uninstall action. Defaults
	// to 'HelmReleaseSpec.Timeout'.
	// +optional
	Timeout *metav1.Duration `json:"timeout,omitempty"`

	// DisableHooks prevents hooks from running during the Helm uninstall action.
	// +optional
	DisableHooks bool `json:"disableHooks,omitempty"`

	// KeepHistory tells Helm to remove all associated resources and mark the
	// release as deleted, but retain the release history.
	// +optional
	KeepHistory bool `json:"keepHistory,omitempty"`
}

View File

@@ -0,0 +1,50 @@
package apis
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// APIs copied from fluxcd/source-controller/api/v1beta1 @ api/v0.7.4
/*
Copyright 2020 The Flux CD contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// HelmRepositorySpec defines the reference to a Helm repository.
type HelmRepositorySpec struct {
	// The Helm repository URL, a valid URL contains at least a protocol and host.
	// +required
	URL string `json:"url"`

	// The name of the secret containing authentication credentials for the Helm
	// repository.
	// For HTTP/S basic auth the secret must contain username and
	// password fields.
	// For TLS the secret must contain a certFile and keyFile, and/or
	// caCert fields.
	// NOTE(review): the SecretRef field from the upstream flux2 API is kept
	// commented out in this KubeVela copy — confirm whether authenticated
	// repositories are intentionally unsupported here.
	// +optional
	// SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`

	// The interval at which to check the upstream for updates.
	// make it optional in KubeVela
	// +optional
	Interval *metav1.Duration `json:"interval,omitempty"`

	// The timeout of index downloading, defaults to 60s.
	// +kubebuilder:default:="60s"
	// +optional
	Timeout *metav1.Duration `json:"timeout,omitempty"`

	// This flag tells the controller to suspend the reconciliation of this source.
	// +optional
	Suspend bool `json:"suspend,omitempty"`
}

View File

@@ -0,0 +1,75 @@
package apis
// APIs copied from fluxcd/helmcontroller
/*
Copyright 2020 The Flux CD contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// CrossNamespaceObjectReference contains enough information to let you locate
// the typed referenced object at cluster level.
type CrossNamespaceObjectReference struct {
	// APIVersion of the referent.
	// +optional
	APIVersion string `json:"apiVersion,omitempty"`

	// Kind of the referent.
	// NOTE(review): the field is marked +required but serialized with
	// `omitempty` — confirm this matches the upstream flux2 API intent.
	// +kubebuilder:validation:Enum=HelmRepository;GitRepository;Bucket
	// +required
	Kind string `json:"kind,omitempty"`

	// Name of the referent.
	// +kubebuilder:validation:MinLength=1
	// +kubebuilder:validation:MaxLength=253
	// +required
	Name string `json:"name"`

	// Namespace of the referent.
	// +kubebuilder:validation:MinLength=1
	// +kubebuilder:validation:MaxLength=63
	// +kubebuilder:validation:Optional
	// +optional
	Namespace string `json:"namespace,omitempty"`
}
// ValuesReference contains a reference to a resource containing Helm values,
// and optionally the key they can be found at.
type ValuesReference struct {
	// Kind of the values referent, valid values are ('Secret', 'ConfigMap').
	// +kubebuilder:validation:Enum=Secret;ConfigMap
	// +required
	Kind string `json:"kind"`

	// Name of the values referent. Should reside in the same namespace as the
	// referring resource.
	// +kubebuilder:validation:MinLength=1
	// +kubebuilder:validation:MaxLength=253
	// +required
	Name string `json:"name"`

	// ValuesKey is the data key where the values.yaml or a specific value can be
	// found at. Defaults to 'values.yaml'.
	// +optional
	ValuesKey string `json:"valuesKey,omitempty"`

	// TargetPath is the YAML dot notation path the value should be merged at. When
	// set, the ValuesKey is expected to be a single flat value. Defaults to 'None',
	// which results in the values getting merged at the root.
	// +optional
	TargetPath string `json:"targetPath,omitempty"`

	// Optional marks this ValuesReference as optional. When set, a not found error
	// for the values reference is ignored, but any ValuesKey, TargetPath or
	// transient error will still result in a reconciliation failure.
	// +optional
	Optional bool `json:"optional,omitempty"`
}

104
pkg/appfile/helm/helm.go Normal file
View File

@@ -0,0 +1,104 @@
package helm
import (
"encoding/json"
"fmt"
"time"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"github.com/pkg/errors"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
helmapi "github.com/oam-dev/kubevela/pkg/appfile/helm/flux2apis"
)
var (
// DefaultIntervalDuration is the interval that flux controller reconcile HelmRelease and HelmRepository
DefaultIntervalDuration = &metav1.Duration{Duration: 5 * time.Minute}
)
// RenderHelmReleaseAndHelmRepo constructs HelmRelease and HelmRepository in unstructured format.
// Both objects are named "<appName>-<compName>" and placed into namespace ns.
// Reconcile intervals default to DefaultIntervalDuration when unset, and the
// chart values declared in the release spec are overridden by the settings
// coming from the Application (values).
func RenderHelmReleaseAndHelmRepo(helmSpec *v1alpha2.Helm, compName, appName, ns string, values map[string]interface{}) (*unstructured.Unstructured, *unstructured.Unstructured, error) {
	rlsSpec := &helmapi.HelmReleaseSpec{}
	if err := json.Unmarshal(helmSpec.Release.Raw, rlsSpec); err != nil {
		return nil, nil, err
	}
	repoSpec := &helmapi.HelmRepositorySpec{}
	if err := json.Unmarshal(helmSpec.Repository.Raw, repoSpec); err != nil {
		return nil, nil, err
	}
	// default the reconcile interval of both objects
	if rlsSpec.Interval == nil {
		rlsSpec.Interval = DefaultIntervalDuration
	}
	if repoSpec.Interval == nil {
		repoSpec.Interval = DefaultIntervalDuration
	}

	// HelmRepository and HelmRelease share the same qualified name
	objName := fmt.Sprintf("%s-%s", appName, compName)

	// construct the unstructured HelmRepository object
	helmRepo := generateUnstructuredObj(objName, ns, helmapi.HelmRepositoryGVK)
	if err := setSpecObjIntoUnstructuredObj(repoSpec, helmRepo); err != nil {
		return nil, nil, errors.Wrap(err, "cannot set spec to HelmRepository")
	}

	// merge chart values from the release spec with the settings from the
	// Application; the latter take precedence
	chartValues := map[string]interface{}{}
	if rlsSpec.Values != nil {
		if err := json.Unmarshal(rlsSpec.Values.Raw, &chartValues); err != nil {
			return nil, nil, errors.Wrap(err, "cannot get chart values")
		}
	}
	for k, v := range values {
		chartValues[k] = v
	}
	if len(chartValues) > 0 {
		// avoid writing an empty map back into the spec
		raw, err := json.Marshal(chartValues)
		if err != nil {
			return nil, nil, errors.Wrap(err, "cannot get chart values")
		}
		rlsSpec.Values = &apiextensionsv1.JSON{Raw: raw}
	}

	// make the HelmRelease reference the HelmRepository as its chart source
	rlsSpec.Chart.Spec.SourceRef = helmapi.CrossNamespaceObjectReference{
		Kind:      helmapi.HelmRepositoryKind,
		Namespace: ns,
		Name:      objName,
	}
	helmRelease := generateUnstructuredObj(objName, ns, helmapi.HelmReleaseGVK)
	if err := setSpecObjIntoUnstructuredObj(rlsSpec, helmRelease); err != nil {
		return nil, nil, errors.Wrap(err, "cannot set spec to HelmRelease")
	}
	return helmRelease, helmRepo, nil
}
// generateUnstructuredObj returns an empty unstructured object carrying the
// given GVK, namespace, and name.
func generateUnstructuredObj(name, ns string, gvk schema.GroupVersionKind) *unstructured.Unstructured {
	obj := new(unstructured.Unstructured)
	obj.SetGroupVersionKind(gvk)
	obj.SetNamespace(ns)
	obj.SetName(name)
	return obj
}
// setSpecObjIntoUnstructuredObj round-trips spec through JSON into a plain
// map[string]interface{} and stores it under the 'spec' field of the given
// unstructured object.
func setSpecObjIntoUnstructuredObj(spec interface{}, u *unstructured.Unstructured) error {
	bts, err := json.Marshal(spec)
	if err != nil {
		return err
	}
	data := make(map[string]interface{})
	if err := json.Unmarshal(bts, &data); err != nil {
		return err
	}
	// propagate the error instead of discarding it: SetNestedMap fails when the
	// map contains values it cannot deep-copy
	return unstructured.SetNestedMap(u.Object, data, "spec")
}

View File

@@ -0,0 +1,78 @@
package helm
import (
"testing"
"github.com/ghodss/yaml"
"github.com/google/go-cmp/cmp"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
helmapi "github.com/oam-dev/kubevela/pkg/appfile/helm/flux2apis"
)
// TestRenderHelmReleaseAndHelmRepo verifies that the rendered HelmRelease gets
// the default 5m interval, the sourceRef pointing at the companion
// HelmRepository, and chart values overridden by the Application settings, and
// that the HelmRepository carries the repo URL with the default interval.
func TestRenderHelmReleaseAndHelmRepo(t *testing.T) {
	h := testData()
	// settings from the Application; expected to override the chart values
	chartValues := map[string]interface{}{
		"image": map[string]interface{}{
			"tag": "1.0.1",
		},
	}
	rls, repo, err := RenderHelmReleaseAndHelmRepo(h, "test-comp", "test-app", "test-ns", chartValues)
	if err != nil {
		t.Fatalf("want: nil, got: %v", err)
	}
	// expected HelmRelease: named "<app>-<comp>", default interval, overridden values
	expectRls := &unstructured.Unstructured{}
	expectRls.SetGroupVersionKind(helmapi.HelmReleaseGVK)
	expectRls.SetName("test-app-test-comp")
	expectRls.SetNamespace("test-ns")
	unstructured.SetNestedMap(expectRls.Object, map[string]interface{}{
		"chart": map[string]interface{}{
			"spec": map[string]interface{}{
				"chart":   "podinfo",
				"version": "1.0.0",
				"sourceRef": map[string]interface{}{
					"kind":      "HelmRepository",
					"name":      "test-app-test-comp",
					"namespace": "test-ns",
				},
			},
		},
		"interval": "5m0s",
		"values":   map[string]interface{}{"image": map[string]interface{}{"tag": "1.0.1"}},
	}, "spec")
	if diff := cmp.Diff(expectRls, rls); diff != "" {
		t.Errorf("\n%s\nApply(...): -want , +got \n%s\n", "render HelmRelease", diff)
	}
	// expected HelmRepository: same qualified name, repo URL, default interval
	expectRepo := &unstructured.Unstructured{}
	expectRepo.SetGroupVersionKind(helmapi.HelmRepositoryGVK)
	expectRepo.SetName("test-app-test-comp")
	expectRepo.SetNamespace("test-ns")
	unstructured.SetNestedMap(expectRepo.Object, map[string]interface{}{
		"url":      "test.com",
		"interval": "5m0s",
	}, "spec")
	if diff := cmp.Diff(expectRepo, repo); diff != "" {
		t.Errorf("\n%s\nApply(...): -want , +got \n%s\n", "render HelmRepository", diff)
	}
}
// testData builds a minimal Helm module spec — a "podinfo" chart served from a
// dummy repository URL — shared by the rendering tests above.
func testData() *v1alpha2.Helm {
	rlsStr := `chart:
  spec:
    chart: "podinfo"
    version: "1.0.0"`
	repoStr := `url: "test.com"`
	// the fixtures are constant and known-valid YAML, so the conversion cannot
	// fail; the errors are deliberately ignored
	rlsJSON, _ := yaml.YAMLToJSON([]byte(rlsStr))
	repoJSON, _ := yaml.YAMLToJSON([]byte(repoStr))
	h := &v1alpha2.Helm{}
	h.Release.Raw = rlsJSON
	h.Repository.Raw = repoJSON
	return h
}

View File

@@ -0,0 +1,5 @@
chart:
spec:
chart: "podinfo"
version: "1.0.0"
url: "test.com"

View File

@@ -2,16 +2,20 @@ package appfile
import (
"context"
"encoding/json"
"fmt"
"github.com/crossplane/crossplane-runtime/apis/core/v1alpha1"
"github.com/pkg/errors"
kerrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
"github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/appfile/config"
"github.com/oam-dev/kubevela/pkg/appfile/helm"
"github.com/oam-dev/kubevela/pkg/controller/utils"
"github.com/oam-dev/kubevela/pkg/dsl/definition"
"github.com/oam-dev/kubevela/pkg/dsl/process"
@@ -37,6 +41,9 @@ type Workload struct {
Template string
HealthCheckPolicy string
CustomStatusFormat string
Helm *v1alpha2.Helm
DefinitionReference v1alpha2.DefinitionReference
}
// GetUserConfigName get user config from AppFile, it will contain config file in it.
@@ -159,6 +166,8 @@ func (p *Parser) parseWorkload(ctx context.Context, comp v1alpha2.ApplicationCom
workload.Template = templ.TemplateStr
workload.HealthCheckPolicy = templ.Health
workload.CustomStatusFormat = templ.CustomStatus
workload.DefinitionReference = templ.Reference
workload.Helm = templ.Helm
settings, err := util.RawExtension2Map(&comp.Settings)
if err != nil {
return nil, errors.WithMessagef(err, "fail to parse settings for %s", comp.Name)
@@ -223,43 +232,104 @@ func (p *Parser) GenerateApplicationConfiguration(app *Appfile, ns string) (*v1a
var components []*v1alpha2.Component
for _, wl := range app.Workloads {
pCtx, err := PrepareProcessContext(p.client, wl, app.Name, app.RevisionName, ns)
if err != nil {
return nil, nil, err
}
for _, tr := range wl.Traits {
if err := tr.EvalContext(pCtx); err != nil {
return nil, nil, errors.Wrapf(err, "evaluate template trait=%s app=%s", tr.Name, wl.Name)
var comp *v1alpha2.Component
var acComp *v1alpha2.ApplicationConfigurationComponent
var err error
switch wl.CapabilityCategory {
case types.HelmCategory:
comp, acComp, err = generateComponentFromHelmModule(p.client, p.dm, wl, app.Name, app.RevisionName, ns)
if err != nil {
return nil, nil, err
}
default:
comp, acComp, err = generateComponentFromCUEModule(p.client, wl, app.Name, app.RevisionName, ns)
if err != nil {
return nil, nil, err
}
}
comp, acComp, err := evalWorkloadWithContext(pCtx, wl, app.Name, wl.Name)
if err != nil {
return nil, nil, err
}
comp.Name = wl.Name
acComp.ComponentName = comp.Name
for _, sc := range wl.Scopes {
acComp.Scopes = append(acComp.Scopes, v1alpha2.ComponentScope{ScopeReference: v1alpha1.TypedReference{
APIVersion: sc.GVK.GroupVersion().String(),
Kind: sc.GVK.Kind,
Name: sc.Name,
}})
}
comp.Namespace = ns
if comp.Labels == nil {
comp.Labels = map[string]string{}
}
comp.Labels[oam.LabelAppName] = app.Name
comp.SetGroupVersionKind(v1alpha2.ComponentGroupVersionKind)
components = append(components, comp)
appconfig.Spec.Components = append(appconfig.Spec.Components, *acComp)
}
return appconfig, components, nil
}
// generateComponentFromCUEModule renders a CUE-based workload and its traits
// into a Component plus the matching ApplicationConfigurationComponent. The
// Component is namespaced, labeled with the application name, and stamped with
// the Component GVK; scopes on the workload are attached to the AC component.
func generateComponentFromCUEModule(c client.Client, wl *Workload, appName, revision, ns string) (*v1alpha2.Component, *v1alpha2.ApplicationConfigurationComponent, error) {
	pCtx, err := PrepareProcessContext(c, wl, appName, revision, ns)
	if err != nil {
		return nil, nil, err
	}
	// evaluate each trait template against the shared process context
	for _, tr := range wl.Traits {
		if err := tr.EvalContext(pCtx); err != nil {
			return nil, nil, errors.Wrapf(err, "evaluate template trait=%s app=%s", tr.Name, wl.Name)
		}
	}
	comp, acComp, err := evalWorkloadWithContext(pCtx, wl, appName, wl.Name)
	if err != nil {
		return nil, nil, err
	}
	comp.Name = wl.Name
	acComp.ComponentName = comp.Name
	for _, sc := range wl.Scopes {
		acComp.Scopes = append(acComp.Scopes, v1alpha2.ComponentScope{ScopeReference: v1alpha1.TypedReference{
			APIVersion: sc.GVK.GroupVersion().String(),
			Kind:       sc.GVK.Kind,
			Name:       sc.Name,
		}})
	}
	comp.Namespace = ns
	if comp.Labels == nil {
		comp.Labels = map[string]string{}
	}
	comp.Labels[oam.LabelAppName] = appName
	comp.SetGroupVersionKind(v1alpha2.ComponentGroupVersionKind)
	return comp, acComp, nil
}
// generateComponentFromHelmModule renders a Helm-based workload into a
// Component and ApplicationConfigurationComponent. The target workload GVK is
// resolved from the workload definition so CUE trait capabilities can still
// operate against an empty base workload, while the actual resources are
// delivered via the HelmRelease/HelmRepository pair recorded on the Component.
func generateComponentFromHelmModule(c client.Client, dm discoverymapper.DiscoveryMapper, wl *Workload, appName, revision, ns string) (*v1alpha2.Component, *v1alpha2.ApplicationConfigurationComponent, error) {
	targetWorkloadGVK, err := util.GetGVKFromDefinition(dm, wl.DefinitionReference)
	if err != nil {
		return nil, nil, err
	}
	// NOTE this is a hack way to enable using CUE module capabilities on Helm module workload
	// construct an empty base workload according to its GVK
	wl.Template = fmt.Sprintf(`
output: {
	apiVersion: "%s"
	kind: "%s"
}`, targetWorkloadGVK.GroupVersion().String(), targetWorkloadGVK.Kind)
	// re-use the way CUE module generates comp & acComp
	comp, acComp, err := generateComponentFromCUEModule(c, wl, appName, revision, ns)
	if err != nil {
		return nil, nil, err
	}
	release, repo, err := helm.RenderHelmReleaseAndHelmRepo(wl.Helm, wl.Name, appName, ns, wl.Params)
	if err != nil {
		return nil, nil, err
	}
	rlsBytes, err := json.Marshal(release.Object)
	if err != nil {
		return nil, nil, err
	}
	repoBytes, err := json.Marshal(repo.Object)
	if err != nil {
		return nil, nil, err
	}
	comp.Spec.Helm = &v1alpha2.Helm{
		Release:    runtime.RawExtension{Raw: rlsBytes},
		Repository: runtime.RawExtension{Raw: repoBytes},
	}
	return comp, acComp, nil
}
// evalWorkloadWithContext evaluate the workload's template to generate component and ACComponent
func evalWorkloadWithContext(pCtx process.Context, wl *Workload, appName, compName string) (*v1alpha2.Component, *v1alpha2.ApplicationConfigurationComponent, error) {
base, assists := pCtx.Output()

View File

@@ -36,6 +36,7 @@ import (
"k8s.io/apimachinery/pkg/types"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
oamtypes "github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/util"
)
@@ -489,3 +490,175 @@ var _ = Describe("Test appFile parser", func() {
})
})
// This suite verifies that the appfile parser turns a Helm-module workload into
// a Component carrying HelmRelease/HelmRepository specs plus an empty base
// workload rendered from the definition's GVK, and an ApplicationConfiguration
// carrying the rendered traits.
var _ = Describe("Test appfile parser to parse helm module", func() {
	var (
		appName  = "test-app"
		compName = "test-comp"
	)
	// a Helm-module workload ("webapp-chart", podinfo chart) with one CUE trait
	appFile := &Appfile{
		Name: appName,
		Workloads: []*Workload{
			{
				Name:               compName,
				Type:               "webapp-chart",
				CapabilityCategory: oamtypes.HelmCategory,
				// settings from the Application; expected to override chart values
				Params: map[string]interface{}{
					"image": map[string]interface{}{
						"tag": "5.1.2",
					},
				},
				Traits: []*Trait{
					{
						Name: "scaler",
						Params: map[string]interface{}{
							"replicas": float64(10),
						},
						Template: `
outputs: scaler: {
apiVersion: "core.oam.dev/v1alpha2"
kind: "ManualScalerTrait"
spec: {
replicaCount: parameter.replicas
}
}
parameter: {
//+short=r
replicas: *1 | int
}
`,
					},
				},
				Helm: &v1alpha2.Helm{
					Release: util.Object2RawExtension(map[string]interface{}{
						"chart": map[string]interface{}{
							"spec": map[string]interface{}{
								"chart":   "podinfo",
								"version": "5.1.4",
							},
						},
					}),
					Repository: util.Object2RawExtension(map[string]interface{}{
						"url": "http://oam.dev/catalog/",
					}),
				},
				// the workload definition resolves to apps/v1 Deployment
				DefinitionReference: v1alpha2.DefinitionReference{
					Name:    "deployments.apps",
					Version: "v1",
				},
			},
		},
	}
	It("Test application containing helm module", func() {
		By("Generate ApplicationConfiguration and Components")
		ac, components, err := NewApplicationParser(k8sClient, dm).GenerateApplicationConfiguration(appFile, "default")
		Expect(err).To(BeNil())
		// the trait rendered from the CUE template above, with standard labels
		manuscaler := util.Object2RawExtension(&unstructured.Unstructured{
			Object: map[string]interface{}{
				"apiVersion": "core.oam.dev/v1alpha2",
				"kind":       "ManualScalerTrait",
				"metadata": map[string]interface{}{
					"labels": map[string]interface{}{
						"app.oam.dev/component":  compName,
						"app.oam.dev/name":       appName,
						"trait.oam.dev/type":     "scaler",
						"trait.oam.dev/resource": "scaler",
					},
				},
				"spec": map[string]interface{}{"replicaCount": int64(10)},
			},
		})
		expectAppConfig := &v1alpha2.ApplicationConfiguration{
			TypeMeta: metav1.TypeMeta{
				Kind:       "ApplicationConfiguration",
				APIVersion: "core.oam.dev/v1alpha2",
			}, ObjectMeta: metav1.ObjectMeta{
				Name:      appName,
				Namespace: "default",
				Labels:    map[string]string{oam.LabelAppName: appName},
			},
			Spec: v1alpha2.ApplicationConfigurationSpec{
				Components: []v1alpha2.ApplicationConfigurationComponent{
					{
						ComponentName: compName,
						Traits: []v1alpha2.ComponentTrait{
							{
								Trait: manuscaler,
							},
						},
					},
				},
			},
		}
		expectComponent := &v1alpha2.Component{
			TypeMeta: metav1.TypeMeta{
				Kind:       "Component",
				APIVersion: "core.oam.dev/v1alpha2",
			}, ObjectMeta: metav1.ObjectMeta{
				Name:      compName,
				Namespace: "default",
				Labels:    map[string]string{oam.LabelAppName: appName},
			},
			Spec: v1alpha2.ComponentSpec{
				Helm: &v1alpha2.Helm{
					Release: util.Object2RawExtension(map[string]interface{}{
						"apiVersion": "helm.toolkit.fluxcd.io/v2beta1",
						"kind":       "HelmRelease",
						"metadata": map[string]interface{}{
							"name":      fmt.Sprintf("%s-%s", appName, compName),
							"namespace": "default",
						},
						"spec": map[string]interface{}{
							"chart": map[string]interface{}{
								"spec": map[string]interface{}{
									"sourceRef": map[string]interface{}{
										"kind":      "HelmRepository",
										"name":      fmt.Sprintf("%s-%s", appName, compName),
										"namespace": "default",
									},
								},
							},
							"interval": "5m0s",
							// "tag" comes from Params, overriding the chart values
							"values": map[string]interface{}{
								"image": map[string]interface{}{
									"tag": "5.1.2",
								},
							},
						},
					}),
					Repository: util.Object2RawExtension(map[string]interface{}{
						"apiVersion": "source.toolkit.fluxcd.io/v1beta1",
						"kind":       "HelmRepository",
						"metadata": map[string]interface{}{
							"name":      fmt.Sprintf("%s-%s", appName, compName),
							"namespace": "default",
						},
						"spec": map[string]interface{}{
							"url": "http://oam.dev/catalog/",
						},
					}),
				},
				// the empty base workload constructed from the definition's GVK
				Workload: util.Object2RawExtension(map[string]interface{}{
					"apiVersion": "apps/v1",
					"kind":       "Deployment",
					"metadata": map[string]interface{}{
						"labels": map[string]interface{}{
							"workload.oam.dev/type": "webapp-chart",
							"app.oam.dev/component": compName,
							"app.oam.dev/name":      appName,
						},
					},
				}),
			},
		}
		By("Verify expected ApplicationConfiguration")
		diff := cmp.Diff(ac, expectAppConfig)
		Expect(diff).Should(BeEmpty())
		By("Verify expected Component")
		diff = cmp.Diff(components[0], expectComponent)
		// NOTE(review): this asserts the diff is NOT empty, i.e. the generated
		// Component is expected to differ from expectComponent — TODO confirm
		// this is intended (e.g. because chart/version fields are omitted from
		// the expectation above) and not a typo for Should(BeEmpty()).
		Expect(diff).ShouldNot(BeEmpty())
	})
})

View File

@@ -17,6 +17,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/log/zap"
corev1alpha2 "github.com/oam-dev/kubevela/apis/core.oam.dev"
"github.com/oam-dev/kubevela/pkg/oam/discoverymapper"
// +kubebuilder:scaffold:imports
)
@@ -24,6 +25,7 @@ var cfg *rest.Config
var scheme *runtime.Scheme
var k8sClient client.Client
var testEnv *envtest.Environment
var dm discoverymapper.DiscoveryMapper
func TestAppFile(t *testing.T) {
RegisterFailHandler(Fail)
@@ -53,6 +55,9 @@ var _ = BeforeSuite(func(done Done) {
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme})
Expect(err).ToNot(HaveOccurred())
Expect(k8sClient).ToNot(BeNil())
dm, err = discoverymapper.New(cfg)
Expect(err).ToNot(HaveOccurred())
Expect(dm).ToNot(BeNil())
close(done)
}, 60)

View File

@@ -37,6 +37,7 @@ import (
core "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev"
"github.com/oam-dev/kubevela/pkg/oam/discoverymapper"
oamutil "github.com/oam-dev/kubevela/pkg/oam/util"
apply "github.com/oam-dev/kubevela/pkg/utils/apply"
)
// RolloutReconcileWaitTime is the time to wait before reconcile again an application still in rollout phase
@@ -45,9 +46,10 @@ const RolloutReconcileWaitTime = time.Second * 3
// Reconciler reconciles a Application object
type Reconciler struct {
client.Client
dm discoverymapper.DiscoveryMapper
Log logr.Logger
Scheme *runtime.Scheme
dm discoverymapper.DiscoveryMapper
Log logr.Logger
Scheme *runtime.Scheme
applicator apply.Applicator
}
// +kubebuilder:rbac:groups=core.oam.dev,resources=applications,verbs=get;list;watch;create;update;patch;delete
@@ -172,10 +174,11 @@ func Setup(mgr ctrl.Manager, _ core.Args, _ logging.Logger) error {
return fmt.Errorf("create discovery dm fail %w", err)
}
reconciler := Reconciler{
Client: mgr.GetClient(),
Log: ctrl.Log.WithName("Application"),
Scheme: mgr.GetScheme(),
dm: dm,
Client: mgr.GetClient(),
Log: ctrl.Log.WithName("Application"),
Scheme: mgr.GetScheme(),
dm: dm,
applicator: apply.NewAPIApplicator(mgr.GetClient()),
}
return reconciler.SetupWithManager(mgr)
}

View File

@@ -17,6 +17,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctypes "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/v2"
"k8s.io/utils/pointer"
ctrl "sigs.k8s.io/controller-runtime"
@@ -97,6 +98,11 @@ func (h *appHandler) apply(ctx context.Context, ac *v1alpha2.ApplicationConfigur
ac.Spec.Components[i].ComponentName = ""
}
}
if comp.Spec.Helm != nil {
if err := h.applyHelmModuleResources(ctx, comp, owners); err != nil {
return errors.Wrap(err, "cannot apply Helm module resources")
}
}
}
if err := h.createOrUpdateAppConfig(ctx, ac); err != nil {
@@ -198,6 +204,10 @@ func (h *appHandler) createOrUpdateComponent(ctx context.Context, comp *v1alpha2
// object is persisted as Raw data after going through api server
updatedComp := comp.DeepCopy()
updatedComp.Spec.Workload.Object = nil
if updatedComp.Spec.Helm != nil {
updatedComp.Spec.Helm.Release.Object = nil
updatedComp.Spec.Helm.Repository.Object = nil
}
if len(preRevisionName) != 0 {
needNewRevision, err := utils.CompareWithRevision(ctx, h.r,
logging.NewLogrLogger(h.logger), compName, compNameSpace, preRevisionName, &updatedComp.Spec)
@@ -336,3 +346,28 @@ func (h *appHandler) createNewAppConfig(ctx context.Context, appConfig *v1alpha2
// it ok if the create failed, we will create again in the next loop
return h.r.Create(ctx, appConfig)
}
// applyHelmModuleResources applies the HelmRepository and HelmRelease recorded
// on a Helm-module component so the flux2 controllers can reconcile them. Both
// objects receive the given owner references for garbage collection; the
// repository is applied before the release that references it.
func (h *appHandler) applyHelmModuleResources(ctx context.Context, comp *v1alpha2.Component, owners []metav1.OwnerReference) error {
	klog.Info("Process a Helm module component")
	repo, err := oamutil.RawExtension2Unstructured(&comp.Spec.Helm.Repository)
	if err != nil {
		return errors.Wrap(err, "cannot decode HelmRepository from component")
	}
	release, err := oamutil.RawExtension2Unstructured(&comp.Spec.Helm.Release)
	if err != nil {
		return errors.Wrap(err, "cannot decode HelmRelease from component")
	}
	release.SetOwnerReferences(owners)
	repo.SetOwnerReferences(owners)
	// apply the repository first: the release's chart sourceRef points at it
	if err := h.r.applicator.Apply(ctx, repo); err != nil {
		return errors.Wrap(err, "cannot apply HelmRepository")
	}
	klog.InfoS("Apply a HelmRepository", "namespace", repo.GetNamespace(), "name", repo.GetName())
	if err := h.r.applicator.Apply(ctx, release); err != nil {
		return errors.Wrap(err, "cannot apply HelmRelease")
	}
	klog.InfoS("Apply a HelmRelease", "namespace", release.GetNamespace(), "name", release.GetName())
	return nil
}

View File

@@ -29,7 +29,7 @@ import (
"github.com/crossplane/crossplane-runtime/pkg/resource"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
@@ -39,6 +39,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
helmapi "github.com/oam-dev/kubevela/pkg/appfile/helm/flux2apis"
"github.com/oam-dev/kubevela/pkg/controller/common"
"github.com/oam-dev/kubevela/pkg/controller/utils"
"github.com/oam-dev/kubevela/pkg/oam"
@@ -177,6 +178,7 @@ func (r *components) renderComponent(ctx context.Context, acc v1alpha2.Applicati
if err != nil {
return nil, errors.Wrapf(err, errFmtRenderWorkload, acc.ComponentName)
}
compInfoLabels := map[string]string{
oam.LabelAppName: ac.Name,
oam.LabelAppComponent: acc.ComponentName,
@@ -225,23 +227,39 @@ func (r *components) renderComponent(ctx context.Context, acc v1alpha2.Applicati
return nil, err
}
} else {
// we have a completely different approach on workload name for application generated appConfig
revision, err := utils.ExtractRevision(acc.RevisionName)
if err != nil {
return nil, err
}
SetAppWorkloadInstanceName(acc.ComponentName, w, revision)
if isComponentRolling && needRolloutTemplate {
// we have a special logic to emit the workload as a template so that the rollout
// controller can take over.
// TODO: We might need to add the owner reference to the existing object in case the resource
// is going to be shared (ie. CloneSet)
if err := prepWorkloadInstanceForRollout(w); err != nil {
// we have completely different approaches on workload name for application generated appConfig
if c.Spec.Helm != nil {
// for helm workload, make sure the workload is already generated by Helm successfully
existingWorkloadByHelm, err := discoverHelmModuleWorkload(ctx, r.client, c, ac.GetNamespace())
if err != nil {
klog.ErrorS(err, "Could not get the workload created by Helm module",
"component name", acc.ComponentName, "component revision", acc.RevisionName)
return nil, errors.Wrap(err, "cannot get the workload created by a Helm module")
}
klog.InfoS("Successfully discovered the workload created by Helm",
"component name", acc.ComponentName, "component revision", acc.RevisionName,
"workload name", existingWorkloadByHelm.GetName())
// use the name already generated instead of setting a new one
w.SetName(existingWorkloadByHelm.GetName())
} else {
// for non-helm workload, we generate a workload name based on component name and revision
revision, err := utils.ExtractRevision(acc.RevisionName)
if err != nil {
return nil, err
}
// yield the controller to the rollout
ref.Controller = pointer.BoolPtr(false)
klog.InfoS("Successfully rendered a workload instance for rollout", "workload", w.GetName())
SetAppWorkloadInstanceName(acc.ComponentName, w, revision)
if isComponentRolling && needRolloutTemplate {
// we have a special logic to emit the workload as a template so that the rollout
// controller can take over.
// TODO: We might need to add the owner reference to the existing object in case the resource
// is going to be shared (ie. CloneSet)
if err := prepWorkloadInstanceForRollout(w); err != nil {
return nil, err
}
// yield the controller to the rollout
ref.Controller = pointer.BoolPtr(false)
klog.InfoS("Successfully rendered a workload instance for rollout", "workload", w.GetName())
}
}
}
// set the owner reference after its ref is edited
@@ -290,7 +308,7 @@ func (r *components) renderTrait(ctx context.Context, ct v1alpha2.ComponentTrait
}
traitDef, err := util.FetchTraitDefinition(ctx, r.client, r.dm, t)
if err != nil {
if !apierrors.IsNotFound(err) {
if !kerrors.IsNotFound(err) {
return nil, nil, errors.Wrapf(err, errFmtGetTraitDefinition, t.GetAPIVersion(), t.GetKind(), t.GetName())
}
traitDef = util.GetDummyTraitDefinition(t)
@@ -906,3 +924,58 @@ func (r *components) getExistingWorkload(ctx context.Context, ac *v1alpha2.Appli
}
return existingWorkload, nil
}
// discoverHelmModuleWorkload will get the workload created by flux/helm-controller
// for a Helm-module component. It derives the expected workload name from the
// HelmRelease name and chart name following Helm's default full-name
// convention, fetches the object in ns, and verifies it is actually managed by
// this Helm release before returning it.
func discoverHelmModuleWorkload(ctx context.Context, c client.Reader, comp *v1alpha2.Component, ns string) (*unstructured.Unstructured, error) {
	if comp == nil || comp.Spec.Helm == nil {
		return nil, errors.New("the component has no valid helm module")
	}
	rls, err := util.RawExtension2Unstructured(&comp.Spec.Helm.Release)
	if err != nil {
		return nil, errors.Wrap(err, "cannot get helm release from component")
	}
	rlsName := rls.GetName()
	// the chart name lives at HelmChartNamePath inside the release spec; a
	// missing field and a traversal error are reported identically (the exact
	// message is asserted by unit tests)
	chartName, ok, err := unstructured.NestedString(rls.Object, helmapi.HelmChartNamePath...)
	if err != nil || !ok {
		return nil, errors.New("cannot get helm chart name")
	}
	// qualifiedFullName is used as the name of target workload.
	// It strictly follows the convention that Helm generate default full name as below:
	// > We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
	// > If release name contains chart name it will be used as a full name.
	// NOTE(review): the comment says "qualifiedFullName" but the variable is
	// qualifiedWorkloadName — align the two.
	qualifiedWorkloadName := rlsName
	if !strings.Contains(rlsName, chartName) {
		qualifiedWorkloadName = fmt.Sprintf("%s-%s", rlsName, chartName)
		if len(qualifiedWorkloadName) > 63 {
			// drop a trailing '-' left over from truncation, as Helm does
			qualifiedWorkloadName = strings.TrimSuffix(qualifiedWorkloadName[:63], "-")
		}
	}
	// the component's workload raw extension carries the GVK to fetch
	wl, err := util.RawExtension2Unstructured(&comp.Spec.Workload)
	if err != nil {
		return nil, errors.Wrap(err, "cannot get workload from component")
	}
	if err := c.Get(ctx, client.ObjectKey{Namespace: ns, Name: qualifiedWorkloadName}, wl); err != nil {
		return nil, err
	}
	// check it's created by helm and match the release info
	annots := wl.GetAnnotations()
	labels := wl.GetLabels()
	if annots == nil || labels == nil ||
		annots["meta.helm.sh/release-name"] != rlsName ||
		annots["meta.helm.sh/release-namespace"] != ns ||
		labels["app.kubernetes.io/managed-by"] != "Helm" {
		// NOTE(review): the message below says "meta.helm.sh/namespace" while
		// the annotation actually checked is "meta.helm.sh/release-namespace" —
		// fix the message key (verify no test asserts this exact string first).
		err := fmt.Errorf("the workload is found but not match with helm info(meta.helm.sh/release-name: %s, meta.helm.sh/namespace: %s, app.kubernetes.io/managed-by: Helm)",
			rlsName, ns)
		klog.ErrorS(err, "Found a name-matched workload but not managed by Helm", "name", qualifiedWorkloadName,
			"annotations", annots, "labels", labels)
		return nil, err
	}
	return wl, nil
}

View File

@@ -19,6 +19,7 @@ package applicationconfiguration
import (
"context"
"encoding/json"
"fmt"
"strconv"
"testing"
@@ -41,6 +42,7 @@ import (
core "github.com/oam-dev/kubevela/apis/core.oam.dev"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
helmapi "github.com/oam-dev/kubevela/pkg/appfile/helm/flux2apis"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/mock"
"github.com/oam-dev/kubevela/pkg/oam/util"
@@ -2144,3 +2146,128 @@ func TestMatchValue(t *testing.T) {
})
}
}
// TestDiscoverHelmModuleWorkload covers discoverHelmModuleWorkload's failure
// modes (component without a Helm module, unparsable release, release missing
// chart info, unparsable workload, cluster Get error, a name-matched workload
// not managed by Helm) as well as the successful discovery path.
func TestDiscoverHelmModuleWorkload(t *testing.T) {
	ns := "test-ns"
	releaseName := "test-rls"
	chartName := "test-chart"

	// A valid HelmRelease carrying the chart name/version that the discovery
	// util uses to derive the qualified workload name.
	release := &unstructured.Unstructured{}
	release.SetGroupVersionKind(helmapi.HelmReleaseGVK)
	release.SetName(releaseName)
	unstructured.SetNestedMap(release.Object, map[string]interface{}{
		"chart": map[string]interface{}{
			"spec": map[string]interface{}{
				"chart":   chartName,
				"version": "1.0.0",
			},
		},
	}, "spec")
	releaseRaw, _ := release.MarshalJSON()

	// Same release but with spec.chart nilled out, to exercise the
	// "cannot get helm chart name" branch.
	rlsWithoutChart := release.DeepCopy()
	unstructured.SetNestedMap(rlsWithoutChart.Object, nil, "spec", "chart")
	rlsWithoutChartRaw, _ := rlsWithoutChart.MarshalJSON()

	// A workload carrying exactly the labels/annotations Helm stamps on
	// objects it manages; used for the happy path.
	wl := &unstructured.Unstructured{}
	wl.SetLabels(map[string]string{
		"app.kubernetes.io/managed-by": "Helm",
	})
	wl.SetAnnotations(map[string]string{
		"meta.helm.sh/release-name":      releaseName,
		"meta.helm.sh/release-namespace": ns,
	})

	tests := map[string]struct {
		reason         string
		c              client.Reader
		helm           *v1alpha2.Helm
		workloadInComp *unstructured.Unstructured
		wantWorkload   *unstructured.Unstructured
		wantErr        error
	}{
		"CompHasNoHelm": {
			reason:  "An error should occur because component has no Helm module",
			wantErr: errors.New("the component has no valid helm module"),
		},
		"CannotGetReleaseFromComp": {
			reason: "An error should occur because cannot get release",
			helm: &v1alpha2.Helm{
				Release: runtime.RawExtension{Raw: []byte("boom")},
			},
			wantErr: errors.Wrap(errors.New("invalid character 'b' looking for beginning of value"),
				"cannot get helm release from component"),
		},
		"CannotGetChartFromRelease": {
			reason: "An error should occur because cannot get chart info",
			helm: &v1alpha2.Helm{
				Release: runtime.RawExtension{Raw: rlsWithoutChartRaw},
			},
			wantErr: errors.New("cannot get helm chart name"),
		},
		"CannotGetWLFromComp": {
			reason: "An error should occur because cannot get workload from component",
			helm: &v1alpha2.Helm{
				Release: runtime.RawExtension{Raw: releaseRaw},
			},
			wantErr: errors.Wrap(errors.New("unexpected end of JSON input"),
				"cannot get workload from component"),
		},
		"CannotGetWorkload": {
			reason: "An error should occur because cannot get workload from k8s cluster",
			helm: &v1alpha2.Helm{
				Release: runtime.RawExtension{Raw: releaseRaw},
			},
			workloadInComp: &unstructured.Unstructured{},
			c:              &test.MockClient{MockGet: test.NewMockGetFn(errors.New("boom"))},
			wantErr:        errors.New("boom"),
		},
		"GetNotMatchedWorkload": {
			reason: "An error should occur because the found workload is not managed by Helm",
			helm: &v1alpha2.Helm{
				Release: runtime.RawExtension{Raw: releaseRaw},
			},
			workloadInComp: &unstructured.Unstructured{},
			c: &test.MockClient{MockGet: test.NewMockGetFn(nil, func(obj runtime.Object) error {
				o, _ := obj.(*unstructured.Unstructured)
				*o = unstructured.Unstructured{}
				// managed-by label mismatch should make discovery reject it
				o.SetLabels(map[string]string{
					"app.kubernetes.io/managed-by": "non-helm",
				})
				return nil
			})},
			wantErr: fmt.Errorf("the workload is found but not match with helm info(meta.helm.sh/release-name: %s, meta.helm.sh/namespace: %s, app.kubernetes.io/managed-by: Helm)", "test-rls", "test-ns"),
		},
		"DiscoverSuccessfully": {
			reason: "No error should occur and the workload should be returned",
			c: &test.MockClient{MockGet: test.NewMockGetFn(nil, func(obj runtime.Object) error {
				o, _ := obj.(*unstructured.Unstructured)
				*o = *wl.DeepCopy()
				return nil
			})},
			workloadInComp: wl.DeepCopy(),
			helm: &v1alpha2.Helm{
				Release: runtime.RawExtension{Raw: releaseRaw},
			},
			wantWorkload: wl.DeepCopy(),
			wantErr:      nil,
		},
	}

	for caseName, tc := range tests {
		t.Run(caseName, func(t *testing.T) {
			// Assemble the component under test from the case's fixtures.
			comp := &v1alpha2.Component{}
			if tc.workloadInComp != nil {
				wlRaw, _ := tc.workloadInComp.MarshalJSON()
				comp.Spec.Workload = runtime.RawExtension{Raw: wlRaw}
			}
			comp.Spec.Helm = tc.helm
			wl, err := discoverHelmModuleWorkload(context.Background(), tc.c, comp, ns)
			if diff := cmp.Diff(tc.wantWorkload, wl); diff != "" {
				t.Errorf("\n%s\ndiscoverHelmModuleWorkload(...)(...): -want object, +got object\n%s\n", tc.reason, diff)
			}
			if diff := cmp.Diff(tc.wantErr, err, test.EquateErrors()); diff != "" {
				t.Errorf("\n%s\nApply(...): -want , +got \n%s\n", tc.reason, diff)
			}
		})
	}
}

View File

@@ -12,9 +12,9 @@ import (
"github.com/crossplane/crossplane-runtime/pkg/fieldpath"
"github.com/crossplane/crossplane-runtime/pkg/logging"
mapset "github.com/deckarep/golang-set"
v12 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
@@ -70,7 +70,7 @@ func DiscoveryFromPodSpec(w *unstructured.Unstructured, fieldPath string) ([]int
if err != nil {
return nil, fmt.Errorf("discovery podSpec from %s in workload %v err %w", fieldPath, w.GetName(), err)
}
var spec v1.PodSpec
var spec corev1.PodSpec
err = json.Unmarshal(data, &spec)
if err != nil {
return nil, fmt.Errorf("discovery podSpec from %s in workload %v err %w", fieldPath, w.GetName(), err)
@@ -92,7 +92,7 @@ func DiscoveryFromPodTemplate(w *unstructured.Unstructured, fields ...string) ([
if err != nil {
return nil, nil, fmt.Errorf("workload %v convert object err %w", w.GetName(), err)
}
var spec v1.PodTemplateSpec
var spec corev1.PodTemplateSpec
err = json.Unmarshal(data, &spec)
if err != nil {
return nil, nil, fmt.Errorf("workload %v convert object to PodTemplate err %w", w.GetName(), err)
@@ -104,7 +104,7 @@ func DiscoveryFromPodTemplate(w *unstructured.Unstructured, fields ...string) ([
return ports, spec.Labels, nil
}
func getContainerPorts(cs []v1.Container) []intstr.IntOrString {
func getContainerPorts(cs []corev1.Container) []intstr.IntOrString {
var ports []intstr.IntOrString
// TODO(wonderflow): exclude some sidecars
for _, container := range cs {
@@ -198,11 +198,11 @@ func ExtractRevision(revisionName string) (int, error) {
// CompareWithRevision compares a component's spec with the component's latest revision content
func CompareWithRevision(ctx context.Context, c client.Client, logger logging.Logger, componentName, nameSpace,
latestRevision string, curCompSpec *v1alpha2.ComponentSpec) (bool, error) {
oldRev := &v12.ControllerRevision{}
oldRev := &appsv1.ControllerRevision{}
// retry on NotFound since we update the component last revision first
err := wait.ExponentialBackoff(retry.DefaultBackoff, func() (bool, error) {
err := c.Get(ctx, client.ObjectKey{Namespace: nameSpace, Name: latestRevision}, oldRev)
if err != nil && !errors.IsNotFound(err) {
if err != nil && !kerrors.IsNotFound(err) {
logger.Info(fmt.Sprintf("get old controllerRevision %s error %v",
latestRevision, err), "componentName", componentName)
return false, err

View File

@@ -21,6 +21,8 @@ type Template struct {
Health string
CustomStatus string
CapabilityCategory types.CapabilityCategory
Reference v1alpha2.DefinitionReference
Helm *v1alpha2.Helm
}
// GetScopeGVK Get ScopeDefinition
@@ -45,10 +47,6 @@ func LoadTemplate(ctx context.Context, cli client.Reader, key string, kd types.C
if err != nil {
return nil, errors.WithMessagef(err, "LoadTemplate [%s] ", key)
}
var capabilityCategory types.CapabilityCategory
if wd.Annotations["type"] == string(types.TerraformCategory) {
capabilityCategory = types.TerraformCategory
}
tmpl, err := NewTemplate(wd.Spec.Schematic, wd.Spec.Status, wd.Spec.Extension)
if err != nil {
return nil, errors.WithMessagef(err, "LoadTemplate [%s] ", key)
@@ -56,7 +54,10 @@ func LoadTemplate(ctx context.Context, cli client.Reader, key string, kd types.C
if tmpl == nil {
return nil, errors.New("no template found in definition")
}
tmpl.CapabilityCategory = capabilityCategory
tmpl.Reference = wd.Spec.Reference
if wd.Annotations["type"] == string(types.TerraformCategory) {
tmpl.CapabilityCategory = types.TerraformCategory
}
return tmpl, nil
case types.TypeTrait:
@@ -76,6 +77,7 @@ func LoadTemplate(ctx context.Context, cli client.Reader, key string, kd types.C
if tmpl == nil {
return nil, errors.New("no template found in definition")
}
tmpl.Reference = td.Spec.Reference
tmpl.CapabilityCategory = capabilityCategory
return tmpl, nil
case types.TypeScope:
@@ -84,16 +86,29 @@ func LoadTemplate(ctx context.Context, cli client.Reader, key string, kd types.C
return nil, fmt.Errorf("kind(%s) of %s not supported", kd, key)
}
// NewTemplate will create CUE template for inner AbstractEngine using.
// NewTemplate will create template for inner AbstractEngine using.
func NewTemplate(schematic *v1alpha2.Schematic, status *v1alpha2.Status, raw *runtime.RawExtension) (*Template, error) {
var template string
if schematic != nil && schematic.CUE != nil {
template = schematic.CUE.Template
tmp := &Template{}
if status != nil {
tmp.CustomStatus = status.CustomStatus
tmp.Health = status.HealthPolicy
}
if schematic != nil {
if schematic.CUE != nil {
tmp.TemplateStr = schematic.CUE.Template
// CUE module has highest priority
// no need to check other schematic types
return tmp, nil
}
if schematic.HELM != nil {
tmp.Helm = schematic.HELM
tmp.CapabilityCategory = types.HelmCategory
return tmp, nil
}
}
extension := map[string]interface{}{}
tmp := &Template{
TemplateStr: template,
}
if tmp.TemplateStr == "" && raw != nil {
if err := json.Unmarshal(raw.Raw, &extension); err != nil {
return nil, err
@@ -104,10 +119,6 @@ func NewTemplate(schematic *v1alpha2.Schematic, status *v1alpha2.Status, raw *ru
}
}
}
if status != nil {
tmp.CustomStatus = status.CustomStatus
tmp.Health = status.HealthPolicy
}
return tmp, nil
}

View File

@@ -446,9 +446,13 @@ var _ = Describe("Versioning mechanism of components", func() {
By("Create Component v2")
var comp2 v1alpha2.Component
Expect(readYaml("testdata/revision/comp-v2.yaml", &comp2)).Should(BeNil())
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: componentName}, &comp1)
comp2.ResourceVersion = comp1.ResourceVersion
Expect(k8sClient.Update(ctx, &comp2)).Should(Succeed())
Eventually(func() error {
tmp := &v1alpha2.Component{}
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: componentName}, tmp)
updatedComp := comp2.DeepCopy()
updatedComp.ResourceVersion = tmp.ResourceVersion
return k8sClient.Update(ctx, updatedComp)
}, 5*time.Second, time.Second).Should(Succeed())
By("Workload exist with revisionName v2")
var w2 unstructured.Unstructured

View File

@@ -0,0 +1,287 @@
package controllers_test
import (
"context"
"fmt"
"strings"
"time"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
"github.com/oam-dev/kubevela/pkg/oam/util"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// E2E suite for the Helm module: installs a Helm-schematic WorkloadDefinition
// backed by the podinfo chart, deploys an Application using it, then verifies
// that traits are applied to the Helm-created workload and that application
// settings override the chart's default values, both on create and on update.
var _ = Describe("Test application containing helm module", func() {
	ctx := context.Background()

	var (
		namespace = "helm-test-ns"
		appName   = "test-app"
		compName  = "test-comp"
		wdName    = "webapp-chart"
		tdName    = "virtualgroup"
	)
	var app v1alpha2.Application
	var ns = corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}

	BeforeEach(func() {
		// Recreate the test namespace from scratch so each spec starts clean.
		Eventually(
			func() error {
				return k8sClient.Delete(ctx, &ns, client.PropagationPolicy(metav1.DeletePropagationForeground))
			},
			time.Second*120, time.Millisecond*500).Should(SatisfyAny(BeNil(), &util.NotFoundMatcher{}))
		By("make sure all the resources are removed")
		objectKey := client.ObjectKey{
			Name: namespace,
		}
		Eventually(
			func() error {
				return k8sClient.Get(ctx, objectKey, &corev1.Namespace{})
			},
			time.Second*120, time.Millisecond*500).Should(&util.NotFoundMatcher{})
		Eventually(
			func() error {
				return k8sClient.Create(ctx, &ns)
			},
			time.Second*3, time.Millisecond*300).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))

		// A WorkloadDefinition whose schematic is a Helm module: the podinfo
		// chart (default image tag 5.1.4) fetched from the oam.dev catalog repo.
		wd := v1alpha2.WorkloadDefinition{}
		wd.SetName(wdName)
		wd.SetNamespace(namespace)
		wd.Spec.Reference = v1alpha2.DefinitionReference{Name: "deployments.apps", Version: "v1"}
		wd.Spec.Schematic = &v1alpha2.Schematic{
			HELM: &v1alpha2.Helm{
				Release: util.Object2RawExtension(map[string]interface{}{
					"chart": map[string]interface{}{
						"spec": map[string]interface{}{
							"chart":   "podinfo",
							"version": "5.1.4",
						},
					},
				}),
				Repository: util.Object2RawExtension(map[string]interface{}{
					"url": "http://oam.dev/catalog/",
				}),
			},
		}
		Expect(k8sClient.Create(ctx, &wd)).Should(Succeed())

		By("Install a patch trait used to test CUE module")
		td := v1alpha2.TraitDefinition{}
		td.SetName(tdName)
		td.SetNamespace(namespace)
		td.Spec.AppliesToWorkloads = []string{"deployments.apps"}
		td.Spec.Schematic = &v1alpha2.Schematic{
			CUE: &v1alpha2.CUE{
				Template: `patch: {
      	spec: template: {
      		metadata: labels: {
      			if parameter.type == "namespace" {
      				"app.namespace.virtual.group": parameter.group
      			}
      			if parameter.type == "cluster" {
      				"app.cluster.virtual.group": parameter.group
      			}
      		}
      	}
      }
      parameter: {
      	group: *"default" | string
      	type: *"namespace" | string
      }`,
			},
		}
		Expect(k8sClient.Create(ctx, &td)).Should(Succeed())

		By("Add 'deployments.apps' to scaler's appliesToWorkloads")
		scalerTd := v1alpha2.TraitDefinition{}
		Expect(k8sClient.Get(ctx, client.ObjectKey{Name: "scaler", Namespace: "vela-system"}, &scalerTd)).Should(Succeed())
		scalerTd.Spec.AppliesToWorkloads = []string{"deployments.apps", "webservice", "worker"}
		scalerTd.SetResourceVersion("")
		Expect(k8sClient.Patch(ctx, &scalerTd, client.Merge)).Should(Succeed())
	})

	AfterEach(func() {
		By("Clean up resources after a test")
		k8sClient.DeleteAllOf(ctx, &v1alpha2.Application{}, client.InNamespace(namespace))
		k8sClient.DeleteAllOf(ctx, &v1alpha2.WorkloadDefinition{}, client.InNamespace(namespace))
		k8sClient.DeleteAllOf(ctx, &v1alpha2.TraitDefinition{}, client.InNamespace(namespace))
		Expect(k8sClient.Delete(ctx, &ns, client.PropagationPolicy(metav1.DeletePropagationForeground))).Should(Succeed())
		time.Sleep(15 * time.Second)
		By("Remove 'deployments.apps' from scaler's appliesToWorkloads")
		scalerTd := v1alpha2.TraitDefinition{}
		Expect(k8sClient.Get(ctx, client.ObjectKey{Name: "scaler", Namespace: "vela-system"}, &scalerTd)).Should(Succeed())
		scalerTd.Spec.AppliesToWorkloads = []string{"webservice", "worker"}
		scalerTd.SetResourceVersion("")
		Expect(k8sClient.Patch(ctx, &scalerTd, client.Merge)).Should(Succeed())
	})

	It("Test deploy an application containing helm module", func() {
		app = v1alpha2.Application{
			ObjectMeta: metav1.ObjectMeta{
				Name:      appName,
				Namespace: namespace,
			},
			Spec: v1alpha2.ApplicationSpec{
				Components: []v1alpha2.ApplicationComponent{
					{
						Name:         compName,
						WorkloadType: wdName,
						// settings override the chart's default values
						Settings: util.Object2RawExtension(map[string]interface{}{
							"image": map[string]interface{}{
								"tag": "5.1.2",
							},
						}),
						Traits: []v1alpha2.ApplicationTrait{
							{
								Name: "scaler",
								Properties: util.Object2RawExtension(map[string]interface{}{
									"replicas": 2,
								}),
							},
							{
								Name: tdName,
								Properties: util.Object2RawExtension(map[string]interface{}{
									"group": "my-group",
									"type":  "cluster",
								}),
							},
						},
					},
				},
			},
		}
		By("Create application")
		Expect(k8sClient.Create(ctx, &app)).Should(Succeed())

		ac := &v1alpha2.ApplicationConfiguration{}
		acName := fmt.Sprintf("%s-v1", appName)
		By("Verify the AppConfig is created successfully")
		Eventually(func() error {
			return k8sClient.Get(ctx, client.ObjectKey{Name: acName, Namespace: namespace}, ac)
		}, 30*time.Second, time.Second).Should(Succeed())

		By("Verify the workload(deployment) is created successfully by Helm")
		deploy := &appsv1.Deployment{}
		// the workload name follows Helm's "<release>-<chart>" convention
		deployName := fmt.Sprintf("%s-%s-podinfo", appName, compName)
		Eventually(func() error {
			return k8sClient.Get(ctx, client.ObjectKey{Name: deployName, Namespace: namespace}, deploy)
		}, 60*time.Second, 5*time.Second).Should(Succeed())

		By("Verify two traits are applied to the workload")
		Eventually(func() bool {
			acUpdate := ac.DeepCopy()
			acUpdate.SetResourceVersion("")
			acUpdate.SetAnnotations(map[string]string{
				"app.oam.dev/requestreconcile": time.Now().String(),
			})
			// a workaround to trigger reconcile appconfig immediately
			if err := k8sClient.Patch(ctx, acUpdate, client.Merge); err != nil {
				By(err.Error())
				return false
			}
			deploy := &appsv1.Deployment{}
			if err := k8sClient.Get(ctx, client.ObjectKey{Name: deployName, Namespace: namespace}, deploy); err != nil {
				return false
			}
			By("Verify patch trait is applied")
			templateLabels := deploy.Spec.Template.Labels
			if templateLabels["app.cluster.virtual.group"] != "my-group" {
				return false
			}
			By("Verify scaler trait is applied")
			if *deploy.Spec.Replicas != 2 {
				return false
			}
			By("Verify application's settings override chart default values")
			// the default value of 'image.tag' is 5.1.4 in the chart, but settings reset it to 5.1.2
			return strings.HasSuffix(deploy.Spec.Template.Spec.Containers[0].Image, "5.1.2")
			// it takes pretty long time to fetch chart and install the Helm release
		}, 120*time.Second, 10*time.Second).Should(BeTrue())

		By("Update the application")
		app = v1alpha2.Application{
			ObjectMeta: metav1.ObjectMeta{
				Name:      appName,
				Namespace: namespace,
			},
			Spec: v1alpha2.ApplicationSpec{
				Components: []v1alpha2.ApplicationComponent{
					{
						Name:         compName,
						WorkloadType: wdName,
						Settings: util.Object2RawExtension(map[string]interface{}{
							"image": map[string]interface{}{
								"tag": "5.1.3", // change 5.1.2 => 5.1.3
							},
						}),
						Traits: []v1alpha2.ApplicationTrait{
							{
								Name: "scaler",
								Properties: util.Object2RawExtension(map[string]interface{}{
									"replicas": 3, // change 2 => 3
								}),
							},
							{
								Name: tdName,
								Properties: util.Object2RawExtension(map[string]interface{}{
									"group": "my-group-0", // change my-group => my-group-0
									"type":  "cluster",
								}),
							},
						},
					},
				},
			},
		}
		Expect(k8sClient.Patch(ctx, &app, client.Merge)).Should(Succeed())

		By("Verify the appconfig is updated")
		deploy = &appsv1.Deployment{}
		Eventually(func() bool {
			ac = &v1alpha2.ApplicationConfiguration{}
			if err := k8sClient.Get(ctx, client.ObjectKey{Name: acName, Namespace: namespace}, ac); err != nil {
				return false
			}
			return ac.GetGeneration() == 2
		}, 15*time.Second, 3*time.Second).Should(BeTrue())

		By("Verify the changes are applied to the workload")
		Eventually(func() bool {
			acUpdate := ac.DeepCopy()
			acUpdate.SetResourceVersion("")
			acUpdate.SetAnnotations(map[string]string{
				"app.oam.dev/requestreconcile": time.Now().String(),
			})
			// a workaround to trigger reconcile appconfig immediately
			if err := k8sClient.Patch(ctx, acUpdate, client.Merge); err != nil {
				By(err.Error())
				return false
			}
			deploy := &appsv1.Deployment{}
			if err := k8sClient.Get(ctx, client.ObjectKey{Name: deployName, Namespace: namespace}, deploy); err != nil {
				return false
			}
			By("Verify new patch trait is applied")
			templateLabels := deploy.Spec.Template.Labels
			if templateLabels["app.cluster.virtual.group"] != "my-group-0" {
				return false
			}
			By("Verify new scaler trait is applied")
			if *deploy.Spec.Replicas != 3 {
				return false
			}
			By("Verify new application's settings override chart default values")
			return strings.HasSuffix(deploy.Spec.Template.Spec.Containers[0].Image, "5.1.3")
		}, 120*time.Second, 10*time.Second).Should(BeTrue())
	})
})