Compare commits

..

2 Commits

Author SHA1 Message Date
wyike
89d8e37c7c disable rollout and deploy docs (#1860) 2021-07-01 13:38:05 +08:00
yangsoon
446f682f6c fix invalid metadata.labels error when specify the version of the trait in the app (#1855) (#1857)
* fix trait name
* add test
2021-06-29 19:36:53 +08:00
7 changed files with 210 additions and 508 deletions

View File

@@ -1,238 +0,0 @@
---
title: Advanced Rollout Plan
---
The rollout plan feature in KubeVela is essentially provided by `AppRollout` API.
## AppRollout
Below is an example of a rolling update of an application from v1 to v2 in three batches. The
first batch contains only 1 pod while the rest of the batches split the rest.
```yaml
apiVersion: core.oam.dev/v1beta1
kind: AppRollout
metadata:
name: rolling-example
spec:
sourceAppRevisionName: test-rolling-v1
targetAppRevisionName: test-rolling-v2
componentList:
- metrics-provider
rolloutPlan:
rolloutStrategy: "IncreaseFirst"
rolloutBatches:
- replicas: 1
- replicas: 50%
- replicas: 50%
batchPartition: 1
```
## Basic Usage
1. Deploy application
```yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: test-rolling
annotations:
"app.oam.dev/rolling-components": "metrics-provider"
"app.oam.dev/rollout-template": "true"
spec:
components:
- name: metrics-provider
type: worker
properties:
cmd:
- ./podinfo
- stress-cpu=1
image: stefanprodan/podinfo:4.0.6
port: 8080
replicas: 5
```
Verify that AppRevision `test-rolling-v1` has been generated
```shell
$ kubectl get apprev test-rolling-v1
NAME AGE
test-rolling-v1 9s
```
2. Attach the following rollout plan to roll out the application to v1
```yaml
apiVersion: core.oam.dev/v1beta1
kind: AppRollout
metadata:
name: rolling-example
spec:
# application (revision) reference
targetAppRevisionName: test-rolling-v1
componentList:
- metrics-provider
rolloutPlan:
rolloutStrategy: "IncreaseFirst"
rolloutBatches:
- replicas: 10%
- replicas: 40%
- replicas: 50%
targetSize: 5
```
User can check the status of the ApplicationRollout and wait for the rollout to complete.
3. User can continue to modify the application image tag and apply it. This will generate a new AppRevision `test-rolling-v2`
```yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: test-rolling
annotations:
"app.oam.dev/rolling-components": "metrics-provider"
"app.oam.dev/rollout-template": "true"
spec:
components:
- name: metrics-provider
type: worker
properties:
cmd:
- ./podinfo
- stress-cpu=1
image: stefanprodan/podinfo:5.0.2
port: 8080
replicas: 5
```
Verify that AppRevision `test-rolling-v2` has been generated
```shell
$ kubectl get apprev test-rolling-v2
NAME AGE
test-rolling-v2 7s
```
4. Apply the application rollout that upgrades the application from v1 to v2
```yaml
apiVersion: core.oam.dev/v1beta1
kind: AppRollout
metadata:
name: rolling-example
spec:
# application (revision) reference
sourceAppRevisionName: test-rolling-v1
targetAppRevisionName: test-rolling-v2
componentList:
- metrics-provider
rolloutPlan:
rolloutStrategy: "IncreaseFirst"
rolloutBatches:
- replicas: 1
- replicas: 2
- replicas: 2
```
User can check the status of the ApplicationRollout and see that the rollout completes, and the
ApplicationRollout's "Rolling State" becomes `rolloutSucceed`
## Advanced Usage
Using `AppRollout` separately can enable some advanced use case.
### Revert
5. Apply the application rollout that reverts the application from v2 to v1
```yaml
apiVersion: core.oam.dev/v1beta1
kind: AppRollout
metadata:
name: rolling-example
spec:
# application (revision) reference
sourceAppRevisionName: test-rolling-v2
targetAppRevisionName: test-rolling-v1
componentList:
- metrics-provider
rolloutPlan:
rolloutStrategy: "IncreaseFirst"
rolloutBatches:
- replicas: 1
- replicas: 2
- replicas: 2
```
### Skip Revision Rollout
6. User can continue to modify the application image tag and apply it. This will generate a new AppRevision `test-rolling-v3`
```yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: test-rolling
annotations:
"app.oam.dev/rolling-components": "metrics-provider"
"app.oam.dev/rollout-template": "true"
spec:
components:
- name: metrics-provider
type: worker
properties:
cmd:
- ./podinfo
- stress-cpu=1
image: stefanprodan/podinfo:5.2.0
port: 8080
replicas: 5
```
Verify that AppRevision `test-rolling-v3` has been generated
```shell
$ kubectl get apprev test-rolling-v3
NAME AGE
test-rolling-v3 7s
```
7. Apply the application rollout that rolls out the application from v1 to v3
```yaml
apiVersion: core.oam.dev/v1beta1
kind: AppRollout
metadata:
name: rolling-example
spec:
# application (revision) reference
sourceAppRevisionName: test-rolling-v1
targetAppRevisionName: test-rolling-v3
componentList:
- metrics-provider
rolloutPlan:
rolloutStrategy: "IncreaseFirst"
rolloutBatches:
- replicas: 1
- replicas: 2
- replicas: 2
```
## More Details About `AppRollout`
### Design Principles and Goals
There are several attempts at solving the rollout problem in the cloud native community. However, none
of them provide a true rolling style upgrade. For example, flagger supports Blue/Green, Canary
and A/B testing. Therefore, we decide to add support for batch based rolling upgrade as
our first style to support in KubeVela.
We design KubeVela rollout solutions with the following principles in mind
- First, we want all flavors of rollout controllers share the same core rollout
related logic. The trait and application related logic can be easily encapsulated into its own
package.
- Second, the core rollout related logic is easily extensible to support different type of
workloads, i.e. Deployment, CloneSet, Statefulset, DaemonSet or even customized workloads.
- Thirdly, the core rollout related logic has a well documented state machine that
does state transition explicitly.
- Finally, the controllers can support all the rollout/upgrade needs of an application running
in a production environment including Blue/Green, Canary and A/B testing.
### State Transition
Here is the high level state transition graph
![](../../resources/approllout-status-transition.jpg)
### Roadmap
Our recent roadmap for rollout plan is [here](./roadmap).

View File

@@ -1,230 +0,0 @@
---
title: Placement
---
## Introduction
In this section, we will introduce how to use KubeVela to place application across multiple clusters with traffic management enabled. For traffic management, KubeVela currently allows you to split the traffic onto both the old and new revisions during rolling update and verify the new version while preserving service availability.
### AppDeployment
The `AppDeployment` API in KubeVela is provided to satisfy such requirements. Here's an overview of the API:
```yaml
apiVersion: core.oam.dev/v1beta1
kind: AppDeployment
metadata:
name: sample-appdeploy
spec:
traffic:
hosts:
- example.com
http:
- match:
# match any requests to 'example.com/example-app'
- uri:
prefix: "/example-app"
# split traffic 50/50 on v1/v2 versions of the app
weightedTargets:
- revisionName: example-app-v1
componentName: testsvc
port: 80
weight: 50
- revisionName: example-app-v2
componentName: testsvc
port: 80
weight: 50
appRevisions:
- # Name of the AppRevision.
# Each modification to Application would generate a new AppRevision.
revisionName: example-app-v1
# Cluster specific workload placement config
placement:
- clusterSelector:
# You can select Clusters by name or labels.
# If multiple clusters is selected, one will be picked via a unique hashing algorithm.
labels:
tier: production
name: prod-cluster-1
distribution:
replicas: 5
- # If no clusterSelector is given, it will use the host cluster in which this CR exists
distribution:
replicas: 5
- revisionName: example-app-v2
placement:
- clusterSelector:
labels:
tier: production
name: prod-cluster-1
distribution:
replicas: 5
- distribution:
replicas: 5
```
### Cluster
The clusters selected in the `placement` part above are defined in the Cluster CRD. Here's what it looks like:
```yaml
apiVersion: core.oam.dev/v1beta1
kind: Cluster
metadata:
name: prod-cluster-1
labels:
tier: production
spec:
kubeconfigSecretRef:
name: kubeconfig-cluster-1 # the secret name
```
The secret must contain the kubeconfig credentials in `config` field:
```yaml
apiVersion: v1
kind: Secret
metadata:
name: kubeconfig-cluster-1
data:
config: ... # kubeconfig data
```
## Quickstart
Here's a step-by-step tutorial for you to try out. All of the yaml files are from [`docs/examples/appdeployment/`](https://github.com/oam-dev/kubevela/tree/master/docs/examples/appdeployment).
You must run all commands in that directory.
1. Create an Application
```bash
$ cat <<EOF | kubectl apply -f -
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: example-app
annotations:
app.oam.dev/revision-only: "true"
spec:
components:
- name: testsvc
type: webservice
properties:
addRevisionLabel: true
image: crccheck/hello-world
port: 8000
EOF
```
This will create `example-app-v1` AppRevision. Check it:
```bash
$ kubectl get applicationrevisions.core.oam.dev
NAME AGE
example-app-v1 116s
```
> Note: with the `app.oam.dev/revision-only: "true"` annotation, the above `Application` resource won't create any pod instances and will leave the real deployment process to `AppDeployment`.
1. Then use the above AppRevision to create an AppDeployment.
```bash
$ kubectl apply -f appdeployment-1.yaml
```
> Note: in order for AppDeployment to work, your workload object must have a `spec.replicas` field for scaling.
1. Now you can check that there will be 1 deployment and 2 pod instances deployed
```bash
$ kubectl get deploy
NAME READY UP-TO-DATE AVAILABLE AGE
testsvc-v1 2/2 2 0 27s
```
1. Update Application properties:
```bash
$ cat <<EOF | kubectl apply -f -
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: example-app
annotations:
app.oam.dev/revision-only: "true"
spec:
components:
- name: testsvc
type: webservice
properties:
addRevisionLabel: true
image: nginx
port: 80
EOF
```
This will create a new `example-app-v2` AppRevision. Check it:
```bash
$ kubectl get applicationrevisions.core.oam.dev
NAME
example-app-v1
example-app-v2
```
1. Then use the two AppRevisions to update the AppDeployment:
```bash
$ kubectl apply -f appdeployment-2.yaml
```
(Optional) If you have Istio installed, you can apply the AppDeployment with traffic split:
```bash
# set up gateway if not yet
$ kubectl apply -f gateway.yaml
$ kubectl apply -f appdeployment-2-traffic.yaml
```
Note that for traffic split to work, you must set the following pod labels in workload cue templates (see [webservice.cue](https://github.com/oam-dev/kubevela/blob/master/hack/vela-templates/cue/webservice.cue)):
```shell
"app.oam.dev/component": context.name
"app.oam.dev/appRevision": context.appRevision
```
1. Now you can check that there will be 1 deployment and 1 pod per revision.
```bash
$ kubectl get deploy
NAME READY UP-TO-DATE AVAILABLE AGE
testsvc-v1 1/1 1 1 2m14s
testsvc-v2 1/1 1 1 8s
```
(Optional) To verify traffic split:
```bash
# run this in another terminal
$ kubectl -n istio-system port-forward service/istio-ingressgateway 8080:80
Forwarding from 127.0.0.1:8080 -> 8080
Forwarding from [::1]:8080 -> 8080
# The command should return pages of either docker whale or nginx in 50/50
$ curl -H "Host: example-app.example.com" http://localhost:8080/
```
1. Cleanup:
```bash
kubectl delete appdeployments.core.oam.dev --all
kubectl delete applications.core.oam.dev --all
```

View File

@@ -62,10 +62,5 @@ spec:
User can check the status of the application and see the rollout completes, and the
application's `status.rollout.rollingState` becomes `rolloutSucceed`.
## Advanced Usage
If you want to control and rollout the specific application revisions, or do revert, please refer to [Advanced Usage](advanced-rollout) to learn more details.

View File

@@ -62,4 +62,4 @@ Here are some recommended next steps:
- Learn KubeVela's [core concepts](./concepts)
- Learn more details about [`Application`](end-user/application) and what it can do for you.
- Learn how to attach [rollout plan](end-user/scopes/rollout-plan) to this application, or [place it to multiple runtime clusters](end-user/scopes/appdeploy).
- Learn how to attach [rollout plan](end-user/scopes/rollout-plan) to this application.

View File

@@ -44,7 +44,6 @@ module.exports = {
'end-user/traits/more',
]
},
'end-user/scopes/appdeploy',
'end-user/scopes/rollout-plan',
{
'Observability': [

View File

@@ -180,15 +180,20 @@ func (p *Parser) parseTrait(ctx context.Context, name string, properties map[str
if err != nil {
return nil, err
}
traitName, err := util.ConvertDefinitionRevName(name)
if err != nil {
traitName = name
}
return &Trait{
Name: name,
Name: traitName,
CapabilityCategory: templ.CapabilityCategory,
Params: properties,
Template: templ.TemplateStr,
HealthCheckPolicy: templ.Health,
CustomStatusFormat: templ.CustomStatus,
FullTemplate: templ,
engine: definition.NewTraitAbstractEngine(name, p.pd),
engine: definition.NewTraitAbstractEngine(traitName, p.pd),
}, nil
}

View File

@@ -83,6 +83,35 @@ var _ = Describe("Test application of the specified definition version", func()
return nil
}, 40*time.Second, time.Second).Should(BeNil())
labelV2DefRev := new(v1beta1.DefinitionRevision)
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: "label-v2", Namespace: namespace}, labelV2DefRev)
}, 15*time.Second, time.Second).Should(BeNil())
webserviceV1 := webServiceWithNoTemplate.DeepCopy()
webserviceV1.Spec.Schematic.CUE.Template = webServiceV1Template
webserviceV1.SetNamespace(namespace)
Expect(k8sClient.Create(ctx, webserviceV1)).Should(Succeed())
webserviceV1DefRev := new(v1beta1.DefinitionRevision)
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: "webservice-v1", Namespace: namespace}, webserviceV1DefRev)
}, 15*time.Second, time.Second).Should(BeNil())
webserviceV2 := new(v1beta1.ComponentDefinition)
Eventually(func() error {
err := k8sClient.Get(ctx, client.ObjectKey{Name: "webservice", Namespace: namespace}, webserviceV2)
if err != nil {
return err
}
webserviceV2.Spec.Schematic.CUE.Template = webServiceV2Template
return k8sClient.Update(ctx, webserviceV2)
}, 15*time.Second, time.Second).Should(BeNil())
webserviceV2DefRev := new(v1beta1.DefinitionRevision)
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: "webservice-v2", Namespace: namespace}, webserviceV2DefRev)
}, 15*time.Second, time.Second).Should(BeNil())
})
AfterEach(func() {
@@ -146,37 +175,6 @@ var _ = Describe("Test application of the specified definition version", func()
return nil
}, 40*time.Second, time.Second).Should(BeNil())
webserviceV1 := webServiceWithNoTemplate.DeepCopy()
webserviceV1.Spec.Schematic.CUE.Template = webServiceV1Template
webserviceV1.SetNamespace(namespace)
Expect(k8sClient.Create(ctx, webserviceV1)).Should(Succeed())
Eventually(func() error {
err := k8sClient.Get(ctx, client.ObjectKey{Name: "webservice", Namespace: namespace}, webserviceV1)
if err != nil {
return err
}
webserviceV1.Spec.Schematic.CUE.Template = webServiceV2Template
return k8sClient.Update(ctx, webserviceV1)
}, 15*time.Second, time.Second).Should(BeNil())
webserviceDefRevList := new(v1beta1.DefinitionRevisionList)
webserviceDefRevListOpts := []client.ListOption{
client.InNamespace(namespace),
client.MatchingLabels{
oam.LabelComponentDefinitionName: "webservice",
},
}
Eventually(func() error {
err := k8sClient.List(ctx, webserviceDefRevList, webserviceDefRevListOpts...)
if err != nil {
return err
}
if len(webserviceDefRevList.Items) != 2 {
return fmt.Errorf("error defRevison number wants %d, actually %d", 2, len(webserviceDefRevList.Items))
}
return nil
}, 40*time.Second, time.Second).Should(BeNil())
app := v1beta1.Application{
ObjectMeta: metav1.ObjectMeta{
Name: appName,
@@ -709,6 +707,91 @@ var _ = Describe("Test application of the specified definition version", func()
Expect(k8sClient.Patch(ctx, &app, client.Merge)).Should(HaveOccurred())
})
// refer to https://github.com/oam-dev/kubevela/discussions/1810#discussioncomment-914295
It("Test k8s resources created by application whether with correct label", func() {
var (
appName = "test-resources-labels"
compName = "web"
)
exposeV1 := exposeWithNoTemplate.DeepCopy()
exposeV1.Spec.Schematic.CUE.Template = exposeV1Template
exposeV1.SetNamespace(namespace)
Expect(k8sClient.Create(ctx, exposeV1)).Should(Succeed())
exposeV1DefRev := new(v1beta1.DefinitionRevision)
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: "expose-v1", Namespace: namespace}, exposeV1DefRev)
}, 15*time.Second, time.Second).Should(BeNil())
exposeV2 := new(v1beta1.TraitDefinition)
Eventually(func() error {
err := k8sClient.Get(ctx, client.ObjectKey{Name: "expose", Namespace: namespace}, exposeV2)
if err != nil {
return err
}
exposeV2.Spec.Schematic.CUE.Template = exposeV2Templae
return k8sClient.Update(ctx, exposeV2)
}, 15*time.Second, time.Second).Should(BeNil())
exposeV2DefRev := new(v1beta1.DefinitionRevision)
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: "expose-v2", Namespace: namespace}, exposeV2DefRev)
}, 15*time.Second, time.Second).Should(BeNil())
app := v1beta1.Application{
ObjectMeta: metav1.ObjectMeta{
Name: appName,
Namespace: namespace,
},
Spec: v1beta1.ApplicationSpec{
Components: []v1beta1.ApplicationComponent{
{
Name: compName,
Type: "webservice@v1",
Properties: util.Object2RawExtension(map[string]interface{}{
"image": "crccheck/hello-world",
"port": 8000,
}),
Traits: []v1beta1.ApplicationTrait{
{
Type: "expose@v1",
Properties: util.Object2RawExtension(map[string]interface{}{
"port": []int{8000},
}),
},
},
},
},
},
}
By("Create application")
Eventually(func() error {
return k8sClient.Create(ctx, app.DeepCopy())
}, 10*time.Second, 500*time.Millisecond).Should(Succeed())
By("Verify the workload(deployment) is created successfully")
webServiceDeploy := &appsv1.Deployment{}
deployName := compName
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: deployName, Namespace: namespace}, webServiceDeploy)
}, 30*time.Second, 3*time.Second).Should(Succeed())
By("Verify the workload label generated by KubeVela")
workloadLabel := webServiceDeploy.GetLabels()[oam.WorkloadTypeLabel]
Expect(workloadLabel).Should(Equal("webservice-v1"))
By("Verify the trait(service) is created successfully")
exposeSVC := &corev1.Service{}
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: compName, Namespace: namespace}, exposeSVC)
}, 30*time.Second, 3*time.Second).Should(Succeed())
By("Verify the trait label generated by KubeVela")
traitLabel := exposeSVC.GetLabels()[oam.TraitTypeLabel]
Expect(traitLabel).Should(Equal("expose-v1"))
})
})
var webServiceWithNoTemplate = &v1beta1.ComponentDefinition{
@@ -788,6 +871,23 @@ var labelWithNoTemplate = &v1beta1.TraitDefinition{
},
}
// exposeWithNoTemplate is a skeleton TraitDefinition named "expose" with an
// intentionally empty CUE template. Tests deep-copy this fixture and fill in
// the template (e.g. exposeV1Template) before creating it in the cluster.
var exposeWithNoTemplate = &v1beta1.TraitDefinition{
TypeMeta: metav1.TypeMeta{
Kind: "TraitDefinition",
APIVersion: "core.oam.dev/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "expose",
},
Spec: v1beta1.TraitDefinitionSpec{
Schematic: &common.Schematic{
CUE: &common.CUE{
// Left empty on purpose; each test supplies its own version of the template.
Template: "",
},
},
},
}
// generateTemplate converts a YAML document string into a runtime.RawExtension
// holding the equivalent JSON bytes, suitable for embedding in a CRD object.
// NOTE(review): the YAMLToJSON error is deliberately ignored; malformed YAML
// yields a RawExtension whose Raw is nil/empty. This is acceptable only
// because the helper is fed fixed, known-good test templates — confirm before
// reusing outside tests.
func generateTemplate(template string) runtime.RawExtension {
b, _ := yaml.YAMLToJSON([]byte(template))
return runtime.RawExtension{Raw: b}
@@ -1032,3 +1132,74 @@ spec:
- "sleep"
- "1000"
`
// exposeV1Template is the v1 CUE template for the "expose" trait: it renders a
// single Service that selects the component's pods via the
// "app.oam.dev/component" label and exposes every entry of parameter.port.
// The template text is consumed verbatim by the CUE engine; do not edit it
// for cosmetics (the "exposion" typo lives inside the runtime string).
var exposeV1Template = `
outputs: service: {
	apiVersion: "v1"
	kind:       "Service"
	metadata:
		name: context.name
	spec: {
		selector:
			"app.oam.dev/component": context.name
		ports: [
			for p in parameter.port {
				port:       p
				targetPort: p
			},
		]
	}
}
parameter: {
	// +usage=Specify the exposion ports
	port: [...int]
}
`
// exposeV2Templae is the v2 CUE template for the "expose" trait: it renders a
// Service exposing the ports in parameter.http, plus an Ingress that routes
// each parameter.http path to that Service on parameter.domain.
// NOTE(review): "Templae" is a typo for "Template"; kept as-is because the
// test body elsewhere in this file references this exact identifier.
var exposeV2Templae = `
outputs: service: {
	apiVersion: "v1"
	kind:       "Service"
	metadata:
		name: context.name
	spec: {
		selector: {
			"app.oam.dev/component": context.name
		}
		ports: [
			for k, v in parameter.http {
				port:       v
				targetPort: v
			},
		]
	}
}
outputs: ingress: {
	apiVersion: "networking.k8s.io/v1beta1"
	kind:       "Ingress"
	metadata:
		name: context.name
	spec: {
		rules: [{
			host: parameter.domain
			http: {
				paths: [
					for k, v in parameter.http {
						path: k
						backend: {
							serviceName: context.name
							servicePort: v
						}
					},
				]
			}
		}]
	}
}
parameter: {
	domain: string
	http: [string]: int
}
`