Compare commits

..

20 Commits

Author SHA1 Message Date
Stupig
1444376b0c newline is missing for BaseContextFile (#1396) 2021-04-01 12:07:27 +08:00
yangsoon
04486f89bb fix crd: add additional print column and short Name for CRD (#1377)
* add additional print column

* add shortname

* fix printcolumn & test
2021-04-01 11:51:43 +08:00
Zheng Xi Zhou
d6d19a7c5a Remove outputSecretName in cloud service provisioning and consuming (#1393)
Refer to the reason https://github.com/oam-dev/kubevela/issues/1128#issuecomment-811590631
2021-04-01 11:13:19 +08:00
Jianbo Sun
6b8875012d use shell instead action to sync website && don't sync index.js and config.js (#1386)
* use shell instead action to sync website && don't sync index.js and config.js

* update test website build

* use bash instead sh
2021-04-01 10:06:34 +08:00
joelhy
7ae6a148a0 Fix grammar error: changed "an success story" to "a success story" (#1385) 2021-03-31 23:33:00 +08:00
Jianbo Sun
bbea29c8e5 fix kube-webhook-certgen to patch CRD conversion && remove cert-manager in CI e2e test && remove issuer create from CLI env (#1267)
* remove cert-manager in CI e2e test and remove issuer create from CLI env

* update job patch image(https://github.com/wonderflow/kube-webhook-certgen)
2021-03-31 21:21:28 +08:00
Robin Brämer
6e6c4c76a6 fix docs typo (#1384) 2021-03-31 19:56:36 +08:00
wyike
1bf673c3c1 upgrade webhook configuration to v1 (#1383) 2021-03-31 19:56:09 +08:00
yangsoon
352fe1ba5b remove useless script & update install.mdx (#1382) 2021-03-31 18:40:34 +08:00
Yue Wang
3bdf7a044f fix generate schema for helm values (#1375)
Signed-off-by: roy wang <seiwy2010@gmail.com>
2021-03-31 17:31:54 +08:00
yangsoon
3cb9fa79bf fix index.js (#1380) 2021-03-31 16:01:53 +08:00
wyike
20f6e0ab02 skip check cross ns resource owner, add tests (#1374) 2021-03-31 15:00:16 +08:00
Kinso
438ab96f95 fix: add "system-definition-namespace" flag to container args (#1371)
Co-authored-by: kinsolee <lijingzhao@forchange.tech>
2021-03-31 14:41:07 +08:00
yangsoon
b5bf7a4f62 fix sidebar (#1373) 2021-03-31 14:39:09 +08:00
Lei Zhang (Harry)
d473e41f20 Update readme and doc (#1376) 2021-03-31 14:38:46 +08:00
Jianbo Sun
96c1d0786a the application context should not own application object (#1370)
* the application context should not own application object

* add time for flaky test
2021-03-31 13:55:57 +08:00
yangsoon
f7196e10ca fix upgrade in install.mdx (#1364) 2021-03-30 21:37:48 +08:00
yangsoon
a2997a070d fix vela show (#1366) 2021-03-30 21:36:28 +08:00
Ryan Zhang
0629049e1f add initial finalizer and abandon support (#1362)
* add initial finalizer and abandon support

* fix lint
2021-03-30 02:04:38 -07:00
yangsoon
553b5d19eb add docs readme (#1359) 2021-03-30 16:37:46 +08:00
89 changed files with 1673 additions and 1011 deletions

View File

@@ -15,5 +15,7 @@ jobs:
with:
node-version: '12.x'
- name: Test Build
env:
VERSION: ${{ github.ref }}
run: |
sh ./hack/website/test-build.sh
bash ./hack/website/test-build.sh

View File

@@ -11,11 +11,13 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
- uses: actions/setup-node@v1
with:
node-version: '12.x'
- name: Sync to kubevela.io Repo
uses: wonderflow/auto-docs-action@v0.2.3
env:
SSH_PRIVATE_KEY: ${{ secrets.GH_PAGES_DEPLOY }}
SSH_DEPLOY_KEY: ${{ secrets.GH_PAGES_DEPLOY }}
VERSION: ${{ github.ref }}
COMMIT_ID: ${{ github.sha }}
with:
gh-page: git@github.com:oam-dev/kubevela.io.git
run: |
bash ./hack/website/release.sh

3
.gitignore vendored
View File

@@ -55,3 +55,6 @@ references/dashboard/src/.umi-production/
# Swagger: generate Restful API
references/apiserver/docs/index.html
# check docs
git-page/

View File

@@ -18,7 +18,7 @@ contributing to `kubevela` or build a PoC (Proof of Concept).
3. ginkgo 1.14.0+ (just for [E2E test](./CONTRIBUTING.md#e2e-test))
4. golangci-lint 1.31.0+, it will install automatically if you run `make`, you can [install it manually](https://golangci-lint.run/usage/install/#local-installation) if the installation is too slow.
We also recommend you to learn about KubeVela's [design](https://kubevela.io/#/en/concepts) before dive into its code.
We also recommend you to learn about KubeVela's [design](https://kubevela.io/docs/concepts) before dive into its code.
### Build
@@ -86,7 +86,7 @@ helm uninstall -n vela-system kubevela
### Use
You can try use your local built binaries follow [the documentation](https://kubevela.io/#/en/quick-start).
You can try use your local built binaries follow [the documentation](https://kubevela.io/docs/quick-start).
## Testing
@@ -110,6 +110,22 @@ Start to test.
make e2e-test
```
### Contribute Docs
Please read [the documentation](https://github.com/oam-dev/kubevela/tree/master/docs/README.md) before contributing to the docs.
- Build docs
```shell script
make docs-build
```
- Local development and preview
```shell script
make docs-start
```
## Make a pull request
Remember to write unit-test and e2e-test after you have finished your code.

View File

@@ -62,6 +62,22 @@ doc-gen:
go run hack/docgen/gen.go
go run hack/references/generate.go
docs-build:
ifneq ($(wildcard git-page),)
rm -rf git-page
endif
sh ./hack/website/test-build.sh
docs-start:
ifeq ($(wildcard git-page),)
git clone --single-branch --depth 1 https://github.com/oam-dev/kubevela.io.git git-page
endif
rm -r git-page/docs && rm -r git-page/resources
rm git-page/sidebars.js
cat docs/sidebars.js > git-page/sidebars.js
cp -R docs/en git-page/docs && cp -R docs/resources git-page/resources
cd git-page && yarn install && yarn start
api-gen:
swag init -g references/apiserver/route.go --output references/apiserver/docs
swagger-codegen generate -l html2 -i references/apiserver/docs/swagger.yaml -o references/apiserver/docs
@@ -126,10 +142,8 @@ docker-push:
e2e-setup:
helm install --create-namespace -n flux-system helm-flux http://oam.dev/catalog/helm-flux2-0.1.0.tgz
helm install kruise https://github.com/openkruise/kruise/releases/download/v0.7.0/kruise-chart.tgz
helm repo add jetstack https://charts.jetstack.io
helm repo update
helm upgrade --install --create-namespace --namespace cert-manager cert-manager jetstack/cert-manager --version v1.2.0 --set installCRDs=true --wait
helm upgrade --install --create-namespace --namespace vela-system --set image.pullPolicy=IfNotPresent --set admissionWebhooks.certManager.enabled=true --set image.repository=vela-core-test --set image.tag=$(GIT_COMMIT) --wait kubevela ./charts/vela-core
helm upgrade --install --create-namespace --namespace vela-system --set image.pullPolicy=IfNotPresent --set image.repository=vela-core-test --set image.tag=$(GIT_COMMIT) --wait kubevela ./charts/vela-core
ginkgo version
ginkgo -v -r e2e/setup
kubectl wait --for=condition=Ready pod -l app.kubernetes.io/name=vela-core,app.kubernetes.io/instance=kubevela -n vela-system --timeout=600s

View File

@@ -14,7 +14,7 @@
# KubeVela
KubeVela is the platform engine to create *PaaS-like* experience on Kubernetes, in a scalable approach.
KubeVela is the platform engine to create *developer-centric* experience on Kubernetes, in a scalable approach.
## Community
@@ -22,21 +22,19 @@ KubeVela is the platform engine to create *PaaS-like* experience on Kubernetes,
- Gitter: [Discussion](https://gitter.im/oam-dev/community)
- Bi-weekly Community Call: [Meeting Notes](https://docs.google.com/document/d/1nqdFEyULekyksFHtFvgvFAYE-0AMHKoS3RMnaKsarjs)
> NOTE: KubeVela is still iterating quickly. It's currently under pre-beta release.
## What problems does it solve?
Building **developer-centric platforms** with Kubernetes requires higher level primitives which is out-of-scope of Kubernetes itself. Hence, we platform teams build abstractions.
However, great in flexibility and extensibility, the existing solutions such as IaC (Infrastructure-as-Code) and client-side templating tools all lead to ***Configuration Drift*** (i.e. the generated instances are not in line with the expected configuration) which is a nightmare in production.
KubeVela allows platform teams to create developer-centric abstractions with IaC but maintain them with the battle tested [Kubernetes Control Loop](https://kubernetes.io/docs/concepts/architecture/controller/). Think about a plug-in that turns your Kubernetes cluster into a *"PaaS"* via abstractions designed by yourself.
KubeVela allows platform teams to create developer-centric abstractions with IaC but maintain them with the battle tested [Kubernetes Control Loop](https://kubernetes.io/docs/concepts/architecture/controller/). Think about a plug-in that turns your Kubernetes cluster into a *Heroku* via abstractions designed by yourself.
## Getting Started
- [Installation](https://kubevela.io/#/en/install)
- [Quick start](https://kubevela.io/#/en/quick-start)
- [How it works](https://kubevela.io/#/en/concepts)
- [Installation](https://kubevela.io/docs/install)
- [Quick start](https://kubevela.io/docs/quick-start)
- [How it works](https://kubevela.io/docs/concepts)
## Features

View File

@@ -88,8 +88,14 @@ type ApplicationSpec struct {
// Application is the Schema for the applications API
// +kubebuilder:object:root=true
// +kubebuilder:resource:categories={oam}
// +kubebuilder:resource:categories={oam},shortName=apps
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="COMPONENT",type=string,JSONPath=`.spec.components[*].name`
// +kubebuilder:printcolumn:name="TYPE",type=string,JSONPath=`.spec.components[*].type`
// +kubebuilder:printcolumn:name="PHASE",type=string,JSONPath=`.status.status`
// +kubebuilder:printcolumn:name="HEALTHY",type=boolean,JSONPath=`.status.services[*].healthy`
// +kubebuilder:printcolumn:name="STATUS",type=string,JSONPath=`.status.services[*].message`
// +kubebuilder:printcolumn:name="AGE",type=date,JSONPath=".metadata.creationTimestamp"
type Application struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`

View File

@@ -55,7 +55,8 @@ type ApplicationRevisionSpec struct {
// ApplicationRevision is the Schema for the ApplicationRevision API
// +kubebuilder:object:root=true
// +kubebuilder:shortName=apprev,resource:categories={oam}
// +kubebuilder:resource:categories={oam},shortName=apprev;revisions
// +kubebuilder:printcolumn:name="AGE",type=date,JSONPath=".metadata.creationTimestamp"
type ApplicationRevision struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`

View File

@@ -61,8 +61,14 @@ type AppRolloutStatus struct {
// AppRollout is the Schema for the AppRollout API
// +kubebuilder:object:root=true
// +kubebuilder:resource:categories={oam}
// +kubebuilder:resource:categories={oam},shortName=approllout;rollout
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="TARGET",type=string,JSONPath=`.status.rolloutStatus.rolloutTargetSize`
// +kubebuilder:printcolumn:name="UPGRADED",type=string,JSONPath=`.status.rolloutStatus.upgradedReplicas`
// +kubebuilder:printcolumn:name="READY",type=string,JSONPath=`.status.rolloutStatus.upgradedReadyReplicas`
// +kubebuilder:printcolumn:name="BATCH-STATE",type=string,JSONPath=`.status.rolloutStatus.batchRollingState`
// +kubebuilder:printcolumn:name="ROLLING-STATE",type=string,JSONPath=`.status.rolloutStatus.rollingState`
// +kubebuilder:printcolumn:name="AGE",type=date,JSONPath=".metadata.creationTimestamp"
type AppRollout struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`

View File

@@ -67,8 +67,11 @@ type ComponentDefinitionStatus struct {
// +kubebuilder:object:root=true
// ComponentDefinition is the Schema for the componentdefinitions API
// +kubebuilder:resource:scope=Namespaced,categories={oam}
// +kubebuilder:resource:scope=Namespaced,categories={oam},shortName=comp
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="WORKLOAD-KIND",type=string,JSONPath=".spec.workload.definition.kind"
// +kubebuilder:printcolumn:name="DESCRIPTION",type=string,JSONPath=".metadata.annotations.definition\\.oam\\.dev/description"
// +kubebuilder:printcolumn:name="AGE",type=date,JSONPath=".metadata.creationTimestamp"
type ComponentDefinition struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`

View File

@@ -69,8 +69,9 @@ type WorkloadDefinitionStatus struct {
// valid OAM workload kind by referencing its CustomResourceDefinition. The CRD
// is used to validate the schema of the workload when it is embedded in an OAM
// Component.
// +kubebuilder:printcolumn:JSONPath=".spec.definitionRef.name",name=DEFINITION-NAME,type=string
// +kubebuilder:resource:scope=Namespaced,categories={oam}
// +kubebuilder:resource:scope=Namespaced,categories={oam},shortName=workload
// +kubebuilder:printcolumn:name="DEFINITION-NAME",type=string,JSONPath=".spec.definitionRef.name"
// +kubebuilder:printcolumn:name="AGE",type=date,JSONPath=".metadata.creationTimestamp"
type WorkloadDefinition struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
@@ -158,9 +159,11 @@ type TraitDefinitionStatus struct {
// OAM trait kind by referencing its CustomResourceDefinition. The CRD is used
// to validate the schema of the trait when it is embedded in an OAM
// ApplicationConfiguration.
// +kubebuilder:printcolumn:JSONPath=".spec.definitionRef.name",name=DEFINITION-NAME,type=string
// +kubebuilder:resource:scope=Namespaced,categories={oam}
// +kubebuilder:resource:scope=Namespaced,categories={oam},shortName=trait
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="APPLIES-TO",type=string,JSONPath=".spec.appliesToWorkloads"
// +kubebuilder:printcolumn:name="DESCRIPTION",type=string,JSONPath=".metadata.annotations.definition\\.oam\\.dev/description"
// +kubebuilder:printcolumn:name="AGE",type=date,JSONPath=".metadata.creationTimestamp"
type TraitDefinition struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`

View File

@@ -178,7 +178,7 @@ type AppDeploymentStatus struct {
// AppDeployment is the Schema for the AppDeployment API
// +kubebuilder:object:root=true
// +kubebuilder:resource:categories={oam}
// +kubebuilder:resource:categories={oam},shortName=appdeploy
// +kubebuilder:subresource:status
type AppDeployment struct {
metav1.TypeMeta `json:",inline"`

View File

@@ -67,6 +67,13 @@ type ApplicationSpec struct {
// Application is the Schema for the applications API
// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// +kubebuilder:resource:categories={oam},shortName=apps
// +kubebuilder:printcolumn:name="COMPONENT",type=string,JSONPath=`.spec.components[*].name`
// +kubebuilder:printcolumn:name="TYPE",type=string,JSONPath=`.spec.components[*].type`
// +kubebuilder:printcolumn:name="PHASE",type=string,JSONPath=`.status.status`
// +kubebuilder:printcolumn:name="HEALTHY",type=boolean,JSONPath=`.status.services[*].healthy`
// +kubebuilder:printcolumn:name="STATUS",type=string,JSONPath=`.status.services[*].message`
// +kubebuilder:printcolumn:name="AGE",type=date,JSONPath=".metadata.creationTimestamp"
type Application struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`

View File

@@ -56,6 +56,8 @@ type ApplicationRevisionSpec struct {
// ApplicationRevision is the Schema for the ApplicationRevision API
// +kubebuilder:storageversion
// +kubebuilder:resource:categories={oam},shortName=apprev;revisions
// +kubebuilder:printcolumn:name="AGE",type=date,JSONPath=".metadata.creationTimestamp"
type ApplicationRevision struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`

View File

@@ -41,10 +41,11 @@ type AppRolloutSpec struct {
// RolloutPlan is the details on how to rollout the resources
RolloutPlan v1alpha1.RolloutPlan `json:"rolloutPlan"`
// RevertOnDelete revert the rollout when the rollout CR is deleted
// It will remove the target app from the kubernetes if it's set to true
// RevertOnDelete revert the failed rollout when the rollout CR is deleted
// It will revert the change back to the source version at once (not in batches)
// Default is false
// +optional
RevertOnDelete *bool `json:"revertOnDelete,omitempty"`
RevertOnDelete bool `json:"revertOnDelete,omitempty"`
}
// AppRolloutStatus defines the observed state of AppRollout
@@ -62,9 +63,15 @@ type AppRolloutStatus struct {
// AppRollout is the Schema for the AppRollout API
// +kubebuilder:object:root=true
// +kubebuilder:resource:categories={oam}
// +kubebuilder:resource:categories={oam},shortName=approllout;rollout
// +kubebuilder:subresource:status
// +kubebuilder:storageversion
// +kubebuilder:printcolumn:name="TARGET",type=string,JSONPath=`.status.rolloutTargetSize`
// +kubebuilder:printcolumn:name="UPGRADED",type=string,JSONPath=`.status.upgradedReplicas`
// +kubebuilder:printcolumn:name="READY",type=string,JSONPath=`.status.upgradedReadyReplicas`
// +kubebuilder:printcolumn:name="BATCH-STATE",type=string,JSONPath=`.status.batchRollingState`
// +kubebuilder:printcolumn:name="ROLLING-STATE",type=string,JSONPath=`.status.rollingState`
// +kubebuilder:printcolumn:name="AGE",type=date,JSONPath=".metadata.creationTimestamp"
type AppRollout struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`

View File

@@ -67,9 +67,12 @@ type ComponentDefinitionStatus struct {
// +kubebuilder:object:root=true
// ComponentDefinition is the Schema for the componentdefinitions API
// +kubebuilder:resource:scope=Namespaced,categories={oam}
// +kubebuilder:resource:scope=Namespaced,categories={oam},shortName=comp
// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="WORKLOAD-KIND",type=string,JSONPath=".spec.workload.definition.kind"
// +kubebuilder:printcolumn:name="DESCRIPTION",type=string,JSONPath=".metadata.annotations.definition\\.oam\\.dev/description"
// +kubebuilder:printcolumn:name="AGE",type=date,JSONPath=".metadata.creationTimestamp"
type ComponentDefinition struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`

View File

@@ -67,9 +67,11 @@ type WorkloadDefinitionStatus struct {
// valid OAM workload kind by referencing its CustomResourceDefinition. The CRD
// is used to validate the schema of the workload when it is embedded in an OAM
// Component.
// +kubebuilder:printcolumn:JSONPath=".spec.definitionRef.name",name=DEFINITION-NAME,type=string
// +kubebuilder:resource:scope=Namespaced,categories={oam}
// +kubebuilder:resource:scope=Namespaced,categories={oam},shortName=workload
// +kubebuilder:storageversion
// +kubebuilder:printcolumn:name="DEFINITION-NAME",type=string,JSONPath=".spec.definitionRef.name"
// +kubebuilder:printcolumn:name="DESCRIPTION",type=string,JSONPath=".metadata.annotations.definition\\.oam\\.dev/description"
// +kubebuilder:printcolumn:name="AGE",type=date,JSONPath=".metadata.creationTimestamp"
type WorkloadDefinition struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
@@ -157,10 +159,12 @@ type TraitDefinitionStatus struct {
// OAM trait kind by referencing its CustomResourceDefinition. The CRD is used
// to validate the schema of the trait when it is embedded in an OAM
// ApplicationConfiguration.
// +kubebuilder:printcolumn:JSONPath=".spec.definitionRef.name",name=DEFINITION-NAME,type=string
// +kubebuilder:resource:scope=Namespaced,categories={oam}
// +kubebuilder:resource:scope=Namespaced,categories={oam},shortName=trait
// +kubebuilder:subresource:status
// +kubebuilder:storageversion
// +kubebuilder:printcolumn:name="APPLIES-TO",type=string,JSONPath=".spec.appliesToWorkloads"
// +kubebuilder:printcolumn:name="DESCRIPTION",type=string,JSONPath=".metadata.annotations.definition\\.oam\\.dev/description"
// +kubebuilder:printcolumn:name="AGE",type=date,JSONPath=".metadata.creationTimestamp"
type TraitDefinition struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`

View File

@@ -225,11 +225,6 @@ func (in *AppRolloutSpec) DeepCopyInto(out *AppRolloutSpec) {
copy(*out, *in)
}
in.RolloutPlan.DeepCopyInto(&out.RolloutPlan)
if in.RevertOnDelete != nil {
in, out := &in.RevertOnDelete, &out.RevertOnDelete
*out = new(bool)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppRolloutSpec.

View File

@@ -50,6 +50,9 @@ const (
type RollingState string
const (
// LocatingTargetAppState indicates that the rollout is in the stage of locating target app
// we use this state to make sure we special handle the target app successfully only once
LocatingTargetAppState RollingState = "locatingTargetApp"
// VerifyingSpecState indicates that the rollout is in the stage of verifying the rollout settings
// and the controller can locate both the target and the source
VerifyingSpecState RollingState = "verifyingSpec"
@@ -64,8 +67,12 @@ const (
RolloutFailingState RollingState = "rolloutFailing"
// RolloutSucceedState indicates that rollout successfully completed to match the desired target state
RolloutSucceedState RollingState = "rolloutSucceed"
// RolloutAbandoningState indicates that the rollout is abandoned, can be restarted. This is a terminal state
RolloutAbandoningState RollingState = "rolloutAbandoned"
// RolloutAbandoningState indicates that the rollout is being abandoned
// we need to finalize it by cleaning up the old resources, adjust traffic and return control back to its owner
RolloutAbandoningState RollingState = "rolloutAbandoning"
// RolloutDeletingState indicates that the rollout is being deleted
// we need to finalize it by cleaning up the old resources, adjust traffic and return control back to its owner
RolloutDeletingState RollingState = "RolloutDeletingState"
// RolloutFailedState indicates that rollout is failed, the target replica is not reached
// we can not move forward anymore, we will let the client to decide when or whether to revert.
RolloutFailedState RollingState = "rolloutFailed"

View File

@@ -36,9 +36,15 @@ const (
// RollingRetriableFailureEvent indicates that we encountered an unexpected but retriable error
RollingRetriableFailureEvent RolloutEvent = "RollingRetriableFailureEvent"
// AppLocatedEvent indicates that apps are located successfully
AppLocatedEvent RolloutEvent = "AppLocatedEvent"
// RollingModifiedEvent indicates that the rolling target or source has changed
RollingModifiedEvent RolloutEvent = "RollingModifiedEvent"
// RollingDeletedEvent indicates that the rolling is being deleted
RollingDeletedEvent RolloutEvent = "RollingDeletedEvent"
// RollingSpecVerifiedEvent indicates that we have successfully verified that the rollout spec
RollingSpecVerifiedEvent RolloutEvent = "RollingSpecVerifiedEvent"
@@ -86,6 +92,8 @@ const (
RolloutFailing runtimev1alpha1.ConditionType = "RolloutFailing"
// RolloutAbandoning means that the rollout is being abandoned.
RolloutAbandoning runtimev1alpha1.ConditionType = "RolloutAbandoning"
// RolloutDeleting means that the rollout is being deleted.
RolloutDeleting runtimev1alpha1.ConditionType = "RolloutDeleting"
// RolloutFailed means that the rollout failed.
RolloutFailed runtimev1alpha1.ConditionType = "RolloutFailed"
// RolloutSucceed means that the rollout is done.
@@ -166,8 +174,8 @@ func (r *RolloutStatus) getRolloutConditionType() runtimev1alpha1.ConditionType
case RolloutAbandoningState:
return RolloutAbandoning
case RolloutFailedState:
return RolloutFailed
case RolloutDeletingState:
return RolloutDeleting
case RolloutSucceedState:
return RolloutSucceed
@@ -203,7 +211,7 @@ func (r *RolloutStatus) ResetStatus() {
r.NewPodTemplateIdentifier = ""
r.RolloutTargetTotalSize = -1
r.LastAppliedPodTemplateIdentifier = ""
r.RollingState = VerifyingSpecState
r.RollingState = LocatingTargetAppState
r.BatchRollingState = BatchInitializingState
r.CurrentBatch = 0
r.UpgradedReplicas = 0
@@ -259,9 +267,29 @@ func (r *RolloutStatus) StateTransition(event RolloutEvent) {
if event == RollingModifiedEvent {
if r.RollingState == RolloutFailedState || r.RollingState == RolloutSucceedState {
r.ResetStatus()
} else {
} else if r.RollingState != RolloutDeletingState {
r.SetRolloutCondition(NewNegativeCondition(r.getRolloutConditionType(), "Rollout Spec is modified"))
r.RollingState = RolloutAbandoningState
r.BatchRollingState = BatchInitializingState
}
return
}
// special handle deleted event here, it can happen at many states
if event == RollingDeletedEvent {
if r.RollingState == RolloutFailedState || r.RollingState == RolloutSucceedState {
panic(fmt.Errorf(invalidRollingStateTransition, rollingState, event))
}
r.SetRolloutCondition(NewNegativeCondition(r.getRolloutConditionType(), "Rollout is being deleted"))
r.RollingState = RolloutDeletingState
r.BatchRollingState = BatchInitializingState
return
}
// special handle appLocatedEvent event here, it only applies to one state but it's legal to happen at other states
if event == AppLocatedEvent {
if r.RollingState == LocatingTargetAppState {
r.RollingState = VerifyingSpecState
}
return
}
@@ -296,6 +324,14 @@ func (r *RolloutStatus) StateTransition(event RolloutEvent) {
}
panic(fmt.Errorf(invalidRollingStateTransition, rollingState, event))
case RolloutDeletingState:
if event == RollingFinalizedEvent {
r.SetRolloutCondition(NewPositiveCondition(r.getRolloutConditionType()))
r.RollingState = RolloutFailedState
return
}
panic(fmt.Errorf(invalidRollingStateTransition, rollingState, event))
case FinalisingState:
if event == RollingFinalizedEvent {
r.SetRolloutCondition(NewPositiveCondition(r.getRolloutConditionType()))
@@ -312,10 +348,7 @@ func (r *RolloutStatus) StateTransition(event RolloutEvent) {
}
panic(fmt.Errorf(invalidRollingStateTransition, rollingState, event))
case RolloutSucceedState:
panic(fmt.Errorf(invalidRollingStateTransition, rollingState, event))
case RolloutFailedState:
case RolloutSucceedState, RolloutFailedState:
panic(fmt.Errorf(invalidRollingStateTransition, rollingState, event))
default:

View File

@@ -50,8 +50,6 @@ type EnvMeta struct {
Email string `json:"email,omitempty"`
Domain string `json:"domain,omitempty"`
// Below are not arguments, should be auto-generated
Issuer string `json:"issuer"`
Current string `json:"current,omitempty"`
}

View File

@@ -14,6 +14,8 @@ spec:
kind: AppDeployment
listKind: AppDeploymentList
plural: appdeployments
shortNames:
- appdeploy
singular: appdeployment
scope: Namespaced
versions:

View File

@@ -9,13 +9,22 @@ metadata:
spec:
group: core.oam.dev
names:
categories:
- oam
kind: ApplicationRevision
listKind: ApplicationRevisionList
plural: applicationrevisions
shortNames:
- apprev
- revisions
singular: applicationrevision
scope: Namespaced
versions:
- name: v1alpha2
- additionalPrinterColumns:
- jsonPath: .metadata.creationTimestamp
name: AGE
type: date
name: v1alpha2
schema:
openAPIV3Schema:
description: ApplicationRevision is the Schema for the ApplicationRevision API
@@ -1058,7 +1067,12 @@ spec:
type: object
served: true
storage: false
- name: v1beta1
subresources: {}
- additionalPrinterColumns:
- jsonPath: .metadata.creationTimestamp
name: AGE
type: date
name: v1beta1
schema:
openAPIV3Schema:
description: ApplicationRevision is the Schema for the ApplicationRevision API
@@ -2102,6 +2116,7 @@ spec:
type: object
served: true
storage: true
subresources: {}
status:
acceptedNames:
kind: ""

View File

@@ -25,10 +25,31 @@ spec:
kind: Application
listKind: ApplicationList
plural: applications
shortNames:
- apps
singular: application
scope: Namespaced
versions:
- name: v1alpha2
- additionalPrinterColumns:
- jsonPath: .spec.components[*].name
name: COMPONENT
type: string
- jsonPath: .spec.components[*].type
name: TYPE
type: string
- jsonPath: .status.status
name: PHASE
type: string
- jsonPath: .status.services[*].healthy
name: HEALTHY
type: boolean
- jsonPath: .status.services[*].message
name: STATUS
type: string
- jsonPath: .metadata.creationTimestamp
name: AGE
type: date
name: v1alpha2
schema:
openAPIV3Schema:
description: Application is the Schema for the applications API
@@ -463,7 +484,26 @@ spec:
storage: false
subresources:
status: {}
- name: v1beta1
- additionalPrinterColumns:
- jsonPath: .spec.components[*].name
name: COMPONENT
type: string
- jsonPath: .spec.components[*].type
name: TYPE
type: string
- jsonPath: .status.status
name: PHASE
type: string
- jsonPath: .status.services[*].healthy
name: HEALTHY
type: boolean
- jsonPath: .status.services[*].message
name: STATUS
type: string
- jsonPath: .metadata.creationTimestamp
name: AGE
type: date
name: v1beta1
schema:
openAPIV3Schema:
description: Application is the Schema for the applications API

View File

@@ -14,10 +14,32 @@ spec:
kind: AppRollout
listKind: AppRolloutList
plural: approllouts
shortNames:
- approllout
- rollout
singular: approllout
scope: Namespaced
versions:
- name: v1alpha2
- additionalPrinterColumns:
- jsonPath: .status.rolloutStatus.rolloutTargetSize
name: TARGET
type: string
- jsonPath: .status.rolloutStatus.upgradedReplicas
name: UPGRADED
type: string
- jsonPath: .status.rolloutStatus.upgradedReadyReplicas
name: READY
type: string
- jsonPath: .status.rolloutStatus.batchRollingState
name: BATCH-STATE
type: string
- jsonPath: .status.rolloutStatus.rollingState
name: ROLLING-STATE
type: string
- jsonPath: .metadata.creationTimestamp
name: AGE
type: date
name: v1alpha2
schema:
openAPIV3Schema:
description: AppRollout is the Schema for the AppRollout API
@@ -346,7 +368,26 @@ spec:
storage: false
subresources:
status: {}
- name: v1beta1
- additionalPrinterColumns:
- jsonPath: .status.rolloutTargetSize
name: TARGET
type: string
- jsonPath: .status.upgradedReplicas
name: UPGRADED
type: string
- jsonPath: .status.upgradedReadyReplicas
name: READY
type: string
- jsonPath: .status.batchRollingState
name: BATCH-STATE
type: string
- jsonPath: .status.rollingState
name: ROLLING-STATE
type: string
- jsonPath: .metadata.creationTimestamp
name: AGE
type: date
name: v1beta1
schema:
openAPIV3Schema:
description: AppRollout is the Schema for the AppRollout API
@@ -368,7 +409,7 @@ spec:
type: string
type: array
revertOnDelete:
description: RevertOnDelete revert the rollout when the rollout CR is deleted It will remove the target app from the kubernetes if it's set to true
description: RevertOnDelete revert the failed rollout when the rollout CR is deleted It will revert the change back to the source version at once (not in batches) Default is false
type: boolean
rolloutPlan:
description: RolloutPlan is the details on how to rollout the resources

View File

@@ -14,10 +14,22 @@ spec:
kind: ComponentDefinition
listKind: ComponentDefinitionList
plural: componentdefinitions
shortNames:
- comp
singular: componentdefinition
scope: Namespaced
versions:
- name: v1alpha2
- additionalPrinterColumns:
- jsonPath: .spec.workload.definition.kind
name: WORKLOAD-KIND
type: string
- jsonPath: .metadata.annotations.definition\.oam\.dev/description
name: DESCRIPTION
type: string
- jsonPath: .metadata.creationTimestamp
name: AGE
type: date
name: v1alpha2
schema:
openAPIV3Schema:
description: ComponentDefinition is the Schema for the componentdefinitions API
@@ -206,7 +218,17 @@ spec:
storage: false
subresources:
status: {}
- name: v1beta1
- additionalPrinterColumns:
- jsonPath: .spec.workload.definition.kind
name: WORKLOAD-KIND
type: string
- jsonPath: .metadata.annotations.definition\.oam\.dev/description
name: DESCRIPTION
type: string
- jsonPath: .metadata.creationTimestamp
name: AGE
type: date
name: v1beta1
schema:
openAPIV3Schema:
description: ComponentDefinition is the Schema for the componentdefinitions API

View File

@@ -14,13 +14,21 @@ spec:
kind: TraitDefinition
listKind: TraitDefinitionList
plural: traitdefinitions
shortNames:
- trait
singular: traitdefinition
scope: Namespaced
versions:
- additionalPrinterColumns:
- jsonPath: .spec.definitionRef.name
name: DEFINITION-NAME
- jsonPath: .spec.appliesToWorkloads
name: APPLIES-TO
type: string
- jsonPath: .metadata.annotations.definition\.oam\.dev/description
name: DESCRIPTION
type: string
- jsonPath: .metadata.creationTimestamp
name: AGE
type: date
name: v1alpha2
schema:
openAPIV3Schema:
@@ -192,9 +200,15 @@ spec:
subresources:
status: {}
- additionalPrinterColumns:
- jsonPath: .spec.definitionRef.name
name: DEFINITION-NAME
- jsonPath: .spec.appliesToWorkloads
name: APPLIES-TO
type: string
- jsonPath: .metadata.annotations.definition\.oam\.dev/description
name: DESCRIPTION
type: string
- jsonPath: .metadata.creationTimestamp
name: AGE
type: date
name: v1beta1
schema:
openAPIV3Schema:

View File

@@ -14,6 +14,8 @@ spec:
kind: WorkloadDefinition
listKind: WorkloadDefinitionList
plural: workloaddefinitions
shortNames:
- workload
singular: workloaddefinition
scope: Namespaced
versions:
@@ -21,6 +23,9 @@ spec:
- jsonPath: .spec.definitionRef.name
name: DEFINITION-NAME
type: string
- jsonPath: .metadata.creationTimestamp
name: AGE
type: date
name: v1alpha2
schema:
openAPIV3Schema:
@@ -204,6 +209,12 @@ spec:
- jsonPath: .spec.definitionRef.name
name: DEFINITION-NAME
type: string
- jsonPath: .metadata.annotations.definition\.oam\.dev/description
name: DESCRIPTION
type: string
- jsonPath: .metadata.creationTimestamp
name: AGE
type: date
name: v1beta1
schema:
openAPIV3Schema:

View File

@@ -18,4 +18,11 @@ rules:
verbs:
- get
- update
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- get
- update
{{- end }}

View File

@@ -32,6 +32,7 @@ spec:
- --namespace={{ .Release.Namespace }}
- --secret-name={{ template "kubevela.fullname" . }}-admission
- --patch-failure-policy={{ .Values.admissionWebhooks.failurePolicy }}
- --crds=applications.core.oam.dev
restartPolicy: OnFailure
serviceAccountName: {{ template "kubevela.fullname" . }}-admission
{{- with .Values.admissionWebhooks.patch.affinity }}

View File

@@ -1,5 +1,5 @@
{{- if .Values.admissionWebhooks.enabled -}}
apiVersion: admissionregistration.k8s.io/v1beta1
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
name: {{ template "kubevela.fullname" . }}-admission
@@ -21,6 +21,7 @@ webhooks:
failurePolicy: Fail
{{- end }}
name: mutating.core.oam.dev.v1alpha2.applicationconfigurations
sideEffects: None
rules:
- apiGroups:
- core.oam.dev
@@ -47,6 +48,7 @@ webhooks:
failurePolicy: Fail
{{- end }}
name: mutating.core.oam.dev.v1beta1.approllouts
sideEffects: None
rules:
- apiGroups:
- core.oam.dev
@@ -73,6 +75,7 @@ webhooks:
failurePolicy: Fails
{{- end }}
name: mutating.core.oam-dev.v1alpha2.components
sideEffects: None
rules:
- apiGroups:
- core.oam.dev
@@ -87,28 +90,6 @@ webhooks:
admissionReviewVersions:
- v1beta1
timeoutSeconds: 5
- clientConfig:
caBundle: Cg==
service:
name: {{ template "kubevela.name" . }}-webhook
namespace: {{ .Release.Namespace }}
path: /mutate-standard-oam-dev-v1alpha1-metricstrait
{{- if .Values.admissionWebhooks.patch.enabled }}
failurePolicy: Ignore
{{- else }}
failurePolicy: Fails
{{- end }}
name: mmetricstrait.kb.io
rules:
- apiGroups:
- standard.oam.dev
apiVersions:
- v1alpha1
operations:
- CREATE
- UPDATE
resources:
- metricstraits
- clientConfig:
caBundle: Cg==
service:
@@ -121,6 +102,9 @@ webhooks:
failurePolicy: Fails
{{- end }}
name: mcontainerized.kb.io
sideEffects: None
admissionReviewVersions:
- v1beta1
rules:
- apiGroups:
- standard.oam.dev

View File

@@ -1,5 +1,5 @@
{{- if .Values.admissionWebhooks.enabled -}}
apiVersion: admissionregistration.k8s.io/v1beta1
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
name: {{ template "kubevela.fullname" . }}-admission
@@ -21,6 +21,7 @@ webhooks:
failurePolicy: {{ .Values.admissionWebhooks.failurePolicy }}
{{- end }}
name: validating.core.oam.dev.v1alpha2.applicationconfigurations
sideEffects: None
rules:
- apiGroups:
- core.oam.dev
@@ -47,6 +48,7 @@ webhooks:
failurePolicy: {{ .Values.admissionWebhooks.failurePolicy }}
{{- end }}
name: validating.core.oam.dev.v1beta1.approllouts
sideEffects: None
rules:
- apiGroups:
- core.oam.dev
@@ -73,6 +75,7 @@ webhooks:
failurePolicy: {{ .Values.admissionWebhooks.failurePolicy }}
{{- end }}
name: validating.core.oam.dev.v1alpha2.components
sideEffects: None
rules:
- apiGroups:
- core.oam.dev
@@ -99,6 +102,9 @@ webhooks:
failurePolicy: {{ .Values.admissionWebhooks.failurePolicy }}
{{- end }}
name: validating.core.oam.dev.v1alpha2.traitdefinitions
sideEffects: None
admissionReviewVersions:
- v1beta1
rules:
- apiGroups:
- core.oam.dev
@@ -113,29 +119,6 @@ webhooks:
admissionReviewVersions:
- v1beta1
timeoutSeconds: 5
- clientConfig:
caBundle: Cg==
service:
name: {{ template "kubevela.name" . }}-webhook
namespace: {{ .Release.Namespace }}
path: /validate-standard-oam-dev-v1alpha1-metricstrait
{{- if .Values.admissionWebhooks.patch.enabled }}
failurePolicy: Ignore
{{- else }}
failurePolicy: {{ .Values.admissionWebhooks.failurePolicy }}
{{- end }}
name: vmetricstrait.kb.io
rules:
- apiGroups:
- standard.oam.dev
apiVersions:
- v1alpha1
operations:
- CREATE
- UPDATE
- DELETE
resources:
- metricstraits
- clientConfig:
caBundle: Cg==
service:
@@ -144,6 +127,9 @@ webhooks:
path: /validate-standard-oam-dev-v1alpha1-podspecworkload
failurePolicy: Fail
name: vcontainerized.kb.io
admissionReviewVersions:
- v1beta1
sideEffects: None
rules:
- apiGroups:
- standard.oam.dev

View File

@@ -120,6 +120,7 @@ spec:
{{ if ne .Values.disableCaps "" }}
- "--disable-caps={{ .Values.disableCaps }}"
{{ end }}
- "--system-definition-namespace={{ .Values.systemDefinitionNamespace }}"
image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
imagePullPolicy: {{ quote .Values.image.pullPolicy }}
resources:

View File

@@ -88,8 +88,8 @@ admissionWebhooks:
patch:
enabled: true
image:
repository: jettech/kube-webhook-certgen
tag: v1.5.0
repository: wonderflow/kube-webhook-certgen
tag: v2.1
pullPolicy: IfNotPresent
affinity: {}
tolerations: []

View File

@@ -1,8 +1,8 @@
# Route Trait Design
The main idea of route trait is to let users have an entrypoint to visit their App.
The main idea of [route trait](https://github.com/oam-dev/catalog/tree/master/traits/routetrait) is to let users have an entrypoint to visit their App.
In k8s world, if you want to do so, you have to understand K8s [Serivce](https://kubernetes.io/docs/concepts/services-networking/service/)
In k8s world, if you want to do so, you have to understand K8s [Service](https://kubernetes.io/docs/concepts/services-networking/service/)
, [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/), [Ingress Controllers](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/).
It's not easy to get all of these things work well.

73
docs/README.md Normal file
View File

@@ -0,0 +1,73 @@
# Contributing to KubeVela Docs
[Here](https://github.com/oam-dev/kubevela.io) is the source code of [Kubevela website](http://kubevela.io/).
It's built by [Docusaurus 2](https://v2.docusaurus.io/), a modern static website generator.
Any files modified here will trigger the `check-docs` GitHub action to run and validate that the docs can be built successfully into the website.
Any changes to these files (`docs/en/*`, `resource/*`, `sidebars.js`) will be submitted to the corresponding locations of the repo
[kubevela.io](https://github.com/oam-dev/kubevela.io). The Github-Action there will parse the document and publish it to the Kubevela Website automatically.
Please follow our guides below to learn how to write the docs in the right way.
## Add or Update Docs
When you add or modify the docs, these three files(`docs/en/`, `resource/` and `sidebars.js`) should be taken into consideration.
1. `docs/en/`, the main English documentation files are mainly located in this folder. All markdown files need to follow the format,
that the title at the beginning should be in the following format:
```markdown
---
title: Title Name
---
```
When you want to add a link referring to any `.md` file inside the docs (`docs/en`), you need to use a relative path and remove the `.md` suffix.
For example, `en/helm/component.md` has a link referring to `en/platform-engineers/definition-and-templates.md`. The format should look like:
```markdown
[the definition and template concepts](../platform-engineers/definition-and-templates)
```
2. `resource/`, image files are located in this folder. When you want to link to any image in the documentation,
   you should put the image resources here and use a relative path like below:
```markdown
![alt](../resources/concepts.png)
```
3. `sidebars.js`, this file contains the navigation information of the KubeVela website.
   Please read [the official docs of docusaurus](https://docusaurus.io/docs/sidebar) to learn how to write `sidebars.js`.
```js
{
type: 'category',
label: 'Capability References',
items: [
// Note!: here must be add the path under "docs/en"
'developers/references/README',
'developers/references/workload-types/webservice',
'developers/references/workload-types/task',
...
],
},
```
[comment]: <> (TODO: ADD how to translate into Chinese or other language here.)
## Local Development
You can preview the website locally with the `node` and `yarn` installed.
Every time you modify the files under the docs, you need to re-run the following command; it will not sync automatically:
```shell
make docs-start
```
## Build Locally
You can build the KubeVela website locally to test the correctness of the docs; just run the following command:
```shell
make docs-build
```

View File

@@ -1,159 +0,0 @@
/** @type {import('@docusaurus/types').DocusaurusConfig} */
module.exports = {
title: 'KubeVela',
tagline: 'Make shipping applications more enjoyable.',
url: 'https://kubevela.io',
baseUrl: '/',
onBrokenLinks: 'throw',
onBrokenMarkdownLinks: 'warn',
favicon: 'img/favicon.ico',
organizationName: 'oam-dev', // Usually your GitHub org/user name.
projectName: 'kubevela.io', // Usually your repo name.
i18n: {
defaultLocale: 'en',
locales: ['en', 'zh'],
localeConfigs: {
en: {
label: 'English',
},
zh: {
label: '简体中文',
},
},
},
themeConfig: {
navbar: {
title: 'KubeVela',
logo: {
alt: 'KubeVela',
src: 'img/logo.svg',
srcDark: 'img/logoDark.svg',
},
items: [
{
type: 'docsVersionDropdown',
position: 'right',
},
{
to: 'docs/',
activeBasePath: 'docs',
label: 'Documentation',
position: 'left',
},
{
to: 'blog',
label: 'Blog',
position: 'left'
},
{
type: 'localeDropdown',
position: 'right',
},
{
href: 'https://github.com/oam-dev/kubevela',
className: 'header-githab-link',
position: 'right',
},
],
},
footer: {
links: [
{
title: 'Documentation',
items: [
{
label: 'Getting Started',
to: '/docs/install',
},
{
label: 'Platform Builder Guide',
to: '/docs/platform-engineers/overview',
},
{
label: 'Developer Experience Guide',
to: '/docs/quick-start-appfile',
},
],
},
{
title: 'Community',
items: [
{
label: 'Slack ( #kubevela channel )',
href: 'https://slack.cncf.io/'
},
{
label: 'Gitter',
href: 'https://gitter.im/oam-dev/community',
},
{
label: 'DingTalk (23310022)',
href: '.',
}
],
},
{
title: 'More',
items: [
{
label: 'GitHub',
href: 'https://github.com/oam-dev/kubevela',
},
{
label: 'Blog',
to: 'blog',
},
],
},
],
copyright: `
<br />
<strong>© KubeVela Authors ${new Date().getFullYear()} | Documentation Distributed under <a herf="https://creativecommons.org/licenses/by/4.0">CC-BY-4.0</a> </strong>
<br />
`,
},
prism: {
theme: require('prism-react-renderer/themes/dracula'),
},
},
plugins: [
[
require.resolve("@easyops-cn/docusaurus-search-local"),
{
hashed: true,
language: ["en", "zh"],
indexBlog: true,
},
],
],
presets: [
[
'@docusaurus/preset-classic',
{
docs: {
sidebarPath: require.resolve('./sidebars.js'),
editUrl:
'https://github.com/oam-dev/kubevela.io/edit/main/',
showLastUpdateAuthor: true,
showLastUpdateTime: true,
includeCurrentVersion: true,
lastVersion: 'current',
// versions: {
// current: {
// label: 'master',
// path: '/',
// },
// },
},
blog: {
showReadingTime: true,
editUrl:
'https://github.com/oam-dev/kubevela.io/tree/main/blog',
},
theme: {
customCss: require.resolve('./src/css/custom.css'),
},
},
],
],
};

View File

@@ -22,7 +22,7 @@ The reasons for KubeVela supports CUE as a first-class solution to design abstra
Please make sure below CLIs are present in your environment:
* [`cue` >=v0.2.2](https://cuelang.org/docs/install/)
* [`vela` (>v1.0.0)](https://kubevela.io/#/en/install?id=_3-optional-get-kubevela-cli)
* [`vela` (>v1.0.0)](../install#4-optional-get-kubevela-cli)
## CUE CLI Basic

View File

@@ -2,7 +2,8 @@
title: Setting Up Deployment Environment
---
A deployment environment is where you could configure the workspace, email for certificate issuer and domain for your applications globally. A typical set of deployment environment is `test`, `staging`, `prod`, etc.
A deployment environment is where you could configure the workspace, email for contact and domain for your applications globally.
A typical set of deployment environment is `test`, `staging`, `prod`, etc.
## Create environment

View File

@@ -5,7 +5,7 @@ title: Installation
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
If you have installed the kubevela chart before, please read the [Upgrade](#7-optional-upgrade) step directly.
If you have installed the kubevela chart before, please read the [Upgrade](#upgrade) step directly.
## 1. Setup Kubernetes cluster
@@ -184,7 +184,7 @@ Here are three ways to get KubeVela Cli:
** macOS/Linux **
```shell script
curl -fsSl https://kubevela.io/install.sh | bash
curl -fsSl https://kubevela.io/script/install.sh | bash
```
**Windows**
@@ -276,7 +276,19 @@ Then clean up CRDs (CRDs are not removed via helm by default):
If you have already installed KubeVela and wants to upgrade to the new version, you could follow below instructions.
## Step 1. Upgrade KubeVela CRDs
## Step 1. Update helm repo
```shell
helm repo update
```
You can get the new version of the kubevela chart by running:
```shell
helm search repo kubevela/vela-core -l
```
## Step 2. Upgrade KubeVela CRDs
```shell
kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/charts/vela-core/crds/core.oam.dev_componentdefinitions.yaml
@@ -290,8 +302,18 @@ kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/chart
kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/charts/vela-core/crds/core.oam.dev_applicationcontexts.yaml
```
## Step 2. Upgrade KubeVela helm chart
> Tips: If you meet errors like `* is invalid: spec.scope: Invalid value: "Namespaced": field is immutable`, please delete the CRDs with the error
and re-apply the KubeVela CRDs.
```shell
kubectl delete crd \
scopedefinitions.core.oam.dev \
traitdefinitions.core.oam.dev \
workloaddefinitions.core.oam.dev
```
## Step 3. Upgrade KubeVela helm chart
```shell
helm upgrade --install --create-namespace --namespace vela-system kubevela kubevela/vela-core --version <the_new_version>
```

View File

@@ -20,50 +20,50 @@ In the end, developers complain those platforms are too rigid and slow in respon
## What is KubeVela?
For platform builders, KubeVela serves as a framework that empowers them to create user friendly yet highly extensible platforms at ease. In details, KubeVela relieves the pains of building such platforms by doing the following:
For platform builders, KubeVela serves as a framework that relieves the pains of building developer focused platforms by doing the following:
- Application Centric. KubeVela enforces an *Application* abstraction as its main API and **ALL** KubeVela's capabilities serve for the applications' needs only. This is achieved by adopting the [Open Application Model](https://github.com/oam-dev/spec) as the core API for KubeVela.
- Developer Centric. KubeVela abstracts away the infrastructure level primitives by introducing the *Application* concept as main API, and then building operational features around the applications' needs only.
- Extending Natively. The *Application* abstraction is composed of modularized building blocks named *components* and *traits*. Any capability provided by Kubernetes ecosystem can be added to KubeVela as new component or trait through simple `kubectl apply -f`.
- Extending Natively. The *Application* is composed of modularized building blocks that support [CUELang](https://github.com/cuelang/cue) and [Helm](https://helm.sh) as template engines. This enable you to abstract Kubernetes capabilities in LEGO-style and ship them to end users via plain `kubectl apply -f`. Changes made to the abstraction templates take effect at runtime, neither recompilation nor redeployment of KubeVela is required.
- Simple yet Extensible Abstraction Mechanism. The *Application* abstraction is implemented with server-side encapsulation controller (supports [CUELang](https://github.com/cuelang/cue) and [Helm](https://helm.sh) as templating engine) to abstract user-facing primitives from Kubernetes API resources. Changes to existing capability templates (or new templates added) take effect at runtime, neither recompilation nor redeployment of KubeVela is required.
- Simple yet Reliable Abstraction Mechanism. Unlike most IaC (Infrastructure-as-Code) solutions, the abstractions in KubeVela is built with [Kubernetes Control Loop](https://kubernetes.io/docs/concepts/architecture/controller/) so they will never leave *Configuration Drift* in your cluster. As a [Kubernetes Custom Resource](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/), KubeVela works with any CI/CD or GitOps tools seamlessly, no integration effort needed.
With KubeVela, platform builders now finally have the tooling support to design and ship any new capabilities to their end-users with high confidence and low turn around time.
With KubeVela, the platform builders finally have the tooling supports to design easy-to-use abstractions and ship them to end-users with high confidence and low turn around time.
For developers, such *Application* abstraction built with KubeVela will enable them to design and ship their applications to Kubernetes with minimal effort. Instead of managing a handful infrastructure details, a simple application definition that can be easily integrated with any CI/CD pipeline is all they need.
For end-users (e.g. app developers), such abstractions built with KubeVela will enable them to design and ship applications to Kubernetes with minimal effort - instead of managing a handful infrastructure details, a simple application definition that can be easily integrated with any CI/CD pipeline is all they need.
## Comparisons
### Platform-as-a-Service (PaaS)
### KubeVela vs. Platform-as-a-Service (PaaS)
The typical examples are Heroku and Cloud Foundry. They provides full application management capabilities and aim to improve developer experience and efficiency. In this context, KubeVela can provide similar experience but its built-in features are much lighter and easier to maintain compared to most of the existing PaaS offerings. KubeVela core components are nothing but a set of Kubernetes controllers/plugins.
The typical examples are Heroku and Cloud Foundry. They provide full application management capabilities and aim to improve developer experience and efficiency. In this context, KubeVela can provide similar experience.
Though the biggest difference lies KubeVela positions itself as the engine to build "PaaS-like" systems, not a PaaS offering.
Though the biggest difference lies in **flexibility**.
KubeVela is designed as a core engine whose primary goal is to enable platform team to create "PaaS-like" experience by simply registering Kubernetes API resources and defining templates. Comparing to this experience, most existing PaaS systems are either inextensible or have their own addon systems. Hence it's common for them to enforce constraints in the type of supported applications and the supported capabilities which will not happen in KubeVela based experience.
KubeVela is a Kubernetes plug-in that enabling you to serve end users with simplicity by defining your own abstractions, and this is achieved by templating Kubernetes API resources as application-centric abstractions in your cluster. Comparing to this mechanism, most existing PaaS systems are highly restricted and inflexible, i.e. they have to enforce constraints in the type of supported applications and capabilities, and as application needs grows, they always outgrow the capabilities of a PaaS system - this will never happen in KubeVela.
### Serverless platforms
### KubeVela vs. Serverless
Serverless platform such as AWS Lambda provides extraordinary user experience and agility to deploy serverless applications. However, those platforms impose even more constraints in extensibility. They are arguably "hard-coded" PaaS.
Kubernetes based serverless platforms such as Knative, OpenFaaS can be easily integrated with KubeVela by registering themselves as new workload types and traits. Even for AWS Lambda, there is an success story to integrate it with KubeVela by the tools developed by Crossplane.
Kubernetes based serverless platforms such as Knative, OpenFaaS can be easily integrated with KubeVela by registering themselves as new workload types and traits. Even for AWS Lambda, there is a success story to integrate it with KubeVela by the tools developed by Crossplane.
### Platform agnostic developer tools
### KubeVela vs. Platform agnostic developer tools
The typical example is Hashicorp's Waypoint. Waypoint is a developer facing tool which introduces a consistent workflow (i.e., build, deploy, release) to ship applications on top of different platforms.
KubeVela can be integrated into Waypoint like any other supported platforms. In this case, developers will use the Waypoint workflow to manage applications, and all the capabilities of KubeVela including abstractions will still be available in this integration.
KubeVela can be integrated into Waypoint as a supported platform. In this case, developers could use the Waypoint workflow to manage applications with leverage of abstractions (e.g. application, rollout, ingress, autoscaling etc) you built via KubeVela.
### Helm, etc.
### KubeVela vs. Helm
Helm is a package manager for Kubernetes that provides package, install, and upgrade a set of YAML files for Kubernetes as a unit. KubeVela leverages Helm heavily to package the capability dependencies and Helm controller is one of the core components behind *Application* abstraction.
Helm is a package manager for Kubernetes that provides package, install, and upgrade a set of YAML files for Kubernetes as a unit. KubeVela leverages Helm heavily to package the capability dependencies and Helm is also one of the core templating engines behind *Application* abstraction.
Though KubeVela itself is not a package manager, it's a core engine for platform builders to create upper layer platforms in easy and repeatable approach.
### Kubernetes
### KubeVela vs. Kubernetes
KubeVela is a Kubernetes plugin for building upper layer platforms. It leverages the native Kubernetes extensibility and capabilities to resolve a hard problem - making shipping applications enjoyable on Kubernetes.
KubeVela is a Kubernetes plugin for building higher level abstractions. It leverages [Open Application Model](https://github.com/oam-dev/spec) and the native Kubernetes extensibility to resolve a hard problem - making shipping applications enjoyable on Kubernetes.
## Getting Started
[Install KubeVela](./install) into any Kubernetes cluster to get started.
Now let's [get started](./quick-start.md) with KubeVela!

View File

@@ -93,7 +93,7 @@ spec:
}
writeConnectionSecretToRef: {
namespace: context.namespace
name: context.outputSecretName
name: parameter.secretName
}
providerConfigRef: {
name: "default"
@@ -102,15 +102,14 @@ spec:
}
}
parameter: {
engine: *"mysql" | string
engineVersion: *"8.0" | string
instanceClass: *"rds.mysql.c1.large" | string
username: string
engine: *"mysql" | string
engineVersion: *"8.0" | string
instanceClass: *"rds.mysql.c1.large" | string
username: string
secretName: string
}
```
Noted: In application, application developers need to use property `outputSecretName` as the secret name which is used to store all connection
items of cloud resource connections information.
```
### Step 2: Prepare TraitDefinition `service-binding` to do env-secret mapping
@@ -222,7 +221,7 @@ spec:
engineVersion: "8.0"
instanceClass: rds.mysql.c1.large
username: oamtest
outputSecretName: db-conn
secretName: db-conn
```
@@ -273,7 +272,7 @@ spec:
dataRedundancyType: parameter.dataRedundancyType
writeConnectionSecretToRef: {
namespace: context.namespace
name: context.outputSecretName
name: parameter.secretName
}
providerConfigRef: {
name: "default"
@@ -286,8 +285,8 @@ spec:
acl: *"private" | string
storageClass: *"Standard" | string
dataRedundancyType: *"LRS" | string
secretName: string
}
```
Update the application to also consume cloud resource OSS.
@@ -329,13 +328,13 @@ spec:
engineVersion: "8.0"
instanceClass: rds.mysql.c1.large
username: oamtest
outputSecretName: db-conn
secretName: db-conn
- name: sample-oss
type: alibaba-oss
properties:
name: velaweb
outputSecretName: oss-conn
secretName: oss-conn
```
Apply it and verify the application.
@@ -376,7 +375,7 @@ spec:
engineVersion: "8.0"
instanceClass: rds.mysql.c1.large
username: oamtest
outputSecretName: db-conn
secretName: db-conn
```
Apply the application to Kubernetes and a RDS instance will be automatically provisioned (may take some time, ~2 mins).

View File

@@ -8,7 +8,7 @@ With flexibility in defining abstractions, it's important to be able to debug, t
Please make sure below CLIs are present in your environment:
* [`cue` >=v0.2.2](https://cuelang.org/docs/install/)
* [`vela` (>v1.0.0)](https://kubevela.io/#/en/install?id=_3-optional-get-kubevela-cli)
* [`vela` (>v1.0.0)](../install#4-optional-get-kubevela-cli)
## Define Definition and Template

View File

@@ -234,7 +234,7 @@ spec:
replicas: 5
```
5. Apply the application rollout that upgarde the application from v1 to v2
5. Apply the application rollout that upgrade the application from v1 to v2
```yaml
apiVersion: core.oam.dev/v1beta1
kind: AppRollout
@@ -263,4 +263,4 @@ Here is the high level state transition graph
## Roadmap
Our recent roadmap for progressive rollout is [here](./roadmap).
Our recent roadmap for progressive rollout is [here](./roadmap).

View File

@@ -3,7 +3,6 @@ kind: Application
metadata:
name: test-rolling
annotations:
"app.oam.dev/rolling-components": "metrics-provider"
"app.oam.dev/rollout-template": "true"
spec:
components:

View File

@@ -3,7 +3,6 @@ kind: Application
metadata:
name: test-rolling
annotations:
"app.oam.dev/rolling-components": "metrics-provider"
"app.oam.dev/rollout-template": "true"
spec:
components:

View File

@@ -5,7 +5,7 @@ metadata:
spec:
# application (revision) reference
sourceAppRevisionName: test-rolling-v2
targetAppRevisionName: test-rolling-v3
targetAppRevisionName: test-rolling-v1
componentList:
- metrics-provider
rolloutPlan:

View File

@@ -1,118 +0,0 @@
import React from 'react';
import clsx from 'clsx';
import Layout from '@theme/Layout';
import Link from '@docusaurus/Link';
import features from '../data/features'
import Translate, { translate } from '@docusaurus/Translate';
import useDocusaurusContext from '@docusaurus/useDocusaurusContext';
import useBaseUrl from '@docusaurus/useBaseUrl';
import GitHubButton from 'react-github-btn';
import styles from './styles.module.css';
function Feature({ imgUrl, title, description, reverse }) {
return (
<div className={clsx('row', styles.feature, reverse ? styles.featureReverse : '')}>
<div className="col col--3">
<div className="text--center">
{imgUrl && <img className={styles.featureImage} src={useBaseUrl(imgUrl)} alt={title} />}
</div>
</div>
<div className={clsx('col col--9', styles.featureDesc)}>
<div>
<h2>{title}</h2>
<div>{description}</div>
</div>
</div>
</div>
)
}
const Button = ({ children, href }) => {
return (
<div className="col col--2 margin-horiz--sm">
<Link
className="button button--outline button--primary button--lg"
to={href}>
{children}
</Link>
</div>
);
};
export default function Home() {
const context = useDocusaurusContext();
const { siteConfig = {} } = context;
return (
<Layout title={siteConfig.tagline} description={siteConfig.tagline}>
<header className={clsx('hero', styles.hero)}>
<div className="container text--center">
<div className={styles.heroLogoWrapper}>
<img className={styles.heroLogo} src={useBaseUrl('img/logo.svg')} alt="Kubevela Logo" />
</div>
<h2 className={clsx('hero__title', styles.heroTitle)}>{siteConfig.title}</h2>
<GitHubButton
href="https://github.com/oam-dev/kubevela"
data-icon="octicon-star"
data-size="large"
data-show-count="true"
aria-label="Star facebook/metro on GitHub">
Star
</GitHubButton>
<p className="hero__subtitle">{siteConfig.tagline}</p>
<div
className={clsx(styles.heroButtons, 'name', 'margin-vert--md')}>
<Button href={useBaseUrl('docs/quick-start')}>Get Started</Button>
<Button href={useBaseUrl('docs/')}>Learn More</Button>
</div>
</div>
</header>
<WhatIs />
<main className={clsx('hero', styles.hero)}>
<div className="container">
<section className={styles.features}>
<div className="container">
{features.map((f, idx) => (
<Feature key={idx} {...f} />
))}
</div>
</section>
</div>
</main>
</Layout>
);
}
const WhatIs = () => (
<div className={clsx('hero', styles.hero)}>
<div className="container">
<div className="row">
<div className="col col--6">
<h1><Translate>What is KubeVela?</Translate></h1>
<p className="hero__subtitle">
<small>
<strong><Translate>For platform builders</Translate></strong>: <Translate>
KubeVela serves as a framework that empowers them to create user friendly yet highly extensible platforms at ease
</Translate>
<br />
<strong><Translate>For developers</Translate></strong>: <Translate>
such Application abstraction built with KubeVela will enable them to design and ship their applications to Kubernetes with minimal effort.
</Translate>
</small>
</p>
</div>
<div className="col">
<img
className="image"
src={useBaseUrl("img/what-is-kubevela.png")}
align="right"
alt="what is kubevela"
/>
</div>
</div>
</div>
</div>
);

View File

@@ -1,116 +0,0 @@
# Installs the KubeVela CLI (vela.exe) on Windows.
# Implemented based on Dapr Cli https://github.com/dapr/cli/tree/master/install
#
# Usage: install.ps1 [-Version <tag>] [-VelaRoot <dir>]
#   -Version  release tag to install; when omitted the newest non-rc release is used
#   -VelaRoot install directory (default c:\vela); appended to the user PATH
param (
    [string]$Version,
    [string]$VelaRoot = "c:\vela"
)

Write-Output ""
# Make any cmdlet error terminate the script instead of continuing.
$ErrorActionPreference = 'stop'

#Escape space of VelaRoot path
$VelaRoot = $VelaRoot -replace ' ', '` '

# Constants
$VelaCliFileName = "vela.exe"
$VelaCliFilePath = "${VelaRoot}\${VelaCliFileName}"

# GitHub Org and repo hosting Vela CLI
$GitHubOrg = "oam-dev"
$GitHubRepo = "kubevela"

# Set Github request authentication for basic authentication.
# GITHUB_USER/GITHUB_TOKEN are optional; providing them raises the API rate limit.
if ($Env:GITHUB_USER) {
    $basicAuth = [System.Convert]::ToBase64String([System.Text.Encoding]::ASCII.GetBytes($Env:GITHUB_USER + ":" + $Env:GITHUB_TOKEN));
    $githubHeader = @{"Authorization" = "Basic $basicAuth" }
}
else {
    $githubHeader = @{}
}

# Bail out when the execution policy would block running this script.
# NOTE(review): this check mirrors the Dapr installer; confirm the enum
# comparison covers all intended policies on the target PowerShell versions.
if ((Get-ExecutionPolicy) -gt 'RemoteSigned' -or (Get-ExecutionPolicy) -eq 'ByPass') {
    Write-Output "PowerShell requires an execution policy of 'RemoteSigned'."
    Write-Output "To make this change please run:"
    Write-Output "'Set-ExecutionPolicy RemoteSigned -scope CurrentUser'"
    break
}

# Change security protocol to support TLS 1.2 / 1.1 / 1.0 - old powershell uses TLS 1.0 as a default protocol
[Net.ServicePointManager]::SecurityProtocol = "tls12, tls11, tls"

# Check if KubeVela CLI is installed.
if (Test-Path $VelaCliFilePath -PathType Leaf) {
    Write-Warning "vela is detected - $VelaCliFilePath"
    Invoke-Expression "$VelaCliFilePath --version"
    Write-Output "Reinstalling KubeVela..."
}
else {
    Write-Output "Installing Vela..."
}

# Create Vela Directory (idempotent: ignore "already exists" errors).
Write-Output "Creating $VelaRoot directory"
New-Item -ErrorAction Ignore -Path $VelaRoot -ItemType "directory"
if (!(Test-Path $VelaRoot -PathType Container)) {
    throw "Cannot create $VelaRoot"
}

# Get the list of release from GitHub
$releases = Invoke-RestMethod -Headers $githubHeader -Uri "https://api.github.com/repos/${GitHubOrg}/${GitHubRepo}/releases" -Method Get
if ($releases.Count -eq 0) {
    throw "No releases from github.com/oam-dev/kubevela repo"
}

# Filter windows binary and download archive
$os_arch = "windows-amd64"
if (!$Version) {
    # No explicit version: take the newest release whose tag is not an "rc".
    $windowsAsset = $releases | Where-Object { $_.tag_name -notlike "*rc*" } | Select-Object -First 1 | Select-Object -ExpandProperty assets | Where-Object { $_.name -Like "*${os_arch}.zip" }
    if (!$windowsAsset) {
        throw "Cannot find the windows KubeVela CLI binary"
    }
    $zipFileUrl = $windowsAsset.url
    $assetName = $windowsAsset.name
} else {
    # Explicit version: build the canonical download URL for that tag.
    $assetName = "vela-${Version}-${os_arch}.zip"
    $zipFileUrl = "https://github.com/${GitHubOrg}/${GitHubRepo}/releases/download/${Version}/${assetName}"
}

$zipFilePath = $VelaRoot + "\" + $assetName
Write-Output "Downloading $zipFileUrl ..."

# Ask the GitHub API for the raw asset bytes rather than its JSON description.
$githubHeader.Accept = "application/octet-stream"
Invoke-WebRequest -Headers $githubHeader -Uri $zipFileUrl -OutFile $zipFilePath
if (!(Test-Path $zipFilePath -PathType Leaf)) {
    throw "Failed to download Vela Cli binary - $zipFilePath"
}

# Extract KubeVela CLI to $VelaRoot
Write-Output "Extracting $zipFilePath..."
Microsoft.Powershell.Archive\Expand-Archive -Force -Path $zipFilePath -DestinationPath $VelaRoot
# The archive nests the binary under an "<os>-<arch>" folder; copy it up.
$ExtractedVelaCliFilePath = "${VelaRoot}\${os_arch}\${VelaCliFileName}"
Copy-Item $ExtractedVelaCliFilePath -Destination $VelaCliFilePath
if (!(Test-Path $VelaCliFilePath -PathType Leaf)) {
    throw "Failed to extract Vela Cli archive - $zipFilePath"
}

# Check the KubeVela CLI version
Invoke-Expression "$VelaCliFilePath --version"

# Clean up zipfile
Write-Output "Clean up $zipFilePath..."
Remove-Item $zipFilePath -Force

# Add VelaRoot directory to User Path environment variable
Write-Output "Try to add $VelaRoot to User Path Environment variable..."
$UserPathEnvironmentVar = [Environment]::GetEnvironmentVariable("PATH", "User")
# NOTE(review): the substring match '*vela*' also skips PATHs that merely
# contain "vela" anywhere (e.g. an unrelated directory) — verify intent.
if ($UserPathEnvironmentVar -like '*vela*') {
    Write-Output "Skipping to add $VelaRoot to User Path - $UserPathEnvironmentVar"
}
else {
    [System.Environment]::SetEnvironmentVariable("PATH", $UserPathEnvironmentVar + ";$VelaRoot", "User")
    $UserPathEnvironmentVar = [Environment]::GetEnvironmentVariable("PATH", "User")
    Write-Output "Added $VelaRoot to User Path - $UserPathEnvironmentVar"
}

Write-Output "`r`nKubeVela CLI is installed successfully."
Write-Output "To get started with KubeVela, please visit https://kubevela.io."

View File

@@ -1,189 +0,0 @@
#!/usr/bin/env bash
# Installs the KubeVela CLI (vela) on darwin/linux.
# Implemented based on Dapr Cli https://github.com/dapr/cli/tree/master/install

# Vela CLI location. The `: ${VAR:=default}` form keeps a value exported by
# the caller and falls back to the default otherwise.
: ${VELA_INSTALL_DIR:="/usr/local/bin"}

# sudo is required to copy binary to VELA_INSTALL_DIR for linux
# (flipped to "true" by getSystemInfo when the target needs root).
: ${USE_SUDO:="false"}

# Http request CLI (curl or wget, selected by checkHttpRequestCLI)
VELA_HTTP_REQUEST_CLI=curl

# GitHub Organization and repo name to download release
GITHUB_ORG=oam-dev
GITHUB_REPO=kubevela

# Vela CLI filename and final installed path
VELA_CLI_FILENAME=vela
VELA_CLI_FILE="${VELA_INSTALL_DIR}/${VELA_CLI_FILENAME}"
# Detect the host OS and CPU architecture (globals OS and ARCH, normalized to
# Go release naming) and decide whether sudo is required for installation.
getSystemInfo() {
    OS="$(uname | tr '[:upper:]' '[:lower:]')"
    ARCH="$(uname -m)"
    case "$ARCH" in
        x86_64)  ARCH="amd64" ;;
        aarch64) ARCH="arm64" ;;
        armv7*)  ARCH="arm"   ;;
    esac
    # Most linux distros need root permission to copy into /usr/local/bin.
    if [ "$OS" = "linux" ] && [ "$VELA_INSTALL_DIR" = "/usr/local/bin" ]; then
        USE_SUDO="true"
    fi
}
# Exit with an error unless the detected ${OS}-${ARCH} pair has a prebuilt
# release binary; otherwise report the platform and continue.
verifySupported() {
    local current_osarch="${OS}-${ARCH}"
    case "$current_osarch" in
        darwin-amd64|linux-amd64|linux-arm|linux-arm64)
            echo "Your system is ${OS}_${ARCH}"
            return
            ;;
    esac
    echo "No prebuilt binary for ${current_osarch}"
    exit 1
}
# Run the given command, prefixed with sudo when we are not root but the
# install target requires root (USE_SUDO is set by getSystemInfo).
runAsRoot() {
    local CMD="$*"
    # Fixed: the obsolescent `[ a -ne b -a c = d ]` form is unreliable per
    # POSIX and broke when $USE_SUDO expanded empty; use two quoted tests.
    if [ "$EUID" -ne 0 ] && [ "$USE_SUDO" = "true" ]; then
        CMD="sudo $CMD"
    fi
    # Intentionally unquoted so the string splits back into command + args.
    $CMD
}
# Select curl or wget as the HTTP client (global VELA_HTTP_REQUEST_CLI);
# abort when neither is available.
checkHttpRequestCLI() {
    # Fixed: also redirect stderr — `type` prints "not found" noise to stderr
    # when the probed command is missing, which leaked into the user's output.
    if type "curl" > /dev/null 2>&1; then
        VELA_HTTP_REQUEST_CLI=curl
    elif type "wget" > /dev/null 2>&1; then
        VELA_HTTP_REQUEST_CLI=wget
    else
        echo "Either curl or wget is required"
        exit 1
    fi
}
# Announce whether this is a fresh install or a reinstall, based on whether
# a vela binary already exists at the install path.
checkExistingVela() {
    if [ ! -f "$VELA_CLI_FILE" ]; then
        echo -e "Installing Vela CLI...\n"
        return
    fi
    echo -e "\nVela CLI is detected:"
    $VELA_CLI_FILE --version
    echo -e "Reinstalling Vela CLI - ${VELA_CLI_FILE}...\n"
}
# Find the newest non-rc release tag of oam-dev/kubevela and store it in the
# global ret_val (bash functions cannot return strings directly).
# The GitHub JSON is scraped with grep/awk/sed instead of jq so the script
# has no dependencies beyond curl-or-wget.
getLatestRelease() {
    local velaReleaseUrl="https://api.github.com/repos/${GITHUB_ORG}/${GITHUB_REPO}/releases"
    local latest_release=""
    if [ "$VELA_HTTP_REQUEST_CLI" == "curl" ]; then
        # keep "tag_name" lines, drop release candidates, take the first
        # match, then strip the surrounding quotes/comma from the value
        latest_release=$(curl -s $velaReleaseUrl | grep \"tag_name\" | grep -v rc | awk 'NR==1{print $2}' | sed -n 's/\"\(.*\)\",/\1/p')
    else
        latest_release=$(wget -q --header="Accept: application/json" -O - $velaReleaseUrl | grep \"tag_name\" | grep -v rc | awk 'NR==1{print $2}' | sed -n 's/\"\(.*\)\",/\1/p')
    fi
    ret_val=$latest_release
}
# Download the release archive for the tag given as $1 into a fresh temp dir.
# Sets globals read by installFile/cleanup: VELA_CLI_ARTIFACT, VELA_TMP_ROOT,
# ARTIFACT_TMP_FILE.
downloadFile() {
    LATEST_RELEASE_TAG=$1

    VELA_CLI_ARTIFACT="${VELA_CLI_FILENAME}-${LATEST_RELEASE_TAG}-${OS}-${ARCH}.tar.gz"
    DOWNLOAD_BASE="https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/download"
    DOWNLOAD_URL="${DOWNLOAD_BASE}/${LATEST_RELEASE_TAG}/${VELA_CLI_ARTIFACT}"

    # Create the temp directory
    VELA_TMP_ROOT=$(mktemp -dt vela-install-XXXXXX)
    ARTIFACT_TMP_FILE="$VELA_TMP_ROOT/$VELA_CLI_ARTIFACT"

    echo "Downloading $DOWNLOAD_URL ..."
    if [ "$VELA_HTTP_REQUEST_CLI" == "curl" ]; then
        curl -SsL "$DOWNLOAD_URL" -o "$ARTIFACT_TMP_FILE"
    else
        wget -q -O "$ARTIFACT_TMP_FILE" "$DOWNLOAD_URL"
    fi

    if [ ! -f "$ARTIFACT_TMP_FILE" ]; then
        echo "failed to download $DOWNLOAD_URL ..."
        exit 1
    fi
}
# Unpack the downloaded archive and copy the vela binary into
# VELA_INSTALL_DIR (via runAsRoot when needed), then verify it runs.
installFile() {
    tar xf "$ARTIFACT_TMP_FILE" -C "$VELA_TMP_ROOT"
    # The archive nests the binary under an "<os>-<arch>" directory.
    local tmp_root_vela_cli="$VELA_TMP_ROOT/${OS}-${ARCH}/$VELA_CLI_FILENAME"
    if [ ! -f "$tmp_root_vela_cli" ]; then
        echo "Failed to unpack Vela CLI executable."
        exit 1
    fi

    # Fixed: quote the path (it came from mktemp + user-influenced globals)
    # and grant execute to all users rather than only "others" (o+x).
    chmod a+x "$tmp_root_vela_cli"
    runAsRoot cp "$tmp_root_vela_cli" "$VELA_INSTALL_DIR"

    if [ -f "$VELA_CLI_FILE" ]; then
        echo "$VELA_CLI_FILENAME installed into $VELA_INSTALL_DIR successfully."
        $VELA_CLI_FILE --version
    else
        echo "Failed to install $VELA_CLI_FILENAME"
        exit 1
    fi
}
# EXIT trap: print help text when the script is exiting with a non-zero
# status, always clean up the temp directory, and propagate the original
# exit status.
fail_trap() {
    # Must be the very first statement so the exiting command's status
    # is captured before anything else overwrites $?.
    result=$?
    if [ "$result" != "0" ]; then
        echo "Failed to install Vela CLI"
        echo "For support, go to https://kubevela.io"
    fi
    cleanup
    exit $result
}
# Remove the temp download directory, if downloadFile created one.
# ${VELA_TMP_ROOT:-} guards against the variable being unset (e.g. when
# fail_trap fires before downloadFile ran).
cleanup() {
    if [[ -d "${VELA_TMP_ROOT:-}" ]]; then
        rm -rf "$VELA_TMP_ROOT"
    fi
}
# Print the closing "getting started" message.
installCompleted() {
    printf '\nTo get started with KubeVela, please visit https://kubevela.io\n'
}
# -----------------------------------------------------------------------------
# main
# -----------------------------------------------------------------------------
# Install the trap before any step runs: fail_trap fires on every exit and
# prints support info when the exit status is non-zero.
trap "fail_trap" EXIT

getSystemInfo
verifySupported
checkExistingVela
checkHttpRequestCLI

# $1 (optional) is a bare version number such as "1.0.0"; it is prefixed with
# "v" to form the release tag. Without it the newest non-rc tag is used.
# Either branch leaves the chosen tag in the global ret_val.
if [ -z "$1" ]; then
    echo "Getting the latest Vela CLI..."
    getLatestRelease
else
    ret_val=v$1
fi

echo "Installing $ret_val Vela CLI..."

downloadFile $ret_val
installFile
cleanup

installCompleted

View File

@@ -21,7 +21,7 @@ module.exports = {
label: 'Rollout Features',
items:[
"rollout/rollout",
"rollout/appdeploy"
'rollout/appdeploy'
]
},
{
@@ -63,27 +63,33 @@ module.exports = {
'kube/trait',
]
},
{
'Defining Traits': [
'cue/trait',
'cue/patch-trait',
'cue/status',
'cue/advanced',
]
},
{
'Defining Cloud Service': [
'platform-engineers/cloud-services',
]
},
{
'Hands-on Lab': [
'platform-engineers/debug-test-cue',
'platform-engineers/keda'
]
}
]
}
},
{
type: 'category',
label: 'Defining Traits',
items: [
'cue/trait',
'cue/patch-trait',
'cue/status',
'cue/advanced',
]
},
{
type: 'category',
label: 'Defining Cloud Service',
items: [
'platform-engineers/cloud-services'
]
},
{
type: 'category',
label: 'Hands-on Lab',
items: [
'platform-engineers/debug-test-cue',
'platform-engineers/keda'
]
},
],
},
{

View File

@@ -64,16 +64,6 @@ func fixNewSchemaValidationCheck(crds []string) error {
}
ioutil.WriteFile(crd, []byte(strings.Join(newData, "\n")), 0644)
}
// fix issue https://github.com/oam-dev/kubevela/issues/993
if strings.HasSuffix(crd, "legacy/charts/vela-core-legacy/crds/standard.oam.dev_routes.yaml") {
for _, line := range strings.Split(string(data), "\n") {
if strings.Contains(line, "default: Issuer") {
continue
}
newData = append(newData, line)
}
ioutil.WriteFile(crd, []byte(strings.Join(newData, "\n")), 0644)
}
}
return nil
}

87
hack/website/release.sh Normal file
View File

@@ -0,0 +1,87 @@
#!/bin/bash -l
#
# Copyright 2021. The KubeVela Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Syncs docs from this repo into the oam-dev/kubevela.io website repo and,
# for release branches, cuts a versioned docs snapshot with Docusaurus.
# Environment: SSH_DEPLOY_KEY (optional; enables the git@ clone/push),
# VERSION (branch name, e.g. "release-1.0"), COMMIT_ID (for the commit msg).
set -e

# Install the deploy key so the SSH clone/push below can authenticate.
if [[ -n "$SSH_DEPLOY_KEY" ]]
then
    mkdir -p ~/.ssh
    echo "$SSH_DEPLOY_KEY" > ~/.ssh/id_rsa
    chmod 600 ~/.ssh/id_rsa
fi

echo "git clone"
git config --global user.email "yangsoonlx@gmail.com"
git config --global user.name "kubevela-bot"
git clone --single-branch --depth 1 git@github.com:oam-dev/kubevela.io.git git-page

echo "sidebars updates"
cat docs/sidebars.js > git-page/sidebars.js

# Wipe then re-copy so deletions in this repo propagate to the website repo.
echo "clear en docs"
rm -r git-page/docs/*
echo "clear zh docs"
rm -r git-page/i18n/zh/docusaurus-plugin-content-docs/*
echo "clear resources"
rm -r git-page/resources/*

echo "update resources"
cp -R docs/resources/* git-page/resources/
echo "update docs"
cp -R docs/en/* git-page/docs/
cp -R docs/zh-CN/* git-page/i18n/zh/docusaurus-plugin-content-docs/

echo "git push"
cd git-page

# Check for release only
SUB='release-'
if [[ "$VERSION" == *"$SUB"* ]]
then
    # release-x.y -> vx.y
    version=$(echo $VERSION|sed -e 's/\/*.*\/*-/v/g')
    echo "updating website for version $version"
    # Drop any existing snapshot of this version so it can be re-cut cleanly.
    if grep -q $version versions.json; then
        rm -r versioned_docs/version-${version}/
        rm versioned_sidebars/version-${version}-sidebars.json
        sed -i.bak "/${version}/d" versions.json
        rm versions.json.bak
    fi
    # nodejieba powers zh-CN search indexing; prefer lockfile-pinned installs.
    yarn add nodejieba
    if [ -e yarn.lock ]; then
        yarn install --frozen-lockfile
    elif [ -e package-lock.json ]; then
        npm ci
    else
        npm i
    fi
    yarn run docusaurus docs:version $version
fi

# Push only when the sync produced actual changes.
# NOTE(review): `git diff --quiet` ignores untracked files, so brand-new doc
# pages alone may not trigger a push — verify this is the intended behavior.
if git diff --quiet
then
    echo "nothing need to push, finished!"
else
    git add .
    git commit -m "sync commit $COMMIT_ID from kubevela-$VERSION"
    git push origin main
fi

View File

@@ -1,4 +1,4 @@
#!/bin/sh -l
#!/bin/bash -l
#
# Copyright 2021. The KubeVela Authors.
#
@@ -24,12 +24,6 @@ git clone --single-branch --depth 1 https://github.com/oam-dev/kubevela.io.git g
echo "sidebars updates"
cat ${docs_path}/sidebars.js > git-page/sidebars.js
echo "docusaurus.config updates"
cat ${docs_path}/docusaurus.config.js > git-page/docusaurus.config.js
echo "index info updates"
cat ${docs_path}/index.js > git-page/src/pages/index.js
echo "clear en docs"
rm -r git-page/docs/*
echo "clear zh docs"
@@ -58,4 +52,32 @@ npm i
fi
echo "run build"
npm run build
npm run build
# Check for release only
SUB='release-'
if [[ "$VERSION" == *"$SUB"* ]]
then
# release-x.y -> vx.y
version=$(echo $VERSION|sed -e 's/\/*.*\/*-/v/g')
echo "updating website for version $version"
if grep -q $version versions.json; then
rm -r versioned_docs/version-${version}/
rm versioned_sidebars/version-${version}-sidebars.json
sed -i.bak "/${version}/d" versions.json
rm versions.json.bak
fi
yarn add nodejieba
if [ -e yarn.lock ]; then
yarn install --frozen-lockfile
elif [ -e package-lock.json ]; then
npm ci
else
npm i
fi
yarn run docusaurus docs:version $version
fi

View File

@@ -14,6 +14,8 @@ spec:
kind: AppDeployment
listKind: AppDeploymentList
plural: appdeployments
shortNames:
- appdeploy
singular: appdeployment
scope: Namespaced
subresources:

View File

@@ -9,9 +9,14 @@ metadata:
spec:
group: core.oam.dev
names:
categories:
- oam
kind: ApplicationRevision
listKind: ApplicationRevisionList
plural: applicationrevisions
shortNames:
- apprev
- revisions
singular: applicationrevision
scope: Namespaced
validation:

View File

@@ -14,6 +14,8 @@ spec:
kind: Application
listKind: ApplicationList
plural: applications
shortNames:
- apps
singular: application
scope: Namespaced
validation:

View File

@@ -7,6 +7,25 @@ metadata:
controller-gen.kubebuilder.io/version: v0.2.4
name: approllouts.core.oam.dev
spec:
additionalPrinterColumns:
- JSONPath: .status.rolloutTargetSize
name: TARGET
type: string
- JSONPath: .status.upgradedReplicas
name: UPGRADED
type: string
- JSONPath: .status.upgradedReadyReplicas
name: READY
type: string
- JSONPath: .status.batchRollingState
name: BATCH-STATE
type: string
- JSONPath: .status.rollingState
name: ROLLING-STATE
type: string
- JSONPath: .metadata.creationTimestamp
name: AGE
type: date
group: core.oam.dev
names:
categories:
@@ -14,6 +33,9 @@ spec:
kind: AppRollout
listKind: AppRolloutList
plural: approllouts
shortNames:
- approllout
- rollout
singular: approllout
scope: Namespaced
validation:
@@ -37,7 +59,7 @@ spec:
type: string
type: array
revertOnDelete:
description: RevertOnDelete revert the rollout when the rollout CR is deleted It will remove the target app from the kubernetes if it's set to true
description: RevertOnDelete revert the failed rollout when the rollout CR is deleted It will revert the change back to the source version at once (not in batches) Default is false
type: boolean
rolloutPlan:
description: RolloutPlan is the details on how to rollout the resources

View File

@@ -7,6 +7,16 @@ metadata:
controller-gen.kubebuilder.io/version: v0.2.4
name: componentdefinitions.core.oam.dev
spec:
additionalPrinterColumns:
- JSONPath: .spec.workload.definition.kind
name: WORKLOAD-KIND
type: string
- JSONPath: .metadata.annotations.definition\.oam\.dev/description
name: DESCRIPTION
type: string
- JSONPath: .metadata.creationTimestamp
name: AGE
type: date
group: core.oam.dev
names:
categories:
@@ -14,6 +24,8 @@ spec:
kind: ComponentDefinition
listKind: ComponentDefinitionList
plural: componentdefinitions
shortNames:
- comp
singular: componentdefinition
scope: Namespaced
subresources:

View File

@@ -8,9 +8,15 @@ metadata:
name: traitdefinitions.core.oam.dev
spec:
additionalPrinterColumns:
- JSONPath: .spec.definitionRef.name
name: DEFINITION-NAME
- JSONPath: .spec.appliesToWorkloads
name: APPLIES-TO
type: string
- JSONPath: .metadata.annotations.definition\.oam\.dev/description
name: DESCRIPTION
type: string
- JSONPath: .metadata.creationTimestamp
name: AGE
type: date
group: core.oam.dev
names:
categories:
@@ -18,6 +24,8 @@ spec:
kind: TraitDefinition
listKind: TraitDefinitionList
plural: traitdefinitions
shortNames:
- trait
singular: traitdefinition
scope: Namespaced
subresources:

View File

@@ -7,10 +7,6 @@ metadata:
controller-gen.kubebuilder.io/version: v0.2.4
name: workloaddefinitions.core.oam.dev
spec:
additionalPrinterColumns:
- JSONPath: .spec.definitionRef.name
name: DEFINITION-NAME
type: string
group: core.oam.dev
names:
categories:
@@ -18,6 +14,8 @@ spec:
kind: WorkloadDefinition
listKind: WorkloadDefinitionList
plural: workloaddefinitions
shortNames:
- workload
singular: workloaddefinition
scope: Namespaced
subresources: {}

View File

@@ -208,9 +208,13 @@ func handleItemsOfArrayType(t map[string]interface{}) {
}
if t["type"] == "array" {
if i, ok := t["items"].([]interface{}); ok {
itemSpec, _ := i[0].(map[string]interface{})
itemSpec["enum"] = nil
t["items"] = itemSpec
if len(i) > 0 {
if itemSpec, ok := i[0].(map[string]interface{}); ok {
handleItemsOfArrayType(itemSpec)
itemSpec["enum"] = nil
t["items"] = itemSpec
}
}
}
}
}

View File

@@ -54,7 +54,7 @@ func TestGenerateSchemaFromValues(t *testing.T) {
func TestGetChartValuesJSONSchema(t *testing.T) {
testHelm := testData("podinfo", "5.1.4", "http://oam.dev/catalog")
wantSchema, err := ioutil.ReadFile("./testdata/values.schema.json")
wantSchema, err := ioutil.ReadFile("./testdata/podinfo.values.schema.json")
if err != nil {
t.Error(err, "cannot load expected data")
}
@@ -241,6 +241,37 @@ func TestMakeSwaggerCompatible(t *testing.T) {
}}`,
want: `{"objectArray":{"items":{"enum":null,"properties":{"f0":{"enum":["v0"],"type":"string"},"f1":{"enum":["v1"],"type":"string"},"f2":{"enum":["v2"],"type":"string"}},"required":["f0","f1","f2"],"type":"object"},"type":"array"}}`,
},
{
caseName: "object type array embeds object type array",
testdata: `{
"objectArray": {
"type": "array",
"items": [
{
"type": "array",
"items": [
{
"type": "object",
"required": [
"f0"
],
"properties": {
"f0": {
"type": "string",
"enum": [
"v0"
]
}
}
}
]
}
]
}
}
`,
want: `{"objectArray":{"items":{"enum":null,"items":{"enum":null,"properties":{"f0":{"enum":["v0"],"type":"string"}},"required":["f0"],"type":"object"},"type":"array"},"type":"array"}}`,
},
}
for _, tc := range tests {

View File

@@ -0,0 +1,345 @@
{
"properties": {
"affinity": {
"type": "object"
},
"backend": {
"nullable": true
},
"backends": {
"default": [],
"type": "array"
},
"cache": {
"default": "",
"description": "Redis address in the format <host>:<port>",
"type": "string"
},
"certificate": {
"description": "create a certificate manager certificate",
"properties": {
"create": {
"default": false,
"type": "boolean"
},
"dnsNames": {
"default": [
"podinfo"
],
"description": "the hostname / subject alternative names for the certificate",
"items": {
"type": "string"
},
"type": "array"
},
"issuerRef": {
"description": "the issuer used to issue the certificate",
"properties": {
"kind": {
"default": "ClusterIssuer",
"type": "string"
},
"name": {
"default": "self-signed",
"type": "string"
}
},
"type": "object"
}
},
"type": "object"
},
"faults": {
"properties": {
"delay": {
"default": false,
"type": "boolean"
},
"error": {
"default": false,
"type": "boolean"
},
"testFail": {
"default": false,
"type": "boolean"
},
"testTimeout": {
"default": false,
"type": "boolean"
},
"unhealthy": {
"default": false,
"type": "boolean"
},
"unready": {
"default": false,
"type": "boolean"
}
},
"type": "object"
},
"h2c": {
"properties": {
"enabled": {
"default": false,
"type": "boolean"
}
},
"type": "object"
},
"hpa": {
"description": "metrics-server add-on required",
"properties": {
"cpu": {
"description": "average total CPU usage per pod (1-100)",
"nullable": true
},
"enabled": {
"default": false,
"type": "boolean"
},
"maxReplicas": {
"default": 10,
"type": "integer"
},
"memory": {
"description": "average memory usage per pod (100Mi-1Gi)",
"nullable": true
},
"requests": {
"description": "average http requests per second per pod (k8s-prometheus-adapter)",
"nullable": true
}
},
"type": "object"
},
"image": {
"properties": {
"pullPolicy": {
"default": "IfNotPresent",
"type": "string"
},
"repository": {
"default": "ghcr.io/stefanprodan/podinfo",
"type": "string"
},
"tag": {
"default": "5.1.4",
"type": "string"
}
},
"type": "object"
},
"ingress": {
"properties": {
"annotations": {
"type": "object"
},
"enabled": {
"default": false,
"type": "boolean"
},
"hosts": {
"default": [],
"type": "array"
},
"path": {
"default": "/*",
"description": "kubernetes.io/ingress.class: nginx\nkubernetes.io/tls-acme: \"true\"",
"type": "string"
},
"tls": {
"default": [],
"description": "- podinfo.local",
"type": "array"
}
},
"type": "object"
},
"linkerd": {
"properties": {
"profile": {
"properties": {
"enabled": {
"default": false,
"type": "boolean"
}
},
"type": "object"
}
},
"type": "object"
},
"logLevel": {
"default": "info",
"type": "string"
},
"nodeSelector": {
"type": "object"
},
"podAnnotations": {
"type": "object"
},
"redis": {
"description": "Redis deployment",
"properties": {
"enabled": {
"default": false,
"type": "boolean"
},
"repository": {
"default": "redis",
"type": "string"
},
"tag": {
"default": "6.0.8",
"type": "string"
}
},
"type": "object"
},
"replicaCount": {
"default": 1,
"type": "integer"
},
"resources": {
"properties": {
"limits": {
"nullable": true
},
"requests": {
"properties": {
"cpu": {
"default": "1m",
"type": "string"
},
"memory": {
"default": "16Mi",
"type": "string"
}
},
"type": "object"
}
},
"type": "object"
},
"service": {
"properties": {
"enabled": {
"default": true,
"type": "boolean"
},
"externalPort": {
"default": 9898,
"type": "integer"
},
"grpcPort": {
"default": 9999,
"type": "integer"
},
"grpcService": {
"default": "podinfo",
"type": "string"
},
"hostPort": {
"description": "the port used to bind the http port to the host\nNOTE: requires privileged container with NET_BIND_SERVICE capability -- this is useful for testing\nin local clusters such as kind without port forwarding",
"nullable": true
},
"httpPort": {
"default": 9898,
"type": "integer"
},
"metricsPort": {
"default": 9797,
"type": "integer"
},
"nodePort": {
"default": 31198,
"type": "integer"
},
"type": {
"default": "ClusterIP",
"type": "string"
}
},
"type": "object"
},
"serviceAccount": {
"properties": {
"enabled": {
"default": false,
"description": "Specifies whether a service account should be created",
"type": "boolean"
},
"name": {
"description": "The name of the service account to use.\nIf not set and create is true, a name is generated using the fullname template",
"nullable": true
}
},
"type": "object"
},
"serviceMonitor": {
"properties": {
"enabled": {
"default": false,
"type": "boolean"
},
"interval": {
"default": "15s",
"type": "string"
}
},
"type": "object"
},
"tls": {
"description": "enable tls on the podinfo service",
"properties": {
"certPath": {
"default": "/data/cert",
"description": "the path where the certificate key pair will be mounted",
"type": "string"
},
"enabled": {
"default": false,
"type": "boolean"
},
"hostPort": {
"description": "the port used to bind the tls port to the host\nNOTE: requires privileged container with NET_BIND_SERVICE capability -- this is useful for testing\nin local clusters such as kind without port forwarding",
"nullable": true
},
"port": {
"default": 9899,
"description": "the port used to host the tls endpoint on the service",
"type": "integer"
},
"secretName": {
"description": "the name of the secret used to mount the certificate key pair",
"nullable": true
}
},
"type": "object"
},
"tolerations": {
"default": [],
"type": "array"
},
"ui": {
"properties": {
"color": {
"default": "#34577c",
"type": "string"
},
"logo": {
"default": "",
"type": "string"
},
"message": {
"default": "",
"type": "string"
}
},
"type": "object"
}
},
"type": "object"
}

View File

@@ -140,17 +140,32 @@
"type": "boolean"
},
"hosts": {
"default": [],
"type": "array"
},
"path": {
"default": "/*",
"description": "kubernetes.io/ingress.class: nginx\nkubernetes.io/tls-acme: \"true\"",
"type": "string"
"items": {
"properties": {
"host": {
"default": "chart-example.local",
"type": "string"
},
"paths": {
"items": {
"properties": {
"path": {
"default": "/",
"type": "string"
}
},
"type": "object"
},
"type": "array"
}
},
"type": "object"
},
"type": "array"
},
"tls": {
"default": [],
"description": "- podinfo.local",
"type": "array"
}
},

View File

@@ -104,9 +104,10 @@ ingress:
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
path: /*
hosts: []
# - podinfo.local
hosts:
- host: chart-example.local
paths:
- path: /
tls: []
# - secretName: chart-example-tls
# hosts:

View File

@@ -59,10 +59,6 @@ func NewRolloutPlanController(client client.Client, parentController oam.Object,
rolloutSpec *v1alpha1.RolloutPlan, rolloutStatus *v1alpha1.RolloutStatus,
targetWorkload, sourceWorkload *unstructured.Unstructured) *Controller {
initializedRolloutStatus := rolloutStatus.DeepCopy()
// use Mutation webhook?
if len(initializedRolloutStatus.RollingState) == 0 {
initializedRolloutStatus.ResetStatus()
}
if len(initializedRolloutStatus.BatchRollingState) == 0 {
initializedRolloutStatus.BatchRollingState = v1alpha1.BatchInitializingState
}
@@ -138,7 +134,7 @@ func (r *Controller) Reconcile(ctx context.Context) (res reconcile.Result, statu
case v1alpha1.RollingInBatchesState:
r.reconcileBatchInRolling(ctx, workloadController)
case v1alpha1.RolloutFailingState, v1alpha1.RolloutAbandoningState:
case v1alpha1.RolloutFailingState, v1alpha1.RolloutAbandoningState, v1alpha1.RolloutDeletingState:
if succeed := workloadController.Finalize(ctx, false); succeed {
r.finalizeRollout(ctx)
}

View File

@@ -222,6 +222,10 @@ func (c *CloneSetController) Finalize(ctx context.Context, succeed bool) bool {
newOwnerList = append(newOwnerList, owner)
}
c.cloneSet.SetOwnerReferences(newOwnerList)
// pause the resource when the rollout failed so we can try again next time
if !succeed {
c.cloneSet.Spec.UpdateStrategy.Paused = true
}
// patch the CloneSet
if err := c.client.Patch(ctx, c.cloneSet, clonePatch, client.FieldOwner(c.parentController.GetUID())); err != nil {
c.recorder.Event(c.parentController, event.Warning("Failed to the finalize the cloneset", err))

View File

@@ -16,12 +16,6 @@ limitations under the License.
package common
import (
"reflect"
v1 "k8s.io/api/core/v1"
)
const (
// AutoscaleControllerName is the controller name of Trait autoscale
AutoscaleControllerName = "autoscale"
@@ -38,9 +32,3 @@ const (
// DisableNoneCaps disable none of capabilities
DisableNoneCaps = ""
)
// ServiceKind is string "Service"
var ServiceKind = reflect.TypeOf(v1.Service{}).Name()
// ServiceAPIVersion is string "v1"
var ServiceAPIVersion = v1.SchemeGroupVersion.String()

View File

@@ -273,7 +273,7 @@ spec:
return 0
}
return traitObj.GetGeneration()
}, 5*time.Second, time.Second).Should(Equal(int64(2)))
}, 20*time.Second, time.Second).Should(Equal(int64(2)))
By("Check labels are removed")
_, found, _ := unstructured.NestedString(traitObj.UnstructuredContent(), "metadata", "labels", "test.label")

View File

@@ -24,8 +24,6 @@ import (
"strconv"
"strings"
types2 "github.com/oam-dev/kubevela/apis/types"
runtimev1alpha1 "github.com/crossplane/crossplane-runtime/apis/core/v1alpha1"
"github.com/crossplane/crossplane-runtime/pkg/fieldpath"
"github.com/crossplane/crossplane-runtime/pkg/resource"
@@ -42,6 +40,7 @@ import (
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
oamtype "github.com/oam-dev/kubevela/apis/types"
helmapi "github.com/oam-dev/kubevela/pkg/appfile/helm/flux2apis"
"github.com/oam-dev/kubevela/pkg/controller/common"
"github.com/oam-dev/kubevela/pkg/controller/utils"
@@ -115,11 +114,15 @@ func (r *components) Render(ctx context.Context, ac *v1alpha2.ApplicationConfigu
rollingComponents[componentName] = true
}
}
// we need to do a template roll out if it's not done yet
needRolloutTemplate = ac.Status.RollingStatus != types2.RollingTemplated
} else if ac.Status.RollingStatus == types2.RollingTemplated {
// we need to do a template roll out if it's not done yet or forced
needRolloutTemplate = ac.Status.RollingStatus != oamtype.RollingTemplated
if needRolloutTemplate {
klog.InfoS("need to template the ac ", "appConfig", klog.KRef(ac.Namespace, ac.Name),
"rolling status", ac.Status.RollingStatus)
}
} else if ac.Status.RollingStatus == oamtype.RollingTemplated {
klog.InfoS("mark the ac rolling status as completed", "appConfig", klog.KRef(ac.Namespace, ac.Name))
ac.Status.RollingStatus = types2.RollingCompleted
ac.Status.RollingStatus = oamtype.RollingCompleted
}
for _, acc := range ac.Spec.Components {
@@ -133,7 +136,7 @@ func (r *components) Render(ctx context.Context, ac *v1alpha2.ApplicationConfigu
}
workloads = append(workloads, w)
if isComponentRolling && needRolloutTemplate {
ac.Status.RollingStatus = types2.RollingTemplating
ac.Status.RollingStatus = oamtype.RollingTemplating
}
}
workloadsAllClear := true
@@ -151,9 +154,9 @@ func (r *components) Render(ctx context.Context, ac *v1alpha2.ApplicationConfigu
res = append(res, *workloads[i])
}
// set the ac rollingStatus to be RollingTemplated if all workloads are going to be applied
if workloadsAllClear && ac.Status.RollingStatus == types2.RollingTemplating {
if workloadsAllClear && ac.Status.RollingStatus == oamtype.RollingTemplating {
klog.InfoS("mark the ac rolling status as templated", "appConfig", klog.KRef(ac.Namespace, ac.Name))
ac.Status.RollingStatus = types2.RollingTemplated
ac.Status.RollingStatus = oamtype.RollingTemplated
}
return res, ds, nil

View File

@@ -23,8 +23,6 @@ import (
"strconv"
"testing"
types2 "github.com/oam-dev/kubevela/apis/types"
"github.com/crossplane/crossplane-runtime/apis/core/v1alpha1"
"github.com/crossplane/crossplane-runtime/pkg/fieldpath"
"github.com/crossplane/crossplane-runtime/pkg/test"
@@ -46,6 +44,7 @@ import (
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
oamtype "github.com/oam-dev/kubevela/apis/types"
helmapi "github.com/oam-dev/kubevela/pkg/appfile/helm/flux2apis"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/mock"
@@ -112,8 +111,11 @@ func TestRender(t *testing.T) {
oam.AnnotationRollingComponent: componentName,
"keep": strconv.FormatBool(true),
})
controlledNoneTemplateAC := controlledTemplateAC.DeepCopy()
controlledNoneTemplateAC.Status.RollingStatus = types2.RollingTemplated
controlledTemplatedAC := controlledTemplateAC.DeepCopy()
controlledTemplatedAC.Status.RollingStatus = oamtype.RollingTemplated
// ac will render template again if the status is not templated
controlledForceTemplateAC := controlledTemplatedAC.DeepCopy()
controlledForceTemplateAC.Status.RollingStatus = oamtype.RollingTemplating
ref := metav1.NewControllerRef(ac, v1alpha2.ApplicationConfigurationGroupVersionKind)
errTrait := errors.New("errTrait")
@@ -763,7 +765,7 @@ func TestRender(t *testing.T) {
return t, nil
}),
},
args: args{ac: controlledNoneTemplateAC},
args: args{ac: controlledTemplatedAC},
want: want{
w: []Workload{
{
@@ -785,18 +787,96 @@ func TestRender(t *testing.T) {
},
},
},
"Success-With-Force-Template-Deployment": {
reason: "We force render the workload as long as the status is not templated",
fields: fields{
client: &test.MockClient{MockGet: test.NewMockGetFn(nil, func(obj runtime.Object) error {
switch defObj := obj.(type) {
case *v1alpha2.Component:
ccomp := v1alpha2.Component{
Status: v1alpha2.ComponentStatus{
LatestRevision: &common.Revision{Name: revisionName2},
},
}
ccomp.DeepCopyInto(defObj)
case *v1alpha2.TraitDefinition:
ttrait := v1alpha2.TraitDefinition{ObjectMeta: metav1.ObjectMeta{Name: traitName},
Spec: v1alpha2.TraitDefinitionSpec{RevisionEnabled: true}}
ttrait.DeepCopyInto(defObj)
case *v1.ControllerRevision:
rev := &v1.ControllerRevision{
ObjectMeta: metav1.ObjectMeta{Name: revisionName, Namespace: namespace},
Data: runtime.RawExtension{Object: &v1alpha2.Component{
ObjectMeta: metav1.ObjectMeta{
Name: componentName,
Namespace: namespace,
},
Spec: v1alpha2.ComponentSpec{
Workload: runtime.RawExtension{
Object: &unstructured.Unstructured{},
},
},
Status: v1alpha2.ComponentStatus{
LatestRevision: &common.Revision{Name: revisionName2},
},
}},
Revision: 2,
}
rev.DeepCopyInto(defObj)
}
return nil
})},
params: ParameterResolveFn(func(_ []v1alpha2.ComponentParameter, _ []v1alpha2.ComponentParameterValue) ([]Parameter, error) {
return nil, nil
}),
workload: ResourceRenderFn(func(_ []byte, _ ...Parameter) (*unstructured.Unstructured, error) {
w := &unstructured.Unstructured{
Object: map[string]interface{}{
"apiVersion": "apps/v1",
"kind": "Deployment",
},
}
return w, nil
}),
trait: ResourceRenderFn(func(_ []byte, _ ...Parameter) (*unstructured.Unstructured, error) {
t := &unstructured.Unstructured{}
t.SetName(traitName)
return t, nil
}),
},
args: args{ac: controlledForceTemplateAC},
want: want{
w: []Workload{
{
SkipApply: false,
ComponentName: componentName,
ComponentRevisionName: revisionName2,
Workload: func() *unstructured.Unstructured {
w := &unstructured.Unstructured{}
w.SetNamespace(namespace)
w.SetName(revisionName2)
w.SetOwnerReferences([]metav1.OwnerReference{*ref})
w.SetAnnotations(map[string]string{
oam.AnnotationAppGeneration: "0",
})
return w
}(),
RevisionEnabled: true,
},
},
},
},
}
for name, tc := range cases {
t.Run(name, func(t *testing.T) {
r := &components{tc.fields.client, mock.NewMockDiscoveryMapper(), tc.fields.params,
tc.fields.workload, tc.fields.trait}
needTemplating := tc.args.ac.Status.RollingStatus != types2.RollingTemplated
needTemplating := tc.args.ac.Status.RollingStatus != oamtype.RollingTemplated
_, isRolling := tc.args.ac.GetAnnotations()[oam.AnnotationAppRollout]
got, _, err := r.Render(context.Background(), tc.args.ac)
if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
t.Errorf("\n%s\nr.Render(...): -want error, +got error:\n%s\n", tc.reason, diff)
}
if isControlledByApp(tc.args.ac) {
// test the case of application generated AC
if diff := cmp.Diff(tc.want.w[0].ComponentName, got[0].ComponentName); diff != "" {
@@ -808,11 +888,11 @@ func TestRender(t *testing.T) {
if diff := cmp.Diff(tc.want.w[0].Workload.GetName(), got[0].Workload.GetName()); diff != "" {
t.Errorf("\n%s\nr.Render(...): -want, +got:\n%s\n", tc.reason, diff)
}
if _, exit := got[0].Workload.GetAnnotations()[oam.AnnotationAppRollout]; exit {
if _, exist := got[0].Workload.GetAnnotations()[oam.AnnotationAppRollout]; exist {
t.Errorf("\n%s\nr.Render(...) workload should not get annotation:%s\n", tc.reason,
oam.AnnotationAppRollout)
}
if _, exit := got[0].Workload.GetAnnotations()[oam.AnnotationRollingComponent]; exit {
if _, exist := got[0].Workload.GetAnnotations()[oam.AnnotationRollingComponent]; exist {
t.Errorf("\n%s\nr.Render(...) workload should not get annotation:%s\n", tc.reason,
oam.AnnotationRollingComponent)
}
@@ -820,7 +900,7 @@ func TestRender(t *testing.T) {
t.Errorf("\n%s\nr.Render(...) workload should get annotation:%s\n", tc.reason,
"keep")
}
if _, exit := got[0].Traits[0].Object.GetAnnotations()[oam.AnnotationRollingComponent]; exit {
if _, exist := got[0].Traits[0].Object.GetAnnotations()[oam.AnnotationRollingComponent]; exist {
t.Errorf("\n%s\nr.Render(...): trait should not get annotation:%s\n", tc.reason,
oam.AnnotationRollingComponent)
}
@@ -836,7 +916,7 @@ func TestRender(t *testing.T) {
if got[0].SkipApply {
t.Errorf("\n%s\nr.Render(...): template workload should not be skipped\n", tc.reason)
}
if tc.args.ac.Status.RollingStatus != types2.RollingTemplated {
if tc.args.ac.Status.RollingStatus != oamtype.RollingTemplated {
t.Errorf("\n%s\nr.Render(...): ac status should be templated but got %s\n", tc.reason,
ac.Status.RollingStatus)
}

View File

@@ -24,8 +24,6 @@ import (
types2 "github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/crossplane/crossplane-runtime/pkg/event"
"github.com/crossplane/crossplane-runtime/pkg/logging"
"github.com/pkg/errors"
@@ -129,7 +127,6 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager, compHandler *ac.Componen
return ctrl.NewControllerManagedBy(mgr).
For(&v1alpha2.ApplicationContext{}).
Watches(&source.Kind{Type: &v1alpha2.Component{}}, compHandler).
Owns(&v1beta1.Application{}).
Complete(r)
}

View File

@@ -21,15 +21,14 @@ import (
"strconv"
"time"
"github.com/oam-dev/kubevela/apis/types"
"github.com/crossplane/crossplane-runtime/pkg/event"
"github.com/crossplane/crossplane-runtime/pkg/logging"
"github.com/crossplane/crossplane-runtime/pkg/meta"
"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/util/retry"
"k8s.io/klog/v2"
"k8s.io/kubectl/pkg/util/slice"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
@@ -37,6 +36,7 @@ import (
oamv1alpha2 "github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/controller/common/rollout"
controller "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev"
"github.com/oam-dev/kubevela/pkg/oam"
@@ -45,6 +45,8 @@ import (
)
const (
errUpdateAppRollout = "failed to update the app rollout"
appRolloutFinalizer = "finalizers.approllout.oam.dev"
reconcileTimeOut = 60 * time.Second
@@ -65,7 +67,6 @@ type Reconciler struct {
// Reconcile is the main logic of appRollout controller
func (r *Reconciler) Reconcile(req ctrl.Request) (res reconcile.Result, retErr error) {
var appRollout v1beta1.AppRollout
ctx, cancel := context.WithTimeout(context.TODO(), reconcileTimeOut)
defer cancel()
@@ -92,21 +93,25 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (res reconcile.Result, retErr e
return ctrl.Result{}, client.IgnoreNotFound(err)
}
klog.InfoS("Start to reconcile ", "appRollout", klog.KObj(&appRollout))
r.handleFinalizer(&appRollout)
if len(appRollout.Status.RollingState) == 0 {
appRollout.Status.ResetStatus()
}
targetAppRevisionName := appRollout.Spec.TargetAppRevisionName
sourceAppRevisionName := appRollout.Spec.SourceAppRevisionName
// handle rollout completed
if appRollout.Status.RollingState == v1alpha1.RolloutSucceedState ||
appRollout.Status.RollingState == v1alpha1.RolloutFailedState {
if appRollout.Status.LastUpgradedTargetAppRevision == targetAppRevisionName &&
appRollout.Status.LastSourceAppRevision == sourceAppRevisionName {
klog.InfoS("rollout completed, no need to reconcile", "source", sourceAppRevisionName,
"target", targetAppRevisionName)
return ctrl.Result{}, nil
}
// handle app Finalizer
doneReconcile, res, retErr := r.handleFinalizer(ctx, &appRollout)
if doneReconcile {
return res, retErr
}
// handle rollout target/source change
// no need to proceed if rollout is already in a terminal state and there is no source/target change
doneReconcile = r.handleRollingTerminated(appRollout, targetAppRevisionName, sourceAppRevisionName)
if doneReconcile {
return reconcile.Result{}, nil
}
// handle rollout target/source change,
if appRollout.Status.LastUpgradedTargetAppRevision != "" &&
appRollout.Status.LastUpgradedTargetAppRevision != targetAppRevisionName ||
(appRollout.Status.LastSourceAppRevision != "" && appRollout.Status.LastSourceAppRevision != sourceAppRevisionName) {
@@ -123,11 +128,17 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (res reconcile.Result, retErr e
// continue to handle the previous resources until we are okay to move forward
targetAppRevisionName = appRollout.Status.LastUpgradedTargetAppRevision
sourceAppRevisionName = appRollout.Status.LastSourceAppRevision
} else {
// so that we don't think we are modified again
appRollout.Status.LastUpgradedTargetAppRevision = targetAppRevisionName
appRollout.Status.LastSourceAppRevision = sourceAppRevisionName
}
appRollout.Status.StateTransition(v1alpha1.RollingModifiedEvent)
}
// Get the source application
//TODO: handle deleting/abandoning state differently
// Get the source application first
var sourceApRev *oamv1alpha2.ApplicationRevision
var sourceApp *oamv1alpha2.ApplicationContext
var err error
@@ -135,7 +146,8 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (res reconcile.Result, retErr e
klog.Info("source app fields not filled, this is a scale operation")
sourceApp = nil
} else {
sourceApRev, sourceApp, err = r.getSourceAppContexts(ctx, sourceAppRevisionName)
sourceApRev, sourceApp, err = r.getSourceAppContexts(ctx,
appRollout.Spec.ComponentList, appRollout.Status.RollingState, sourceAppRevisionName)
if err != nil {
return ctrl.Result{}, err
}
@@ -144,21 +156,24 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (res reconcile.Result, retErr e
klog.Info("source app revision is not ready for rolling yet", "application revision", sourceAppRevisionName)
r.record.Event(&appRollout, event.Normal("Rollout Paused",
"source app revision is not ready for rolling yet", "application revision", sourceApp.GetName()))
return ctrl.Result{RequeueAfter: 3 * time.Second}, nil
return ctrl.Result{RequeueAfter: 3 * time.Second}, r.updateStatus(ctx, &appRollout)
}
}
// Get the target application revision
targetAppRev, targetApp, err := r.getTargetApps(ctx, targetAppRevisionName)
// Get the target application revision after the source app is templated
targetAppRev, targetApp, err := r.getTargetApps(ctx, appRollout.Spec.ComponentList,
appRollout.Status.RollingState, targetAppRevisionName)
if err != nil {
return ctrl.Result{}, err
}
// this ensures that we handle the target app init only once
appRollout.Status.StateTransition(v1alpha1.AppLocatedEvent)
// check if the app is templated
if targetApp.Status.RollingStatus != types.RollingTemplated {
r.record.Event(&appRollout, event.Normal("Rollout Paused",
"target app revision is not ready for rolling yet", "application revision", targetApp.GetName()))
return ctrl.Result{RequeueAfter: 3 * time.Second}, nil
return ctrl.Result{RequeueAfter: 3 * time.Second}, r.updateStatus(ctx, &appRollout)
}
// we get the real workloads from the spec of the revisions
@@ -167,13 +182,12 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (res reconcile.Result, retErr e
klog.ErrorS(err, "cannot fetch the workloads to upgrade", "target application",
klog.KRef(req.Namespace, targetAppRevisionName), "source application", klog.KRef(req.Namespace, sourceAppRevisionName),
"commonComponent", appRollout.Spec.ComponentList)
return ctrl.Result{RequeueAfter: 5 * time.Second}, client.IgnoreNotFound(err)
return ctrl.Result{RequeueAfter: 5 * time.Second}, r.updateStatus(ctx, &appRollout)
}
klog.InfoS("get the target workload we need to work on", "targetWorkload", klog.KObj(targetWorkload))
if sourceWorkload != nil {
klog.InfoS("get the source workload we need to work on", "sourceWorkload", klog.KObj(sourceWorkload))
}
// reconcile the rollout part of the spec given the target and source workload
rolloutPlanController := rollout.NewRolloutPlanController(r, &appRollout, r.record,
&appRollout.Spec.RolloutPlan, &appRollout.Status.RolloutStatus, targetWorkload, sourceWorkload)
@@ -186,16 +200,66 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (res reconcile.Result, retErr e
appRollout.Status.LastSourceAppRevision = appRollout.Spec.SourceAppRevisionName
}
if rolloutStatus.RollingState == v1alpha1.RolloutSucceedState {
klog.InfoS("rollout succeeded, record the source and target app revision", "source", sourceAppRevisionName,
"target", targetAppRevisionName)
if err = r.finalizeRollingSucceeded(ctx, sourceApp, targetApp); err != nil {
return ctrl.Result{}, err
}
klog.InfoS("rollout succeeded, record the source and target app revision", "source", sourceAppRevisionName,
"target", targetAppRevisionName)
} else if rolloutStatus.RollingState == v1alpha1.RolloutFailedState {
klog.InfoS("rollout failed, record the source and target app revision", "source", sourceAppRevisionName,
"target", targetAppRevisionName, "revert on deletion", appRollout.Spec.RevertOnDelete)
}
// update the appRollout status
return result, r.updateStatus(ctx, &appRollout)
}
// handle adding and handle finalizer logic, it turns if we should continue to reconcile
func (r *Reconciler) handleFinalizer(ctx context.Context, appRollout *v1beta1.AppRollout) (bool, reconcile.Result, error) {
if appRollout.DeletionTimestamp.IsZero() {
if !meta.FinalizerExists(&appRollout.ObjectMeta, appRolloutFinalizer) {
meta.AddFinalizer(&appRollout.ObjectMeta, appRolloutFinalizer)
klog.InfoS("Register new app rollout finalizers", "rollout", appRollout.Name,
"finalizers", appRollout.ObjectMeta.Finalizers)
return true, reconcile.Result{}, errors.Wrap(r.Update(ctx, appRollout), errUpdateAppRollout)
}
} else if meta.FinalizerExists(&appRollout.ObjectMeta, appRolloutFinalizer) {
if appRollout.Status.RollingState == v1alpha1.RolloutSucceedState {
klog.InfoS("Safe to delete the succeeded rollout", "rollout", appRollout.Name)
meta.RemoveFinalizer(&appRollout.ObjectMeta, appRolloutFinalizer)
return true, reconcile.Result{}, errors.Wrap(r.Update(ctx, appRollout), errUpdateAppRollout)
}
if appRollout.Status.RollingState == v1alpha1.RolloutFailedState {
klog.InfoS("delete the rollout in deleted state", "rollout", appRollout.Name)
if appRollout.Spec.RevertOnDelete {
klog.InfoS("need to revert the failed rollout", "rollout", appRollout.Name)
}
meta.RemoveFinalizer(&appRollout.ObjectMeta, appRolloutFinalizer)
return true, reconcile.Result{}, errors.Wrap(r.Update(ctx, appRollout), errUpdateAppRollout)
}
// still need to finalize
klog.Info("perform clean up", "app rollout", appRollout.Name)
r.record.Event(appRollout, event.Normal("Rollout ", "rollout target deleted, release the resources"))
appRollout.Status.StateTransition(v1alpha1.RollingDeletedEvent)
}
return false, reconcile.Result{}, nil
}
func (r *Reconciler) handleRollingTerminated(appRollout v1beta1.AppRollout, targetAppRevisionName string,
sourceAppRevisionName string) bool {
// handle rollout completed
if appRollout.Status.RollingState == v1alpha1.RolloutSucceedState ||
appRollout.Status.RollingState == v1alpha1.RolloutFailedState {
if appRollout.Status.LastUpgradedTargetAppRevision == targetAppRevisionName &&
appRollout.Status.LastSourceAppRevision == sourceAppRevisionName {
klog.InfoS("rollout completed, no need to reconcile", "source", sourceAppRevisionName,
"target", targetAppRevisionName)
return true
}
}
return false
}
func (r *Reconciler) finalizeRollingSucceeded(ctx context.Context, sourceApp *oamv1alpha2.ApplicationContext,
targetApp *oamv1alpha2.ApplicationContext) error {
if sourceApp != nil {
@@ -209,7 +273,7 @@ func (r *Reconciler) finalizeRollingSucceeded(ctx context.Context, sourceApp *oa
}
}
// remove the rollout annotation so that the target appConfig controller can take over the rest of the work
oamutil.RemoveAnnotations(targetApp, []string{oam.AnnotationAppRollout})
oamutil.RemoveAnnotations(targetApp, []string{oam.AnnotationAppRollout, oam.AnnotationRollingComponent})
if err := r.Update(ctx, targetApp); err != nil {
klog.ErrorS(err, "cannot remove the rollout annotation", "target application",
klog.KRef(targetApp.Namespace, targetApp.GetName()))
@@ -219,29 +283,17 @@ func (r *Reconciler) finalizeRollingSucceeded(ctx context.Context, sourceApp *oa
}
// UpdateStatus updates v1alpha2.AppRollout's Status with retry.RetryOnConflict
func (r *Reconciler) updateStatus(ctx context.Context, appRollout *v1beta1.AppRollout, opts ...client.UpdateOption) error {
func (r *Reconciler) updateStatus(ctx context.Context, appRollout *v1beta1.AppRollout) error {
status := appRollout.DeepCopy().Status
return retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
if err = r.Get(ctx, client.ObjectKey{Namespace: appRollout.Namespace, Name: appRollout.Name}, appRollout); err != nil {
return
}
appRollout.Status = status
return r.Status().Update(ctx, appRollout, opts...)
return r.Status().Update(ctx, appRollout)
})
}
func (r *Reconciler) handleFinalizer(appRollout *v1beta1.AppRollout) {
if appRollout.DeletionTimestamp.IsZero() {
if !slice.ContainsString(appRollout.Finalizers, appRolloutFinalizer, nil) {
// TODO: add finalizer
klog.Info("add finalizer")
}
} else if slice.ContainsString(appRollout.Finalizers, appRolloutFinalizer, nil) {
// TODO: perform finalize
klog.Info("perform clean up")
}
}
// SetupWithManager setup the controller with manager
func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
r.record = event.NewAPIRecorder(mgr.GetEventRecorderFor("AppRollout")).

View File

@@ -20,6 +20,7 @@ import (
"context"
"fmt"
"strconv"
"strings"
"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -30,6 +31,9 @@ import (
"k8s.io/utils/pointer"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
"github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/controller/common"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/applicationconfiguration"
"github.com/oam-dev/kubevela/pkg/controller/utils"
"github.com/oam-dev/kubevela/pkg/oam"
@@ -39,8 +43,8 @@ import (
// getTargetApps try to locate the target appRevision and appContext that is responsible for the target
// we will create a new appContext when it's not found
func (r *Reconciler) getTargetApps(ctx context.Context, targetAppRevisionName string) (*v1alpha2.ApplicationRevision,
*v1alpha2.ApplicationContext, error) {
func (r *Reconciler) getTargetApps(ctx context.Context, componentList []string, rollingState v1alpha1.RollingState,
targetAppRevisionName string) (*v1alpha2.ApplicationRevision, *v1alpha2.ApplicationContext, error) {
var appRevision v1alpha2.ApplicationRevision
var appContext v1alpha2.ApplicationContext
namespaceName := oamutil.GetDefinitionNamespaceWithCtx(ctx)
@@ -52,30 +56,44 @@ func (r *Reconciler) getTargetApps(ctx context.Context, targetAppRevisionName st
}
if err := r.Get(ctx, ktypes.NamespacedName{Namespace: namespaceName, Name: targetAppRevisionName},
&appContext); err != nil {
if apierrors.IsNotFound(err) {
klog.InfoS("target application context does not exist, create one", "target application revision",
if apierrors.IsNotFound(err) && rollingState == v1alpha1.LocatingTargetAppState {
klog.InfoS("target application context does not exist yet, create one", "target application revision",
klog.KRef(namespaceName, targetAppRevisionName))
appContext, err = r.createAppContext(ctx, &appRevision)
appContext, err = r.createAppContext(ctx, componentList, &appRevision)
if err != nil {
return nil, nil, err
}
} else {
klog.ErrorS(err, "cannot locate target application context", "target application revision",
klog.KRef(namespaceName, targetAppRevisionName))
return &appRevision, &appContext, nil
}
// the appContext has to exist by now
klog.ErrorS(err, "cannot locate target application context", "target application name",
klog.KRef(namespaceName, targetAppRevisionName), "rollingState", rollingState)
return nil, nil, err
}
// special handle the first time we locate the appContext
if rollingState == v1alpha1.LocatingTargetAppState {
if appContext.Status.RollingStatus == types.RollingTemplated {
// force template the target app
klog.InfoS("force templating an already templated target application",
"target application revision", klog.KRef(namespaceName, targetAppRevisionName))
appContext.Status.RollingStatus = types.RollingTemplating
if err := r.Status().Update(ctx, &appContext); err != nil {
klog.ErrorS(err, "failed to force update target application context to be templating",
"target application name", klog.KRef(namespaceName, targetAppRevisionName))
return nil, nil, err
}
}
err := r.prepareAppContext(ctx, componentList, &appContext)
if err != nil {
return nil, nil, err
}
}
// set the AC as rolling
err := r.prepareAppContextForRollout(ctx, &appContext)
if err != nil {
return nil, nil, err
}
return &appRevision, &appContext, nil
}
// getTargetApps try to locate the source appRevision and appContext that is responsible for the source
func (r *Reconciler) getSourceAppContexts(ctx context.Context, sourceAppRevisionName string) (*v1alpha2.
ApplicationRevision, *v1alpha2.ApplicationContext, error) {
func (r *Reconciler) getSourceAppContexts(ctx context.Context, componentList []string, rollingState v1alpha1.RollingState,
sourceAppRevisionName string) (*v1alpha2.ApplicationRevision, *v1alpha2.ApplicationContext, error) {
var appRevision v1alpha2.ApplicationRevision
var appContext v1alpha2.ApplicationContext
namespaceName := oamutil.GetDefinitionNamespaceWithCtx(ctx)
@@ -89,26 +107,34 @@ func (r *Reconciler) getSourceAppContexts(ctx context.Context, sourceAppRevision
if err := r.Get(ctx, ktypes.NamespacedName{Namespace: namespaceName, Name: sourceAppRevisionName},
&appContext); err != nil {
// TODO: use the app name as the source Context to upgrade from none-rolling application to rolling
klog.ErrorS(err, "cannot locate source application revision", "source application revision",
klog.ErrorS(err, "cannot locate source application revision", "source application name",
klog.KRef(namespaceName, sourceAppRevisionName))
return nil, nil, err
}
// set the AC as rolling
err := r.prepareAppContextForRollout(ctx, &appContext)
if err != nil {
return nil, nil, err
// set the AC as rolling if we are still at locating state
if rollingState == v1alpha1.LocatingTargetAppState {
err := r.prepareAppContext(ctx, componentList, &appContext)
if err != nil {
return nil, nil, err
}
}
return &appRevision, &appContext, nil
}
func (r *Reconciler) prepareAppContextForRollout(ctx context.Context, appContext *v1alpha2.ApplicationContext) error {
oamutil.AddAnnotations(appContext, map[string]string{oam.AnnotationAppRollout: strconv.FormatBool(true)})
func (r *Reconciler) prepareAppContext(ctx context.Context, componentList []string,
appContext *v1alpha2.ApplicationContext) error {
oamutil.RemoveAnnotations(appContext, []string{oam.AnnotationAppRevision})
// pass the rolling component to the app
oamutil.AddAnnotations(appContext, map[string]string{oam.AnnotationAppRollout: strconv.FormatBool(true)})
if len(componentList) != 0 {
oamutil.AddAnnotations(appContext, map[string]string{
oam.AnnotationRollingComponent: strings.Join(componentList, common.RollingComponentsSep)})
}
return r.Update(ctx, appContext)
}
func (r *Reconciler) createAppContext(ctx context.Context, appRevision *v1alpha2.ApplicationRevision) (v1alpha2.
ApplicationContext, error) {
func (r *Reconciler) createAppContext(ctx context.Context, componentList []string,
appRevision *v1alpha2.ApplicationRevision) (v1alpha2.ApplicationContext, error) {
namespaceName := oamutil.GetDefinitionNamespaceWithCtx(ctx)
appContext := v1alpha2.ApplicationContext{
ObjectMeta: metav1.ObjectMeta{
@@ -131,6 +157,11 @@ func (r *Reconciler) createAppContext(ctx context.Context, appRevision *v1alpha2
}
// set the AC as rolling
oamutil.AddAnnotations(&appContext, map[string]string{oam.AnnotationAppRollout: strconv.FormatBool(true)})
// pass the rolling component to the app
if len(componentList) != 0 {
oamutil.AddAnnotations(&appContext, map[string]string{
oam.AnnotationRollingComponent: strings.Join(componentList, common.RollingComponentsSep)})
}
err := r.Create(ctx, &appContext)
return appContext, err
}

View File

@@ -146,7 +146,7 @@ func (ctx *templateContext) BaseContextFile() string {
if len(ctx.configs) > 0 {
bt, _ := json.Marshal(ctx.configs)
buff += ConfigFieldName + ": " + string(bt)
buff += ConfigFieldName + ": " + string(bt) + "\n"
}
if len(ctx.requiredSecrets) > 0 {

View File

@@ -19,6 +19,9 @@ package apply
import (
"context"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/pkg/errors"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -27,8 +30,6 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/oam-dev/kubevela/pkg/oam"
)
// Applicator applies new state to an object or create it if not exist.
@@ -175,7 +176,10 @@ func MustBeControllableBy(u types.UID) ApplyOption {
if c == nil {
return nil
}
// if workload is a cross namespace resource, skip check UID
if c.Kind == v1beta1.ResourceTrackerKind {
return nil
}
if c.UID != u {
return errors.Errorf("existing object is not controlled by UID %q", u)
}

View File

@@ -31,6 +31,8 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
)
var ctx = context.Background()
@@ -348,6 +350,15 @@ func TestMustBeControllableBy(t *testing.T) {
}}}},
want: errors.Errorf("existing object is not controlled by UID %q", uid),
},
"cross namespace resource": {
reason: "A cross namespace resource have a resourceTracker owner, skip check UID",
u: uid,
current: &testObject{ObjectMeta: metav1.ObjectMeta{OwnerReferences: []metav1.OwnerReference{{
UID: uid,
Controller: &controller,
Kind: v1beta1.ResourceTrackerKind,
}}}},
},
}
for name, tc := range cases {

37
pkg/utils/env/env.go vendored
View File

@@ -25,23 +25,16 @@ import (
"os"
"path/filepath"
acmev1 "github.com/wonderflow/cert-manager-api/pkg/apis/acme/v1"
certmanager "github.com/wonderflow/cert-manager-api/pkg/apis/certmanager/v1"
v1 "github.com/wonderflow/cert-manager-api/pkg/apis/meta/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8stypes "k8s.io/apimachinery/pkg/types"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/utils/system"
)
// ProductionACMEServer is the production ACME Server from let's encrypt
const ProductionACMEServer = "https://acme-v02.api.letsencrypt.org/directory"
// GetEnvDirByName will get env dir from name
func GetEnvDirByName(name string) string {
envdir, _ := system.GetEnvDir()
@@ -79,9 +72,6 @@ func CreateOrUpdateEnv(ctx context.Context, c client.Client, envName string, env
if envArgs.Email == "" {
envArgs.Email = old.Email
}
if envArgs.Issuer == "" {
envArgs.Issuer = old.Issuer
}
if envArgs.Namespace == "" {
envArgs.Namespace = old.Namespace
}
@@ -103,33 +93,6 @@ func CreateOrUpdateEnv(ctx context.Context, c client.Client, envName string, env
}
}
// Create Issuer For SSL if both email and domain are all set.
if envArgs.Email != "" && envArgs.Domain != "" {
issuerName := "oam-env-" + envArgs.Name
if err := c.Create(ctx, &certmanager.Issuer{
ObjectMeta: metav1.ObjectMeta{Name: issuerName, Namespace: envArgs.Namespace},
Spec: certmanager.IssuerSpec{
IssuerConfig: certmanager.IssuerConfig{
ACME: &acmev1.ACMEIssuer{
Email: envArgs.Email,
Server: ProductionACMEServer,
PrivateKey: v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{Name: "oam-env-" + envArgs.Name + ".key"},
},
Solvers: []acmev1.ACMEChallengeSolver{{
HTTP01: &acmev1.ACMEChallengeSolverHTTP01{
Ingress: &acmev1.ACMEChallengeSolverHTTP01Ingress{Class: pointer.StringPtr("nginx")},
},
}},
},
},
},
}); err != nil && !apierrors.IsAlreadyExists(err) {
return message, err
}
envArgs.Issuer = issuerName
}
data, err := json.Marshal(envArgs)
if err != nil {
return message, err

View File

@@ -22,7 +22,6 @@ import (
"net/http"
"k8s.io/klog/v2"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/runtime/inject"
@@ -70,10 +69,6 @@ func (h *MutatingHandler) Handle(ctx context.Context, req admission.Request) adm
// DefaultAppRollout will set the default value for the AppRollout®
func DefaultAppRollout(obj *v1beta1.AppRollout) {
klog.InfoS("create default for approllout", "name", obj.Name)
if obj.Spec.RevertOnDelete == nil {
klog.V(common.LogDebug).Info("default RevertOnDelete as false")
obj.Spec.RevertOnDelete = pointer.BoolPtr(false)
}
// default rollout plan
rollout.DefaultRolloutPlan(&obj.Spec.RolloutPlan)

View File

@@ -297,7 +297,7 @@ func GetOAMReleaseVersion(ns string) (string, error) {
return result.Chart.AppVersion(), nil
}
}
return "", errors.New("kubevela chart not found in your kubernetes cluster, refer to 'https://kubevela.io/#/en/install' for installation")
return "", errors.New("kubevela chart not found in your kubernetes cluster, refer to 'https://kubevela.io/docs/install' for installation")
}
// PrintTrackVelaRuntimeStatus prints status of installing vela-core runtime

View File

@@ -36,7 +36,6 @@ func TestUp(t *testing.T) {
env := types.EnvMeta{
Name: "up",
Namespace: "env-up",
Issuer: "up",
}
o := common.AppfileOptions{
IO: ioStream,

View File

@@ -261,8 +261,11 @@ func SyncDefinitionToLocal(ctx context.Context, c common.Args, localDefinitionDi
foundCapability = true
}
if foundCapability {
mapper := &discoverymapper.DefaultDiscoveryMapper{}
ref, err := util.ConvertWorkloadGVK2Definition(mapper, componentDef.Spec.Workload.Definition)
dm, err := c.GetDiscoveryMapper()
if err != nil {
return nil, err
}
ref, err := util.ConvertWorkloadGVK2Definition(dm, componentDef.Spec.Workload.Definition)
if err != nil {
return nil, err
}

View File

@@ -552,6 +552,158 @@ var _ = Describe("Test application cross namespace resource", func() {
return nil
}, time.Second*60, time.Microsecond*300).Should(BeNil())
})
// Verifies the full lifecycle of a component whose workload is deployed into a
// different namespace (crossNamespace) than the Application itself:
//  1. creation renders a Deployment owned by the ResourceTracker,
//  2. updating the component properties re-renders the Deployment (image change),
//  3. deleting the Application garbage-collects both the tracker and the workload.
It("Update a cross namespace workload of application", func() {
	// install component definition; tolerate a pre-existing one from another spec
	crossCdJson, _ := yaml.YAMLToJSON([]byte(crossCompDefYaml))
	ccd := new(v1beta1.ComponentDefinition)
	Expect(json.Unmarshal(crossCdJson, ccd)).Should(BeNil())
	Expect(k8sClient.Create(ctx, ccd)).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
	var (
		appName       = "test-app-5"
		app           = new(v1beta1.Application)
		componentName = "test-app-5-comp"
	)
	app = &v1beta1.Application{
		ObjectMeta: metav1.ObjectMeta{
			Name:      appName,
			Namespace: namespace,
		},
		Spec: v1beta1.ApplicationSpec{
			Components: []v1beta1.ApplicationComponent{
				{
					Name:       componentName,
					Type:       "cross-worker",
					Properties: runtime.RawExtension{Raw: []byte(`{"cmd":["sleep","1000"],"image":"busybox"}`)},
				},
			},
		},
	}
	Expect(k8sClient.Create(ctx, app)).Should(BeNil())
	By("check resource tracker has been created and app status ")
	resourceTracker := new(v1beta1.ResourceTracker)
	Eventually(func() error {
		app := new(v1beta1.Application)
		if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appName}, app); err != nil {
			return fmt.Errorf("app not found %v", err)
		}
		if err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name), resourceTracker); err != nil {
			return err
		}
		if app.Status.Phase != common.ApplicationRunning {
			return fmt.Errorf("application status is not running")
		}
		// the app status must reference exactly the tracker instance we fetched
		if app.Status.ResourceTracker == nil || app.Status.ResourceTracker.UID != resourceTracker.UID {
			return fmt.Errorf("application status error ")
		}
		return nil
		// poll every 300ms (was Microsecond*300, i.e. 0.3ms, which hammers the apiserver)
	}, time.Second*600, time.Millisecond*300).Should(BeNil())
	By("check resource is generated correctly")
	Expect(k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appName}, app)).Should(BeNil())
	var workload appsv1.Deployment
	Eventually(func() error {
		appContext := &v1alpha2.ApplicationContext{}
		if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appName}, appContext); err != nil {
			return fmt.Errorf("cannot generate AppContext %v", err)
		}
		component := &v1alpha2.Component{}
		if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: componentName}, component); err != nil {
			return fmt.Errorf("cannot generate component %v", err)
		}
		if component.ObjectMeta.Labels[oam.LabelAppName] != appName {
			return fmt.Errorf("component error label ")
		}
		// exactly one Deployment labeled with the app name must exist in crossNamespace
		deploys := new(appsv1.DeploymentList)
		opts := []client.ListOption{
			client.InNamespace(crossNamespace),
			client.MatchingLabels{
				oam.LabelAppName: appName,
			},
		}
		err := k8sClient.List(ctx, deploys, opts...)
		if err != nil || len(deploys.Items) != 1 {
			return fmt.Errorf("error workload number %v", err)
		}
		workload = deploys.Items[0]
		// cross-namespace workloads are owned by the ResourceTracker, not the app
		if len(workload.OwnerReferences) != 1 || workload.OwnerReferences[0].UID != resourceTracker.UID {
			return fmt.Errorf("workload ownerreference error")
		}
		if workload.Spec.Template.Spec.Containers[0].Image != "busybox" {
			return fmt.Errorf("container image not match")
		}
		return nil
	}, time.Second*50, time.Millisecond*300).Should(BeNil())
	By("update application and check resource status")
	Eventually(func() error {
		// re-fetch and update inside Eventually to absorb resourceVersion conflicts
		checkApp := new(v1beta1.Application)
		if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appName}, checkApp); err != nil {
			return err
		}
		checkApp.Spec.Components[0].Properties = runtime.RawExtension{Raw: []byte(`{"cmd":["sleep","1000"],"image":"nginx"}`)}
		return k8sClient.Update(ctx, checkApp)
	}, time.Second*60, time.Millisecond*300).Should(BeNil())
	Eventually(func() error {
		appContext := &v1alpha2.ApplicationContext{}
		if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appName}, appContext); err != nil {
			return fmt.Errorf("cannot generate AppContext %v", err)
		}
		component := &v1alpha2.Component{}
		if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: componentName}, component); err != nil {
			return fmt.Errorf("cannot generate component %v", err)
		}
		if component.ObjectMeta.Labels[oam.LabelAppName] != appName {
			return fmt.Errorf("component error label ")
		}
		deploys := new(appsv1.DeploymentList)
		opts := []client.ListOption{
			client.InNamespace(crossNamespace),
			client.MatchingLabels{
				oam.LabelAppName: appName,
			},
		}
		err := k8sClient.List(ctx, deploys, opts...)
		if err != nil || len(deploys.Items) != 1 {
			return fmt.Errorf("error workload number %v", err)
		}
		workload = deploys.Items[0]
		if len(workload.OwnerReferences) != 1 || workload.OwnerReferences[0].UID != resourceTracker.UID {
			return fmt.Errorf("workload ownerreference error")
		}
		// the new image proves the updated properties were re-rendered
		if workload.Spec.Template.Spec.Containers[0].Image != "nginx" {
			return fmt.Errorf("container image not match")
		}
		return nil
	}, time.Second*60, time.Second).Should(BeNil())
	By("deleting application will remove resourceTracker and related workload will be removed")
	time.Sleep(3 * time.Second) // wait informer cache to be synced
	Expect(k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appName}, app)).Should(BeNil())
	Expect(k8sClient.Delete(ctx, app)).Should(BeNil())
	Eventually(func() error {
		// both tracker and workload must be NotFound after GC; any other error is fatal
		err := k8sClient.Get(ctx, generateResourceTrackerKey(app.Namespace, app.Name), resourceTracker)
		if err == nil {
			return fmt.Errorf("resourceTracker still exist")
		}
		if !apierrors.IsNotFound(err) {
			return err
		}
		err = k8sClient.Get(ctx, types.NamespacedName{Namespace: crossNamespace, Name: workload.GetName()}, &workload)
		if err == nil {
			return fmt.Errorf("workload still exist")
		}
		if !apierrors.IsNotFound(err) {
			return err
		}
		return nil
	}, time.Second*30, time.Millisecond*300).Should(BeNil())
})
})
func generateResourceTrackerKey(namespace string, name string) types.NamespacedName {

View File

@@ -26,6 +26,7 @@ import (
kruise "github.com/openkruise/kruise-api/apps/v1alpha1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -126,7 +127,7 @@ var _ = Describe("Cloneset based rollout tests", func() {
Eventually(
func() error {
return k8sClient.Create(ctx, newAppRollout)
}, time.Second*5, time.Millisecond).Should(Succeed())
}, time.Second*5, time.Millisecond*100).Should(Succeed())
}
verifyRolloutOwnsCloneset := func() {
@@ -140,11 +141,21 @@ var _ = Describe("Cloneset based rollout tests", func() {
return ""
}
return clonesetOwner.Kind
}, time.Second*10, time.Second).Should(BeEquivalentTo(v1beta1.AppRolloutKind))
}, time.Second*10, time.Millisecond*100).Should(BeEquivalentTo(v1beta1.AppRolloutKind))
clonesetOwner := metav1.GetControllerOf(&kc)
Expect(clonesetOwner.APIVersion).Should(BeEquivalentTo(v1beta1.SchemeGroupVersion.String()))
}
// verifyRolloutDeleted blocks until the AppRollout resource is fully gone
// from the cluster, i.e. Get returns a NotFound error.
verifyRolloutDeleted := func() {
	By("Wait for the rollout delete")
	Eventually(
		func() bool {
			err := k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: appRolloutName}, &appRollout)
			return apierrors.IsNotFound(err)
		},
		// poll every 100ms (was Microsecond*100 = 0.1ms, far too aggressive for an e2e poll)
		time.Second*30, time.Millisecond*100).Should(BeTrue())
}
verifyRolloutSucceeded := func(targetAppName string) {
By("Wait for the rollout phase change to succeed")
Eventually(
@@ -156,20 +167,19 @@ var _ = Describe("Cloneset based rollout tests", func() {
time.Second*120, time.Second).Should(Equal(oamstd.RolloutSucceedState))
Expect(appRollout.Status.UpgradedReadyReplicas).Should(BeEquivalentTo(appRollout.Status.RolloutTargetTotalSize))
Expect(appRollout.Status.UpgradedReplicas).Should(BeEquivalentTo(appRollout.Status.RolloutTargetTotalSize))
clonesetName := appRollout.Spec.ComponentList[0]
By("Verify AppContext rolling status")
var appConfig v1alpha2.ApplicationContext
var appContext v1alpha2.ApplicationContext
Eventually(
func() types.RollingStatus {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: targetAppName}, &appConfig)
return appConfig.Status.RollingStatus
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: targetAppName}, &appContext)
return appContext.Status.RollingStatus
},
time.Second*60, time.Second).Should(BeEquivalentTo(types.RollingCompleted))
By("Wait for AppContext to resume the control of cloneset")
var clonesetOwner *metav1.OwnerReference
clonesetName := appRollout.Spec.ComponentList[0]
Eventually(
func() string {
err := k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: clonesetName}, &kc)
@@ -188,13 +198,13 @@ var _ = Describe("Cloneset based rollout tests", func() {
Expect(kc.Status.UpdatedReadyReplicas).Should(BeEquivalentTo(*kc.Spec.Replicas))
}
verifyAppConfigInactive := func(appConfigName string) {
var appConfig v1alpha2.ApplicationContext
verifyAppConfigInactive := func(appContextName string) {
var appContext v1alpha2.ApplicationContext
By("Verify AppConfig is inactive")
Eventually(
func() types.RollingStatus {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: appConfigName}, &appConfig)
return appConfig.Status.RollingStatus
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: appContextName}, &appContext)
return appContext.Status.RollingStatus
},
time.Second*30, time.Millisecond*500).Should(BeEquivalentTo(types.InactiveAfterRollingCompleted))
}
@@ -205,7 +215,19 @@ var _ = Describe("Cloneset based rollout tests", func() {
updateApp("app-target.yaml")
}
revertBackToSource := func() {
// initialScale performs the very first rollout: it applies an AppRollout whose
// source revision is empty (nothing to roll from, so this scales revision 1 up
// from scratch) and blocks until the rollout reports success.
initialScale := func() {
	By("Apply the application rollout to deploy the source")
	var newAppRollout v1beta1.AppRollout
	// load the shared AppRollout fixture and retarget it at this test's namespace
	Expect(common.ReadYamlToObject("testdata/rollout/cloneset/appRollout.yaml", &newAppRollout)).Should(BeNil())
	newAppRollout.Namespace = namespaceName
	// empty source marks this as the initial deployment of revision 1
	newAppRollout.Spec.SourceAppRevisionName = ""
	newAppRollout.Spec.TargetAppRevisionName = utils.ConstructRevisionName(app.GetName(), 1)
	createAppRolling(&newAppRollout)
	// record the name so later polling helpers can fetch this rollout
	appRolloutName = newAppRollout.Name
	verifyRolloutSucceeded(newAppRollout.Spec.TargetAppRevisionName)
}
rollForwardToSource := func() {
By("Revert the application back to source")
updateApp("app-source.yaml")
@@ -263,17 +285,8 @@ var _ = Describe("Cloneset based rollout tests", func() {
It("Test cloneset rollout with a manual check", func() {
applyTwoAppVersion()
By("Apply the application rollout to deploy the source")
var newAppRollout v1beta1.AppRollout
Expect(common.ReadYamlToObject("testdata/rollout/cloneset/appRollout.yaml", &newAppRollout)).Should(BeNil())
newAppRollout.Namespace = namespaceName
newAppRollout.Spec.SourceAppRevisionName = ""
newAppRollout.Spec.TargetAppRevisionName = utils.ConstructRevisionName(app.GetName(), 1)
createAppRolling(&newAppRollout)
appRolloutName = newAppRollout.Name
verifyRolloutSucceeded(newAppRollout.Spec.TargetAppRevisionName)
// scale to v1
initialScale()
By("Apply the application rollout that stops after the first batch")
batchPartition := 0
Eventually(
@@ -288,7 +301,7 @@ var _ = Describe("Cloneset based rollout tests", func() {
By("Wait for the rollout phase change to rolling in batches")
Eventually(
func() oamstd.RollingState {
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: newAppRollout.Name}, &appRollout)
k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: appRolloutName}, &appRollout)
return appRollout.Status.RollingState
},
time.Second*60, time.Millisecond*500).Should(BeEquivalentTo(oamstd.RollingInBatchesState))
@@ -404,18 +417,10 @@ var _ = Describe("Cloneset based rollout tests", func() {
Expect(appRollout.Status.GetCondition(oamstd.RolloutSucceed).LastTransitionTime).Should(BeEquivalentTo(lt))
})
It("Test rolling back after a successful rollout", func() {
It("Test rolling forward after a successful rollout", func() {
applyTwoAppVersion()
By("Apply the application rollout to deploy the source")
var newAppRollout v1beta1.AppRollout
Expect(common.ReadYamlToObject("testdata/rollout/cloneset/appRollout.yaml", &newAppRollout)).Should(BeNil())
newAppRollout.Namespace = namespaceName
newAppRollout.Spec.SourceAppRevisionName = ""
newAppRollout.Spec.TargetAppRevisionName = utils.ConstructRevisionName(app.GetName(), 1)
createAppRolling(&newAppRollout)
appRolloutName = newAppRollout.Name
verifyRolloutSucceeded(newAppRollout.Spec.TargetAppRevisionName)
// scale to v1
initialScale()
By("Finish the application rollout")
Eventually(
@@ -438,21 +443,13 @@ var _ = Describe("Cloneset based rollout tests", func() {
verifyRolloutSucceeded(appRollout.Spec.TargetAppRevisionName)
verifyAppConfigInactive(appRollout.Spec.SourceAppRevisionName)
revertBackToSource()
rollForwardToSource()
})
It("Test rolling back in the middle of rollout", func() {
It("Test rolling forward in the middle of rollout", func() {
applyTwoAppVersion()
By("Apply the application rollout to deploy the source")
var newAppRollout v1beta1.AppRollout
Expect(common.ReadYamlToObject("testdata/rollout/cloneset/appRollout.yaml", &newAppRollout)).Should(BeNil())
newAppRollout.Namespace = namespaceName
newAppRollout.Spec.SourceAppRevisionName = ""
newAppRollout.Spec.TargetAppRevisionName = utils.ConstructRevisionName(app.GetName(), 1)
createAppRolling(&newAppRollout)
appRolloutName = newAppRollout.Name
verifyRolloutSucceeded(newAppRollout.Spec.TargetAppRevisionName)
// scale to v1
initialScale()
By("Finish the application rollout")
Eventually(
@@ -471,8 +468,104 @@ var _ = Describe("Cloneset based rollout tests", func() {
return appRollout.Status.RollingState
},
time.Second*10, time.Millisecond*10).Should(BeEquivalentTo(oamstd.RollingInBatchesState))
// revert to source by rolling forward
rollForwardToSource()
})
revertBackToSource()
// E2E: deleting the AppRollout while it is mid-rollout must leave the cloneset
// workload in place — afterwards the cloneset has no controller owner and its
// update strategy is paused.
It("Test delete rollout plan should not remove workload", func() {
	CreateClonesetDef()
	applyTwoAppVersion()
	// scale to v1
	initialScale()
	By("Finish the application rollout")
	// Get+Update retried via Eventually to absorb resourceVersion conflicts
	Eventually(
		func() error {
			k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: appRollout.Name}, &appRollout)
			appRollout.Spec.SourceAppRevisionName = utils.ConstructRevisionName(app.GetName(), 1)
			appRollout.Spec.TargetAppRevisionName = utils.ConstructRevisionName(app.GetName(), 2)
			// nil partition lets the rollout run through all batches
			appRollout.Spec.RolloutPlan.BatchPartition = nil
			return k8sClient.Update(ctx, &appRollout)
		}, time.Second*10, time.Millisecond*500).Should(Succeed())
	By("Wait for the rollout phase change to rolling in batches")
	Eventually(
		func() oamstd.RollingState {
			k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: appRollout.Name}, &appRollout)
			return appRollout.Status.RollingState
		},
		time.Second*10, time.Millisecond*500).Should(BeEquivalentTo(oamstd.RollingInBatchesState))
	verifyRolloutOwnsCloneset()
	By("Remove the application rollout")
	// remove the rollout
	Expect(k8sClient.Delete(ctx, &appRollout)).Should(Succeed())
	verifyRolloutDeleted()
	// wait for a bit until the application takes back control
	By("Verify that application does not control the cloneset")
	clonesetName := appRollout.Spec.ComponentList[0]
	Eventually(
		func() bool {
			k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: clonesetName}, &kc)
			// the cloneset must have lost its controller owner reference...
			if metav1.GetControllerOf(&kc) != nil {
				return false
			}
			// ...and be left with updates paused
			return kc.Spec.UpdateStrategy.Paused
		}, time.Second*30, time.Second).Should(BeTrue())
})
// E2E: swapping source and target revisions while a rollout is still running
// ("revert") must itself complete successfully, deactivate the old revision's
// AppContext, and end with the cloneset controlled by an ApplicationContext.
It("Test revert the rollout plan in the middle of rollout", func() {
	CreateClonesetDef()
	applyTwoAppVersion()
	// scale to v1
	initialScale()
	By("Finish the application rollout")
	// Get+Update retried via Eventually to absorb resourceVersion conflicts
	Eventually(
		func() error {
			k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: appRollout.Name}, &appRollout)
			appRollout.Spec.SourceAppRevisionName = utils.ConstructRevisionName(app.GetName(), 1)
			appRollout.Spec.TargetAppRevisionName = utils.ConstructRevisionName(app.GetName(), 2)
			appRollout.Spec.RolloutPlan.BatchPartition = nil
			return k8sClient.Update(ctx, &appRollout)
		}, time.Second*5, time.Millisecond*500).Should(Succeed())
	By("Wait for the rollout phase change to rolling in batches")
	Eventually(
		func() oamstd.RollingState {
			k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: appRollout.Name}, &appRollout)
			return appRollout.Status.RollingState
		},
		time.Second*10, time.Millisecond*500).Should(BeEquivalentTo(oamstd.RollingInBatchesState))
	verifyRolloutOwnsCloneset()
	By("Revert the application rollout")
	// swap source and target: now roll from revision 2 back to revision 1
	Eventually(
		func() error {
			k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: appRollout.Name}, &appRollout)
			appRollout.Spec.SourceAppRevisionName = utils.ConstructRevisionName(app.GetName(), 2)
			appRollout.Spec.TargetAppRevisionName = utils.ConstructRevisionName(app.GetName(), 1)
			appRollout.Spec.RolloutPlan.BatchPartition = nil
			return k8sClient.Update(ctx, &appRollout)
		}, time.Second*5, time.Millisecond*500).Should(Succeed())
	verifyRolloutSucceeded(appRollout.Spec.TargetAppRevisionName)
	verifyAppConfigInactive(appRollout.Spec.SourceAppRevisionName)
	// wait for a bit until the application takes back control
	By("Verify that application does not control the cloneset")
	clonesetName := appRollout.Spec.ComponentList[0]
	Eventually(
		func() string {
			k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: clonesetName}, &kc)
			clonesetOwner := metav1.GetControllerOf(&kc)
			if clonesetOwner == nil {
				return ""
			}
			// ownership must settle on the ApplicationContext, not the AppRollout
			return clonesetOwner.Kind
		}, time.Second*30, time.Second).Should(BeEquivalentTo(v1alpha2.ApplicationContextKind))
})
PIt("Test rolling by changing the definition", func() {

View File

@@ -3,7 +3,6 @@ kind: Application
metadata:
name: test-rolling-pause
annotations:
"app.oam.dev/rolling-components": "metrics-provider"
"app.oam.dev/rollout-template": "true"
spec:
components:

View File

@@ -3,7 +3,6 @@ kind: Application
metadata:
name: test-e2e-rolling
annotations:
"app.oam.dev/rolling-components": "metrics-provider"
"app.oam.dev/rollout-template": "true"
spec:
components:

View File

@@ -3,7 +3,6 @@ kind: Application
metadata:
name: test-e2e-rolling
annotations:
"app.oam.dev/rolling-components": "metrics-provider"
"app.oam.dev/rollout-template": "true"
spec:
components:

View File

@@ -3,7 +3,6 @@ kind: Application
metadata:
name: test-e2e-rolling
annotations:
"app.oam.dev/rolling-components": "metrics-provider"
"app.oam.dev/rollout-template": "true"
spec:
components: