mirror of
https://github.com/kubevela/kubevela.git
synced 2026-02-27 00:04:07 +00:00
Compare commits
57 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
3800a700b0 | ||
|
|
75f081f248 | ||
|
|
fb6d505e6e | ||
|
|
1a267bc6d1 | ||
|
|
8643558f6b | ||
|
|
d063cd3b78 | ||
|
|
c4fa81b703 | ||
|
|
ebc8476a31 | ||
|
|
0333b9e891 | ||
|
|
3be63bba91 | ||
|
|
f9f2c3b119 | ||
|
|
34684650c3 | ||
|
|
e76992df87 | ||
|
|
3a68ec77fd | ||
|
|
199e747865 | ||
|
|
c6b65cbd5b | ||
|
|
9fa237721e | ||
|
|
b90fffad43 | ||
|
|
1ad6dcc1a7 | ||
|
|
1659528015 | ||
|
|
ce20f55382 | ||
|
|
384c5ab6a9 | ||
|
|
a7bfb2fb6f | ||
|
|
8703950da6 | ||
|
|
f7cfe6fbae | ||
|
|
e8ed4d78cf | ||
|
|
0c8e22ef4d | ||
|
|
e9004d7478 | ||
|
|
282efa2893 | ||
|
|
580157df6a | ||
|
|
6ecb0567f5 | ||
|
|
dcf79a9b57 | ||
|
|
b411d79ed0 | ||
|
|
d8eb7d687c | ||
|
|
63b348f4d8 | ||
|
|
2e43a6dc78 | ||
|
|
26163aacdc | ||
|
|
5164efeb4a | ||
|
|
2254777523 | ||
|
|
389722a94e | ||
|
|
4b86689d63 | ||
|
|
2b07ec49d4 | ||
|
|
7edfbde2f5 | ||
|
|
bc6a60dde2 | ||
|
|
1407fc5125 | ||
|
|
2d6f2083db | ||
|
|
c619b7b290 | ||
|
|
bc6000f794 | ||
|
|
2e92a66fa5 | ||
|
|
e5a9be8de9 | ||
|
|
5d40f121ea | ||
|
|
58c3283411 | ||
|
|
6d65169449 | ||
|
|
9bf6cc2659 | ||
|
|
ecbb5f56ca | ||
|
|
7ede0746c1 | ||
|
|
e8e45dd7d3 |
30
.github/workflows/codeql-analysis.yml
vendored
Normal file
30
.github/workflows/codeql-analysis.yml
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
name: "CodeQL"
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ master, release-* ]
|
||||
|
||||
jobs:
|
||||
analyze:
|
||||
name: Analyze
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
language: [ 'go' ]
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: Initialize CodeQL
|
||||
uses: github/codeql-action/init@v1
|
||||
with:
|
||||
languages: ${{ matrix.language }}
|
||||
|
||||
- name: Autobuild
|
||||
uses: github/codeql-action/autobuild@v1
|
||||
|
||||
- name: Perform CodeQL Analysis
|
||||
uses: github/codeql-action/analyze@v1
|
||||
5
.github/workflows/go.yml
vendored
5
.github/workflows/go.yml
vendored
@@ -15,7 +15,6 @@ env:
|
||||
# Common versions
|
||||
GO_VERSION: '1.14'
|
||||
GOLANGCI_VERSION: 'v1.38'
|
||||
DOCKER_BUILDX_VERSION: 'v0.4.2'
|
||||
KIND_VERSION: 'v0.7.0'
|
||||
|
||||
jobs:
|
||||
@@ -30,7 +29,7 @@ jobs:
|
||||
uses: fkirc/skip-duplicate-actions@v3.3.0
|
||||
with:
|
||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
paths_ignore: '["**.md", "**.png", "**.jpg"]'
|
||||
paths_ignore: '["**.md", "**.mdx", "**.png", "**.jpg"]'
|
||||
do_not_skip: '["workflow_dispatch", "schedule", "push"]'
|
||||
concurrent_skipping: false
|
||||
|
||||
@@ -167,6 +166,8 @@ jobs:
|
||||
run: |
|
||||
make e2e-cleanup
|
||||
make e2e-setup
|
||||
helm lint ./charts/vela-core
|
||||
helm test -n vela-system kubevela --timeout 5m
|
||||
|
||||
- name: Wait for e2e preparation ready
|
||||
run: |
|
||||
|
||||
78
.github/workflows/release.yml
vendored
78
.github/workflows/release.yml
vendored
@@ -34,52 +34,104 @@ jobs:
|
||||
- name: Get release
|
||||
id: get_release
|
||||
uses: bruceadams/get-release@v1.2.2
|
||||
- name: Upload Linux amd64 tar.gz
|
||||
- name: Upload Vela Linux amd64 tar.gz
|
||||
uses: actions/upload-release-asset@v1.0.2
|
||||
with:
|
||||
upload_url: ${{ steps.get_release.outputs.upload_url }}
|
||||
asset_path: ./_bin/vela-linux-amd64.tar.gz
|
||||
asset_path: ./_bin/vela/vela-linux-amd64.tar.gz
|
||||
asset_name: vela-${{ steps.get_version.outputs.VERSION }}-linux-amd64.tar.gz
|
||||
asset_content_type: binary/octet-stream
|
||||
- name: Upload Linux amd64 zip
|
||||
- name: Upload Vela Linux amd64 zip
|
||||
uses: actions/upload-release-asset@v1.0.2
|
||||
with:
|
||||
upload_url: ${{ steps.get_release.outputs.upload_url }}
|
||||
asset_path: ./_bin/vela-linux-amd64.zip
|
||||
asset_path: ./_bin/vela/vela-linux-amd64.zip
|
||||
asset_name: vela-${{ steps.get_version.outputs.VERSION }}-linux-amd64.zip
|
||||
asset_content_type: binary/octet-stream
|
||||
- name: Upload MacOS tar.gz
|
||||
- name: Upload Vela MacOS tar.gz
|
||||
uses: actions/upload-release-asset@v1.0.2
|
||||
with:
|
||||
upload_url: ${{ steps.get_release.outputs.upload_url }}
|
||||
asset_path: ./_bin/vela-darwin-amd64.tar.gz
|
||||
asset_path: ./_bin/vela/vela-darwin-amd64.tar.gz
|
||||
asset_name: vela-${{ steps.get_version.outputs.VERSION }}-darwin-amd64.tar.gz
|
||||
asset_content_type: binary/octet-stream
|
||||
- name: Upload MacOS zip
|
||||
- name: Upload Vela MacOS zip
|
||||
uses: actions/upload-release-asset@v1.0.2
|
||||
with:
|
||||
upload_url: ${{ steps.get_release.outputs.upload_url }}
|
||||
asset_path: ./_bin/vela-darwin-amd64.zip
|
||||
asset_path: ./_bin/vela/vela-darwin-amd64.zip
|
||||
asset_name: vela-${{ steps.get_version.outputs.VERSION }}-darwin-amd64.zip
|
||||
asset_content_type: binary/octet-stream
|
||||
- name: Upload Windows tar.gz
|
||||
- name: Upload Vela Windows tar.gz
|
||||
uses: actions/upload-release-asset@v1.0.2
|
||||
with:
|
||||
upload_url: ${{ steps.get_release.outputs.upload_url }}
|
||||
asset_path: ./_bin/vela-windows-amd64.tar.gz
|
||||
asset_path: ./_bin/vela/vela-windows-amd64.tar.gz
|
||||
asset_name: vela-${{ steps.get_version.outputs.VERSION }}-windows-amd64.tar.gz
|
||||
asset_content_type: binary/octet-stream
|
||||
- name: Upload Windows zip
|
||||
- name: Upload Vela Windows zip
|
||||
uses: actions/upload-release-asset@v1.0.2
|
||||
with:
|
||||
upload_url: ${{ steps.get_release.outputs.upload_url }}
|
||||
asset_path: ./_bin/vela-windows-amd64.zip
|
||||
asset_path: ./_bin/vela/vela-windows-amd64.zip
|
||||
asset_name: vela-${{ steps.get_version.outputs.VERSION }}-windows-amd64.zip
|
||||
asset_content_type: binary/octet-stream
|
||||
- name: Upload Kubectl-Vela Linux amd64 tar.gz
|
||||
uses: actions/upload-release-asset@v1.0.2
|
||||
with:
|
||||
upload_url: ${{ steps.get_release.outputs.upload_url }}
|
||||
asset_path: ./_bin/kubectl-vela/kubectl-vela-linux-amd64.tar.gz
|
||||
asset_name: kubectl-vela-${{ steps.get_version.outputs.VERSION }}-linux-amd64.tar.gz
|
||||
asset_content_type: binary/octet-stream
|
||||
- name: Upload Kubectl-Vela Linux amd64 zip
|
||||
uses: actions/upload-release-asset@v1.0.2
|
||||
with:
|
||||
upload_url: ${{ steps.get_release.outputs.upload_url }}
|
||||
asset_path: ./_bin/kubectl-vela/kubectl-vela-linux-amd64.zip
|
||||
asset_name: kubectl-vela-${{ steps.get_version.outputs.VERSION }}-linux-amd64.zip
|
||||
asset_content_type: binary/octet-stream
|
||||
- name: Upload Kubectl-Vela MacOS tar.gz
|
||||
uses: actions/upload-release-asset@v1.0.2
|
||||
with:
|
||||
upload_url: ${{ steps.get_release.outputs.upload_url }}
|
||||
asset_path: ./_bin/kubectl-vela/kubectl-vela-darwin-amd64.tar.gz
|
||||
asset_name: kubectl-vela-${{ steps.get_version.outputs.VERSION }}-darwin-amd64.tar.gz
|
||||
asset_content_type: binary/octet-stream
|
||||
- name: Upload Kubectl-Vela MacOS zip
|
||||
uses: actions/upload-release-asset@v1.0.2
|
||||
with:
|
||||
upload_url: ${{ steps.get_release.outputs.upload_url }}
|
||||
asset_path: ./_bin/kubectl-vela/kubectl-vela-darwin-amd64.zip
|
||||
asset_name: kubectl-vela-${{ steps.get_version.outputs.VERSION }}-darwin-amd64.zip
|
||||
asset_content_type: binary/octet-stream
|
||||
- name: Upload Kubectl-Vela Windows tar.gz
|
||||
uses: actions/upload-release-asset@v1.0.2
|
||||
with:
|
||||
upload_url: ${{ steps.get_release.outputs.upload_url }}
|
||||
asset_path: ./_bin/kubectl-vela/kubectl-vela-windows-amd64.tar.gz
|
||||
asset_name: kubectl-vela-${{ steps.get_version.outputs.VERSION }}-windows-amd64.tar.gz
|
||||
asset_content_type: binary/octet-stream
|
||||
- name: Upload Kubectl-Vela Windows zip
|
||||
uses: actions/upload-release-asset@v1.0.2
|
||||
with:
|
||||
upload_url: ${{ steps.get_release.outputs.upload_url }}
|
||||
asset_path: ./_bin/kubectl-vela/kubectl-vela-windows-amd64.zip
|
||||
asset_name: kubectl-vela-${{ steps.get_version.outputs.VERSION }}-windows-amd64.zip
|
||||
asset_content_type: binary/octet-stream
|
||||
- name: Upload Checksums
|
||||
uses: actions/upload-release-asset@v1.0.2
|
||||
with:
|
||||
upload_url: ${{ steps.get_release.outputs.upload_url }}
|
||||
asset_path: ./_bin/sha256sums.txt
|
||||
asset_name: sha256sums.txt
|
||||
asset_content_type: text/plain
|
||||
asset_content_type: text/plain
|
||||
- uses: actions/setup-node@v1
|
||||
with:
|
||||
node-version: '12.x'
|
||||
- name: Sync release to kubevela.io Repo
|
||||
env:
|
||||
SSH_DEPLOY_KEY: ${{ secrets.GH_PAGES_DEPLOY }}
|
||||
VERSION: ${{ steps.get_version.outputs.VERSION }}
|
||||
COMMIT_ID: ${{ github.sha }}
|
||||
run: |
|
||||
bash ./hack/website/release.sh
|
||||
10
.github/workflows/timed-task.yml
vendored
Normal file
10
.github/workflows/timed-task.yml
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
name: Timed Task
|
||||
on:
|
||||
schedule:
|
||||
- cron: '0 16 * * *'
|
||||
jobs:
|
||||
clean-image:
|
||||
runs-on: aliyun
|
||||
steps:
|
||||
- name: Cleanup image
|
||||
run: docker image prune -f
|
||||
24
Dockerfile
24
Dockerfile
@@ -1,5 +1,5 @@
|
||||
# Build the manager binary
|
||||
FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.14 as builder
|
||||
FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.14-alpine as builder
|
||||
|
||||
WORKDIR /workspace
|
||||
# Copy the Go Modules manifests
|
||||
@@ -19,24 +19,26 @@ COPY version/ version/
|
||||
ARG TARGETARCH
|
||||
ARG VERSION
|
||||
ARG GITVERSION
|
||||
RUN CGO_ENABLED=0 GOOS=linux GOARCH=${TARGETARCH} GO111MODULE=on \
|
||||
go build -a -ldflags "-X github.com/oam-dev/kubevela/version.VelaVersion=${VERSION:-undefined} -X github.com/oam-dev/kubevela/version.GitRevision=${GITVERSION:-undefined}" \
|
||||
RUN GO111MODULE=on CGO_ENABLED=0 GOOS=linux GOARCH=${TARGETARCH} \
|
||||
go build -a -ldflags "-s -w -X github.com/oam-dev/kubevela/version.VelaVersion=${VERSION:-undefined} -X github.com/oam-dev/kubevela/version.GitRevision=${GITVERSION:-undefined}" \
|
||||
-o manager-${TARGETARCH} main.go
|
||||
|
||||
# Use ubuntu as base image for convenience.
|
||||
# Use alpine as base image due to the discussion in issue #1448
|
||||
# You can replace distroless as minimal base image to package the manager binary
|
||||
# Refer to https://github.com/GoogleContainerTools/distroless for more details
|
||||
# Could use `--build-arg=BASE_IMAGE=gcr.io/distroless/static:nonroot` to overwrite
|
||||
# Overwrite `BASE_IMAGE` by passing `--build-arg=BASE_IMAGE=gcr.io/distroless/static:nonroot`
|
||||
ARG BASE_IMAGE
|
||||
FROM ${BASE_IMAGE:-ubuntu:latest}
|
||||
FROM ${BASE_IMAGE:-alpine:latest}
|
||||
# This is required by daemon connnecting with cri
|
||||
RUN ln -s /usr/bin/* /usr/sbin/ && apt-get update -y \
|
||||
&& apt-get install --no-install-recommends -y ca-certificates \
|
||||
&& apt-get clean && rm -rf /var/log/*log /var/lib/apt/lists/* /var/log/apt/* /var/lib/dpkg/*-old /var/cache/debconf/*-old
|
||||
RUN apk add --no-cache ca-certificates bash
|
||||
|
||||
WORKDIR /
|
||||
|
||||
ARG TARGETARCH
|
||||
COPY --from=builder /workspace/manager-${TARGETARCH} /manager
|
||||
COPY --from=builder /workspace/manager-${TARGETARCH} /usr/local/bin/manager
|
||||
|
||||
ENTRYPOINT ["/manager"]
|
||||
COPY entrypoint.sh /usr/local/bin/
|
||||
|
||||
ENTRYPOINT ["entrypoint.sh"]
|
||||
|
||||
CMD ["manager"]
|
||||
|
||||
69
Makefile
69
Makefile
@@ -3,10 +3,11 @@ VELA_VERSION ?= master
|
||||
# Repo info
|
||||
GIT_COMMIT ?= git-$(shell git rev-parse --short HEAD)
|
||||
GIT_COMMIT_LONG ?= $(shell git rev-parse HEAD)
|
||||
VELA_VERSION_VAR := github.com/oam-dev/kubevela/version.VelaVersion
|
||||
VELA_GITVERSION_VAR := github.com/oam-dev/kubevela/version.GitRevision
|
||||
LDFLAGS ?= "-X $(VELA_VERSION_VAR)=$(VELA_VERSION) -X $(VELA_GITVERSION_VAR)=$(GIT_COMMIT)"
|
||||
VELA_VERSION_KEY := github.com/oam-dev/kubevela/version.VelaVersion
|
||||
VELA_GITVERSION_KEY := github.com/oam-dev/kubevela/version.GitRevision
|
||||
LDFLAGS ?= "-s -w -X $(VELA_VERSION_KEY)=$(VELA_VERSION) -X $(VELA_GITVERSION_KEY)=$(GIT_COMMIT)"
|
||||
|
||||
GOBUILD_ENV = GO111MODULE=on CGO_ENABLED=0
|
||||
GOX = go run github.com/mitchellh/gox
|
||||
TARGETS := darwin/amd64 linux/amd64 windows/amd64
|
||||
DIST_DIRS := find * -type d -exec
|
||||
@@ -34,6 +35,10 @@ else
|
||||
GOBIN=$(shell go env GOBIN)
|
||||
endif
|
||||
|
||||
# Image URL to use all building/pushing image targets
|
||||
VELA_CORE_IMAGE ?= vela-core:latest
|
||||
VELA_CORE_TEST_IMAGE ?= vela-core-test:$(GIT_COMMIT)
|
||||
|
||||
all: build
|
||||
|
||||
# Run tests
|
||||
@@ -42,18 +47,18 @@ test: vet lint staticcheck
|
||||
go test -race -covermode=atomic ./references/apiserver/... ./references/appfile/... ./references/cli/... ./references/common/... ./references/plugins/...
|
||||
@$(OK) unit-tests pass
|
||||
|
||||
# Build manager binary
|
||||
build: fmt vet lint staticcheck
|
||||
go run hack/chart/generate.go
|
||||
go build -o bin/vela -ldflags ${LDFLAGS} references/cmd/cli/main.go
|
||||
git checkout references/cmd/cli/fake/chart_source.go
|
||||
# Build vela cli binary
|
||||
build: fmt vet lint staticcheck vela-cli kubectl-vela
|
||||
@$(OK) build succeed
|
||||
|
||||
vela-cli:
|
||||
go run hack/chart/generate.go
|
||||
go build -o bin/vela -ldflags ${LDFLAGS} references/cmd/cli/main.go
|
||||
$(GOBUILD_ENV) go build -o bin/vela -a -ldflags $(LDFLAGS) ./references/cmd/cli/main.go
|
||||
git checkout references/cmd/cli/fake/chart_source.go
|
||||
|
||||
kubectl-vela:
|
||||
$(GOBUILD_ENV) go build -o bin/kubectl-vela -a -ldflags $(LDFLAGS) ./cmd/plugin/main.go
|
||||
|
||||
dashboard-build:
|
||||
cd references/dashboard && npm install && cd ..
|
||||
|
||||
@@ -72,10 +77,10 @@ docs-start:
|
||||
ifeq ($(wildcard git-page),)
|
||||
git clone --single-branch --depth 1 https://github.com/oam-dev/kubevela.io.git git-page
|
||||
endif
|
||||
rm -r git-page/docs && rm -r git-page/resources
|
||||
rm -r git-page/docs
|
||||
rm git-page/sidebars.js
|
||||
cat docs/sidebars.js > git-page/sidebars.js
|
||||
cp -R docs/en git-page/docs && cp -R docs/resources git-page/resources
|
||||
cp -R docs/en git-page/docs
|
||||
cd git-page && yarn install && yarn start
|
||||
|
||||
api-gen:
|
||||
@@ -87,19 +92,28 @@ generate-source:
|
||||
go run hack/frontend/source.go
|
||||
|
||||
cross-build:
|
||||
rm -rf _bin
|
||||
go run hack/chart/generate.go
|
||||
GO111MODULE=on CGO_ENABLED=0 $(GOX) -ldflags $(LDFLAGS) -parallel=2 -output="_bin/{{.OS}}-{{.Arch}}/vela" -osarch='$(TARGETS)' ./references/cmd/cli/
|
||||
$(GOBUILD_ENV) $(GOX) -ldflags $(LDFLAGS) -parallel=2 -output="_bin/vela/{{.OS}}-{{.Arch}}/vela" -osarch='$(TARGETS)' ./references/cmd/cli
|
||||
$(GOBUILD_ENV) $(GOX) -ldflags $(LDFLAGS) -parallel=2 -output="_bin/kubectl-vela/{{.OS}}-{{.Arch}}/kubectl-vela" -osarch='$(TARGETS)' ./cmd/plugin
|
||||
git checkout references/cmd/cli/fake/chart_source.go
|
||||
|
||||
compress:
|
||||
( \
|
||||
echo "\n## Release Info\nVERSION: $(VELA_VERSION)" >> README.md && \
|
||||
echo "GIT_COMMIT: $(GIT_COMMIT_LONG)\n" >> README.md && \
|
||||
cd _bin && \
|
||||
$(DIST_DIRS) cp ../LICENSE {} \; && \
|
||||
$(DIST_DIRS) cp ../README.md {} \; && \
|
||||
cd _bin/vela && \
|
||||
$(DIST_DIRS) cp ../../LICENSE {} \; && \
|
||||
$(DIST_DIRS) cp ../../README.md {} \; && \
|
||||
$(DIST_DIRS) tar -zcf vela-{}.tar.gz {} \; && \
|
||||
$(DIST_DIRS) zip -r vela-{}.zip {} \; && \
|
||||
sha256sum vela-* > sha256sums.txt \
|
||||
cd ../kubectl-vela && \
|
||||
$(DIST_DIRS) cp ../../LICENSE {} \; && \
|
||||
$(DIST_DIRS) cp ../../README.md {} \; && \
|
||||
$(DIST_DIRS) tar -zcf kubectl-vela-{}.tar.gz {} \; && \
|
||||
$(DIST_DIRS) zip -r kubectl-vela-{}.zip {} \; && \
|
||||
cd .. && \
|
||||
sha256sum vela/vela-* kubectl-vela/kubectl-vela-* > sha256sums.txt \
|
||||
)
|
||||
|
||||
# Run against the configured Kubernetes cluster in ~/.kube/config
|
||||
@@ -120,7 +134,7 @@ staticcheck: staticchecktool
|
||||
$(STATICCHECK) ./...
|
||||
|
||||
lint: golangci
|
||||
$(GOLANGCILINT) run ./...
|
||||
$(GOLANGCILINT) run ./...
|
||||
|
||||
reviewable: manifests fmt vet lint staticcheck
|
||||
go mod tidy
|
||||
@@ -133,16 +147,16 @@ check-diff: reviewable
|
||||
|
||||
# Build the docker image
|
||||
docker-build:
|
||||
docker build --build-arg=VERSION=$(VELA_VERSION) --build-arg=GITVERSION=$(GIT_COMMIT) . -t ${IMG}
|
||||
docker build --build-arg=VERSION=$(VELA_VERSION) --build-arg=GITVERSION=$(GIT_COMMIT) -t $(VELA_CORE_IMAGE) .
|
||||
|
||||
# Push the docker image
|
||||
docker-push:
|
||||
docker push ${IMG}
|
||||
docker push $(VELA_CORE_IMAGE)
|
||||
|
||||
e2e-setup:
|
||||
helm install --create-namespace -n flux-system helm-flux http://oam.dev/catalog/helm-flux2-0.1.0.tgz
|
||||
helm install kruise https://github.com/openkruise/kruise/releases/download/v0.7.0/kruise-chart.tgz
|
||||
helm upgrade --install --create-namespace --namespace vela-system --set image.pullPolicy=IfNotPresent --set image.repository=vela-core-test --set image.tag=$(GIT_COMMIT) --wait kubevela ./charts/vela-core
|
||||
helm upgrade --install --create-namespace --namespace vela-system --set image.pullPolicy=IfNotPresent --set image.repository=vela-core-test --set applicationRevisionLimit=5 --set image.tag=$(GIT_COMMIT) --wait kubevela ./charts/vela-core
|
||||
ginkgo version
|
||||
ginkgo -v -r e2e/setup
|
||||
kubectl wait --for=condition=Ready pod -l app.kubernetes.io/name=vela-core,app.kubernetes.io/instance=kubevela -n vela-system --timeout=600s
|
||||
@@ -179,25 +193,22 @@ e2e-cleanup:
|
||||
|
||||
image-cleanup:
|
||||
# Delete Docker image
|
||||
ifneq ($(shell docker images -q vela-core-test:$(GIT_COMMIT)),)
|
||||
docker image rm -f vela-core-test:$(GIT_COMMIT)
|
||||
ifneq ($(shell docker images -q $(VELA_CORE_TEST_IMAGE)),)
|
||||
docker rmi -f $(VELA_CORE_TEST_IMAGE)
|
||||
endif
|
||||
|
||||
# load docker image to the kind cluster
|
||||
kind-load:
|
||||
docker build -t vela-core-test:$(GIT_COMMIT) .
|
||||
kind load docker-image vela-core-test:$(GIT_COMMIT) || { echo >&2 "kind not installed or error loading image: $(IMAGE)"; exit 1; }
|
||||
|
||||
# Image URL to use all building/pushing image targets
|
||||
IMG ?= vela-core:latest
|
||||
docker build -t $(VELA_CORE_TEST_IMAGE) .
|
||||
kind load docker-image $(VELA_CORE_TEST_IMAGE) || { echo >&2 "kind not installed or error loading image: $(VELA_CORE_TEST_IMAGE)"; exit 1; }
|
||||
|
||||
# Run tests
|
||||
core-test: fmt vet manifests
|
||||
go test ./pkg/... -coverprofile cover.out
|
||||
|
||||
# Build manager binary
|
||||
# Build vela core manager binary
|
||||
manager: fmt vet lint manifests
|
||||
go build -o bin/manager ./cmd/core/main.go
|
||||
$(GOBUILD_ENV) go build -o bin/manager -a -ldflags $(LDFLAGS) ./cmd/core/main.go
|
||||
|
||||
# Run against the configured Kubernetes cluster in ~/.kube/config
|
||||
core-run: fmt vet manifests
|
||||
|
||||
21
README.md
21
README.md
@@ -22,13 +22,22 @@ KubeVela is a modern application engine that adapts to your application's needs,
|
||||
- Gitter: [Discussion](https://gitter.im/oam-dev/community)
|
||||
- Bi-weekly Community Call: [Meeting Notes](https://docs.google.com/document/d/1nqdFEyULekyksFHtFvgvFAYE-0AMHKoS3RMnaKsarjs)
|
||||
|
||||
## What problems does it solve?
|
||||
## Introduction
|
||||
|
||||
Traditional Platform-as-a-Service (PaaS) system enables easy application deployments and everything just works, but this happiness disappears when your application outgrows the capabilities of your platform.
|
||||
*Developers simply want to deploy.*
|
||||
|
||||
KubeVela is a modern application engine whose capabilities are actually Infrastructure-as-Code (IaC) components coded by you or come from the ecosystem. Think of it as a *Heroku* which is fully programmable to serve your needs as you grow and expand.
|
||||
Traditional Platform-as-a-Service (PaaS) systems enable easy application deployments, but this happiness disappears when your application outgrows the capabilities of your platform. This is inevitable regardless of your PaaS is built on Kubernetes or not - the root cause is its inflexibility.
|
||||
|
||||
As a plus, KubeVela leverages [Kubernetes Control Loop](https://kubernetes.io/docs/concepts/architecture/controller/) to enforce all those abstractions so they will never leave *configuration drift* (i.e. the running instances are not in line with the expected configuration) in your clusters.
|
||||
KubeVela is a modern application deployment system that adapts to your needs. Essentially, KubeVela enables you to define platform capabilities (such as workloads, operational behaviors, and cloud services) as reusable [CUE](https://cuelang.org/) or [Helm](https://helm.sh) components, per needs of your application deployment. And when your needs grow, your platform capabilities expand naturally in a programmable approach.
|
||||
|
||||
Perfect in flexibility though, X-as-Code tends to lead to configuration drift. That's why KubeVela is fully built as a [Kubernetes Controller](https://kubernetes.io/docs/concepts/architecture/controller/) instead of a client-side tool, i.e. all its capabilities are modeled as code but enforced with battle tested reconciling loops which will never leave inconsistency in your clusters. Think about *Platform-as-Code* enabled by Kubernetes, CUE and Helm.
|
||||
|
||||
With developer experience in mind, KubeVela exposes those programmable platform capabilities as application-centric API shown as below:
|
||||
- Components - deployable/provisionable entities that composed your application deployment
|
||||
- e.g. a Kubernetes workload, a MySQL database, or a AWS OSS bucket
|
||||
- Traits - attachable operational features per your needs
|
||||
- e.g. autoscaling rules, rollout strategies, ingress rules, sidecars, security policies etc
|
||||
- Application - full description of your application deployment assembled with components and traits.
|
||||
|
||||
## Getting Started
|
||||
|
||||
@@ -38,10 +47,10 @@ As a plus, KubeVela leverages [Kubernetes Control Loop](https://kubernetes.io/do
|
||||
|
||||
## Features
|
||||
|
||||
- **Zero-restriction deployment** - design and express platform capabilities with [CUE](https://cuelang.org/) or [Helm](https://helm.sh) per needs of your application, and let Kubernetes controller guarantee the deployment determinism. GUI forms are automatically generated for capabilities so even your dashboard are fully extensible.
|
||||
- **Zero-restriction application deployment system** - design and express platform capabilities with CUE and Helm per needs of your application, and let Kubernetes controller guarantee the determinism in the application deployment. GUI forms are automatically generated for capabilities so even your dashboard are fully extensible.
|
||||
- **Generic progressive rollout framework** - built-in rollout framework and strategies to upgrade your microservice regardless of its workload type (e.g. stateless, stateful, or even custom operators etc).
|
||||
- **Multi-cluster multi-revision application deployment** - built-in model to deploy or rollout your apps across hybrid infrastructures, with Service Mesh for traffic shifting.
|
||||
- **Simple and native** - KubeVela is a just simple Kubernetes custom controller, all its capabilities are defined as [Custom Resources](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) so they naturally work with any CI/CD or GitOps tools that works with Kubernetes.
|
||||
- **Simple and native** - KubeVela is a just simple Kubernetes custom controller, all its capabilities are defined as [Custom Resources](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) so they naturally work with any CI/CD or GitOps tools which work with Kubernetes.
|
||||
|
||||
## Documentation
|
||||
|
||||
|
||||
@@ -160,10 +160,12 @@ const (
|
||||
|
||||
// ApplicationComponentStatus record the health status of App component
|
||||
type ApplicationComponentStatus struct {
|
||||
Name string `json:"name"`
|
||||
Healthy bool `json:"healthy"`
|
||||
Message string `json:"message,omitempty"`
|
||||
Traits []ApplicationTraitStatus `json:"traits,omitempty"`
|
||||
Name string `json:"name"`
|
||||
// WorkloadDefinition is the definition of a WorkloadDefinition, such as deployments/apps.v1
|
||||
WorkloadDefinition WorkloadGVK `json:"workloadDefinition,omitempty"`
|
||||
Healthy bool `json:"healthy"`
|
||||
Message string `json:"message,omitempty"`
|
||||
Traits []ApplicationTraitStatus `json:"traits,omitempty"`
|
||||
}
|
||||
|
||||
// ApplicationTraitStatus records the trait health status
|
||||
|
||||
@@ -65,6 +65,7 @@ func (in *AppStatus) DeepCopy() *AppStatus {
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ApplicationComponentStatus) DeepCopyInto(out *ApplicationComponentStatus) {
|
||||
*out = *in
|
||||
out.WorkloadDefinition = in.WorkloadDefinition
|
||||
if in.Traits != nil {
|
||||
in, out := &in.Traits, &out.Traits
|
||||
*out = make([]ApplicationTraitStatus, len(*in))
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright 2020 The KubeVela Authors.
|
||||
Copyright 2021 The KubeVela Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright 2020 The KubeVela Authors.
|
||||
Copyright 2021 The KubeVela Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
@@ -71,7 +71,6 @@ type ComponentDefinitionStatus struct {
|
||||
// +kubebuilder:subresource:status
|
||||
// +kubebuilder:printcolumn:name="WORKLOAD-KIND",type=string,JSONPath=".spec.workload.definition.kind"
|
||||
// +kubebuilder:printcolumn:name="DESCRIPTION",type=string,JSONPath=".metadata.annotations.definition\\.oam\\.dev/description"
|
||||
// +kubebuilder:printcolumn:name="AGE",type=date,JSONPath=".metadata.creationTimestamp"
|
||||
type ComponentDefinition struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright 2020 The KubeVela Authors.
|
||||
Copyright 2021 The KubeVela Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright 2020 The KubeVela Authors.
|
||||
Copyright 2021 The KubeVela Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright 2020 The KubeVela Authors.
|
||||
Copyright 2021 The KubeVela Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@@ -71,7 +71,6 @@ type WorkloadDefinitionStatus struct {
|
||||
// Component.
|
||||
// +kubebuilder:resource:scope=Namespaced,categories={oam},shortName=workload
|
||||
// +kubebuilder:printcolumn:name="DEFINITION-NAME",type=string,JSONPath=".spec.definitionRef.name"
|
||||
// +kubebuilder:printcolumn:name="AGE",type=date,JSONPath=".metadata.creationTimestamp"
|
||||
type WorkloadDefinition struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
@@ -167,7 +166,6 @@ type TraitDefinitionStatus struct {
|
||||
// +kubebuilder:subresource:status
|
||||
// +kubebuilder:printcolumn:name="APPLIES-TO",type=string,JSONPath=".spec.appliesToWorkloads"
|
||||
// +kubebuilder:printcolumn:name="DESCRIPTION",type=string,JSONPath=".metadata.annotations.definition\\.oam\\.dev/description"
|
||||
// +kubebuilder:printcolumn:name="AGE",type=date,JSONPath=".metadata.creationTimestamp"
|
||||
type TraitDefinition struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright 2020 The KubeVela Authors.
|
||||
Copyright 2021 The KubeVela Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright 2020 The KubeVela Authors.
|
||||
Copyright 2021 The KubeVela Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright 2020 The KubeVela Authors.
|
||||
Copyright 2021 The KubeVela Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright 2020 The KubeVela Authors.
|
||||
Copyright 2021 The KubeVela Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
@@ -72,7 +72,6 @@ type ComponentDefinitionStatus struct {
|
||||
// +kubebuilder:subresource:status
|
||||
// +kubebuilder:printcolumn:name="WORKLOAD-KIND",type=string,JSONPath=".spec.workload.definition.kind"
|
||||
// +kubebuilder:printcolumn:name="DESCRIPTION",type=string,JSONPath=".metadata.annotations.definition\\.oam\\.dev/description"
|
||||
// +kubebuilder:printcolumn:name="AGE",type=date,JSONPath=".metadata.creationTimestamp"
|
||||
type ComponentDefinition struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
@@ -72,7 +72,6 @@ type WorkloadDefinitionStatus struct {
|
||||
// +kubebuilder:storageversion
|
||||
// +kubebuilder:printcolumn:name="DEFINITION-NAME",type=string,JSONPath=".spec.definitionRef.name"
|
||||
// +kubebuilder:printcolumn:name="DESCRIPTION",type=string,JSONPath=".metadata.annotations.definition\\.oam\\.dev/description"
|
||||
// +kubebuilder:printcolumn:name="AGE",type=date,JSONPath=".metadata.creationTimestamp"
|
||||
type WorkloadDefinition struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
@@ -169,7 +168,6 @@ type TraitDefinitionStatus struct {
|
||||
// +kubebuilder:storageversion
|
||||
// +kubebuilder:printcolumn:name="APPLIES-TO",type=string,JSONPath=".spec.appliesToWorkloads"
|
||||
// +kubebuilder:printcolumn:name="DESCRIPTION",type=string,JSONPath=".metadata.annotations.definition\\.oam\\.dev/description"
|
||||
// +kubebuilder:printcolumn:name="AGE",type=date,JSONPath=".metadata.creationTimestamp"
|
||||
type TraitDefinition struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
@@ -242,7 +242,11 @@ func (r *RolloutStatus) SetRolloutCondition(new runtimev1alpha1.Condition) {
|
||||
if !exists {
|
||||
r.Conditions = append(r.Conditions, new)
|
||||
}
|
||||
}
|
||||
|
||||
// we can't panic since it will crash the other controllers
|
||||
func (r *RolloutStatus) illegalStateTransition(err error) {
|
||||
r.RolloutFailed(err.Error())
|
||||
}
|
||||
|
||||
// StateTransition is the center place to do rollout state transition
|
||||
@@ -261,13 +265,18 @@ func (r *RolloutStatus) StateTransition(event RolloutEvent) {
|
||||
|
||||
// we have special transition for these types of event since they require additional info
|
||||
if event == RollingFailedEvent || event == RollingRetriableFailureEvent {
|
||||
panic(fmt.Errorf(invalidRollingStateTransition, rollingState, event))
|
||||
r.illegalStateTransition(fmt.Errorf(invalidRollingStateTransition, rollingState, event))
|
||||
return
|
||||
}
|
||||
// special handle modified event here
|
||||
if event == RollingModifiedEvent {
|
||||
if r.RollingState == RolloutDeletingState {
|
||||
r.illegalStateTransition(fmt.Errorf(invalidRollingStateTransition, rollingState, event))
|
||||
return
|
||||
}
|
||||
if r.RollingState == RolloutFailedState || r.RollingState == RolloutSucceedState {
|
||||
r.ResetStatus()
|
||||
} else if r.RollingState != RolloutDeletingState {
|
||||
} else {
|
||||
r.SetRolloutCondition(NewNegativeCondition(r.getRolloutConditionType(), "Rollout Spec is modified"))
|
||||
r.RollingState = RolloutAbandoningState
|
||||
r.BatchRollingState = BatchInitializingState
|
||||
@@ -278,7 +287,8 @@ func (r *RolloutStatus) StateTransition(event RolloutEvent) {
|
||||
// special handle deleted event here, it can happen at many states
|
||||
if event == RollingDeletedEvent {
|
||||
if r.RollingState == RolloutFailedState || r.RollingState == RolloutSucceedState {
|
||||
panic(fmt.Errorf(invalidRollingStateTransition, rollingState, event))
|
||||
r.illegalStateTransition(fmt.Errorf(invalidRollingStateTransition, rollingState, event))
|
||||
return
|
||||
}
|
||||
r.SetRolloutCondition(NewNegativeCondition(r.getRolloutConditionType(), "Rollout is being deleted"))
|
||||
r.RollingState = RolloutDeletingState
|
||||
@@ -301,7 +311,7 @@ func (r *RolloutStatus) StateTransition(event RolloutEvent) {
|
||||
r.RollingState = InitializingState
|
||||
return
|
||||
}
|
||||
panic(fmt.Errorf(invalidRollingStateTransition, rollingState, event))
|
||||
r.illegalStateTransition(fmt.Errorf(invalidRollingStateTransition, rollingState, event))
|
||||
|
||||
case InitializingState:
|
||||
if event == RollingInitializedEvent {
|
||||
@@ -310,7 +320,7 @@ func (r *RolloutStatus) StateTransition(event RolloutEvent) {
|
||||
r.BatchRollingState = BatchInitializingState
|
||||
return
|
||||
}
|
||||
panic(fmt.Errorf(invalidRollingStateTransition, rollingState, event))
|
||||
r.illegalStateTransition(fmt.Errorf(invalidRollingStateTransition, rollingState, event))
|
||||
|
||||
case RollingInBatchesState:
|
||||
r.batchStateTransition(event)
|
||||
@@ -322,7 +332,7 @@ func (r *RolloutStatus) StateTransition(event RolloutEvent) {
|
||||
r.ResetStatus()
|
||||
return
|
||||
}
|
||||
panic(fmt.Errorf(invalidRollingStateTransition, rollingState, event))
|
||||
r.illegalStateTransition(fmt.Errorf(invalidRollingStateTransition, rollingState, event))
|
||||
|
||||
case RolloutDeletingState:
|
||||
if event == RollingFinalizedEvent {
|
||||
@@ -330,7 +340,7 @@ func (r *RolloutStatus) StateTransition(event RolloutEvent) {
|
||||
r.RollingState = RolloutFailedState
|
||||
return
|
||||
}
|
||||
panic(fmt.Errorf(invalidRollingStateTransition, rollingState, event))
|
||||
r.illegalStateTransition(fmt.Errorf(invalidRollingStateTransition, rollingState, event))
|
||||
|
||||
case FinalisingState:
|
||||
if event == RollingFinalizedEvent {
|
||||
@@ -338,7 +348,7 @@ func (r *RolloutStatus) StateTransition(event RolloutEvent) {
|
||||
r.RollingState = RolloutSucceedState
|
||||
return
|
||||
}
|
||||
panic(fmt.Errorf(invalidRollingStateTransition, rollingState, event))
|
||||
r.illegalStateTransition(fmt.Errorf(invalidRollingStateTransition, rollingState, event))
|
||||
|
||||
case RolloutFailingState:
|
||||
if event == RollingFinalizedEvent {
|
||||
@@ -346,13 +356,13 @@ func (r *RolloutStatus) StateTransition(event RolloutEvent) {
|
||||
r.RollingState = RolloutFailedState
|
||||
return
|
||||
}
|
||||
panic(fmt.Errorf(invalidRollingStateTransition, rollingState, event))
|
||||
r.illegalStateTransition(fmt.Errorf(invalidRollingStateTransition, rollingState, event))
|
||||
|
||||
case RolloutSucceedState, RolloutFailedState:
|
||||
panic(fmt.Errorf(invalidRollingStateTransition, rollingState, event))
|
||||
r.illegalStateTransition(fmt.Errorf(invalidRollingStateTransition, rollingState, event))
|
||||
|
||||
default:
|
||||
panic(fmt.Errorf("invalid rolling state %s", rollingState))
|
||||
r.illegalStateTransition(fmt.Errorf("invalid rolling state %s before transition", rollingState))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -371,7 +381,7 @@ func (r *RolloutStatus) batchStateTransition(event RolloutEvent) {
|
||||
r.BatchRollingState = BatchInRollingState
|
||||
return
|
||||
}
|
||||
panic(fmt.Errorf(invalidBatchRollingStateTransition, batchRollingState, event))
|
||||
r.illegalStateTransition(fmt.Errorf(invalidBatchRollingStateTransition, batchRollingState, event))
|
||||
|
||||
case BatchInRollingState:
|
||||
if event == RolloutOneBatchEvent {
|
||||
@@ -379,7 +389,7 @@ func (r *RolloutStatus) batchStateTransition(event RolloutEvent) {
|
||||
r.BatchRollingState = BatchVerifyingState
|
||||
return
|
||||
}
|
||||
panic(fmt.Errorf(invalidBatchRollingStateTransition, batchRollingState, event))
|
||||
r.illegalStateTransition(fmt.Errorf(invalidBatchRollingStateTransition, batchRollingState, event))
|
||||
|
||||
case BatchVerifyingState:
|
||||
if event == OneBatchAvailableEvent {
|
||||
@@ -387,7 +397,7 @@ func (r *RolloutStatus) batchStateTransition(event RolloutEvent) {
|
||||
r.BatchRollingState = BatchFinalizingState
|
||||
return
|
||||
}
|
||||
panic(fmt.Errorf(invalidBatchRollingStateTransition, batchRollingState, event))
|
||||
r.illegalStateTransition(fmt.Errorf(invalidBatchRollingStateTransition, batchRollingState, event))
|
||||
|
||||
case BatchFinalizingState:
|
||||
if event == FinishedOneBatchEvent {
|
||||
@@ -402,7 +412,7 @@ func (r *RolloutStatus) batchStateTransition(event RolloutEvent) {
|
||||
r.RollingState = FinalisingState
|
||||
return
|
||||
}
|
||||
panic(fmt.Errorf(invalidBatchRollingStateTransition, batchRollingState, event))
|
||||
r.illegalStateTransition(fmt.Errorf(invalidBatchRollingStateTransition, batchRollingState, event))
|
||||
|
||||
case BatchReadyState:
|
||||
if event == BatchRolloutApprovedEvent {
|
||||
@@ -411,9 +421,9 @@ func (r *RolloutStatus) batchStateTransition(event RolloutEvent) {
|
||||
r.CurrentBatch++
|
||||
return
|
||||
}
|
||||
panic(fmt.Errorf(invalidBatchRollingStateTransition, batchRollingState, event))
|
||||
r.illegalStateTransition(fmt.Errorf(invalidBatchRollingStateTransition, batchRollingState, event))
|
||||
|
||||
default:
|
||||
panic(fmt.Errorf("invalid batch rolling state %s", batchRollingState))
|
||||
r.illegalStateTransition(fmt.Errorf("invalid batch rolling state %s", batchRollingState))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -64,4 +64,6 @@ const (
|
||||
TypeCap = "Managing Capabilities"
|
||||
// TypeSystem defines one category
|
||||
TypeSystem = "System"
|
||||
// TypePlugin defines one category used in Kubectl Plugin
|
||||
TypePlugin = "Debug and Test"
|
||||
)
|
||||
|
||||
@@ -446,6 +446,17 @@ spec:
|
||||
- type
|
||||
type: object
|
||||
type: array
|
||||
workloadDefinition:
|
||||
description: WorkloadDefinition is the definition of a WorkloadDefinition, such as deployments/apps.v1
|
||||
properties:
|
||||
apiVersion:
|
||||
type: string
|
||||
kind:
|
||||
type: string
|
||||
required:
|
||||
- apiVersion
|
||||
- kind
|
||||
type: object
|
||||
required:
|
||||
- healthy
|
||||
- name
|
||||
@@ -1501,6 +1512,17 @@ spec:
|
||||
- type
|
||||
type: object
|
||||
type: array
|
||||
workloadDefinition:
|
||||
description: WorkloadDefinition is the definition of a WorkloadDefinition, such as deployments/apps.v1
|
||||
properties:
|
||||
apiVersion:
|
||||
type: string
|
||||
kind:
|
||||
type: string
|
||||
required:
|
||||
- apiVersion
|
||||
- kind
|
||||
type: object
|
||||
required:
|
||||
- healthy
|
||||
- name
|
||||
|
||||
@@ -458,6 +458,17 @@ spec:
|
||||
- type
|
||||
type: object
|
||||
type: array
|
||||
workloadDefinition:
|
||||
description: WorkloadDefinition is the definition of a WorkloadDefinition, such as deployments/apps.v1
|
||||
properties:
|
||||
apiVersion:
|
||||
type: string
|
||||
kind:
|
||||
type: string
|
||||
required:
|
||||
- apiVersion
|
||||
- kind
|
||||
type: object
|
||||
required:
|
||||
- healthy
|
||||
- name
|
||||
@@ -916,6 +927,17 @@ spec:
|
||||
- type
|
||||
type: object
|
||||
type: array
|
||||
workloadDefinition:
|
||||
description: WorkloadDefinition is the definition of a WorkloadDefinition, such as deployments/apps.v1
|
||||
properties:
|
||||
apiVersion:
|
||||
type: string
|
||||
kind:
|
||||
type: string
|
||||
required:
|
||||
- apiVersion
|
||||
- kind
|
||||
type: object
|
||||
required:
|
||||
- healthy
|
||||
- name
|
||||
|
||||
@@ -26,9 +26,6 @@ spec:
|
||||
- jsonPath: .metadata.annotations.definition\.oam\.dev/description
|
||||
name: DESCRIPTION
|
||||
type: string
|
||||
- jsonPath: .metadata.creationTimestamp
|
||||
name: AGE
|
||||
type: date
|
||||
name: v1alpha2
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
@@ -225,9 +222,6 @@ spec:
|
||||
- jsonPath: .metadata.annotations.definition\.oam\.dev/description
|
||||
name: DESCRIPTION
|
||||
type: string
|
||||
- jsonPath: .metadata.creationTimestamp
|
||||
name: AGE
|
||||
type: date
|
||||
name: v1beta1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
|
||||
@@ -26,9 +26,6 @@ spec:
|
||||
- jsonPath: .metadata.annotations.definition\.oam\.dev/description
|
||||
name: DESCRIPTION
|
||||
type: string
|
||||
- jsonPath: .metadata.creationTimestamp
|
||||
name: AGE
|
||||
type: date
|
||||
name: v1alpha2
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
@@ -209,9 +206,6 @@ spec:
|
||||
- jsonPath: .metadata.annotations.definition\.oam\.dev/description
|
||||
name: DESCRIPTION
|
||||
type: string
|
||||
- jsonPath: .metadata.creationTimestamp
|
||||
name: AGE
|
||||
type: date
|
||||
name: v1beta1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
|
||||
@@ -23,9 +23,6 @@ spec:
|
||||
- jsonPath: .spec.definitionRef.name
|
||||
name: DEFINITION-NAME
|
||||
type: string
|
||||
- jsonPath: .metadata.creationTimestamp
|
||||
name: AGE
|
||||
type: date
|
||||
name: v1alpha2
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
@@ -212,9 +209,6 @@ spec:
|
||||
- jsonPath: .metadata.annotations.definition\.oam\.dev/description
|
||||
name: DESCRIPTION
|
||||
type: string
|
||||
- jsonPath: .metadata.creationTimestamp
|
||||
name: AGE
|
||||
type: date
|
||||
name: v1beta1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
|
||||
@@ -72,7 +72,7 @@ webhooks:
|
||||
{{- if .Values.admissionWebhooks.patch.enabled }}
|
||||
failurePolicy: Ignore
|
||||
{{- else }}
|
||||
failurePolicy: Fails
|
||||
failurePolicy: Fail
|
||||
{{- end }}
|
||||
name: mutating.core.oam-dev.v1alpha2.components
|
||||
sideEffects: None
|
||||
@@ -99,7 +99,7 @@ webhooks:
|
||||
{{- if .Values.admissionWebhooks.patch.enabled }}
|
||||
failurePolicy: Ignore
|
||||
{{- else }}
|
||||
failurePolicy: Fails
|
||||
failurePolicy: Fail
|
||||
{{- end }}
|
||||
name: mcontainerized.kb.io
|
||||
sideEffects: None
|
||||
|
||||
24
charts/vela-core/templates/defwithtemplate/annotations.yaml
Normal file
24
charts/vela-core/templates/defwithtemplate/annotations.yaml
Normal file
@@ -0,0 +1,24 @@
|
||||
# Code generated by KubeVela templates. DO NOT EDIT.
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: TraitDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
definition.oam.dev/description: "Add annotations for your Workload."
|
||||
name: annotations
|
||||
namespace: {{.Values.systemDefinitionNamespace}}
|
||||
spec:
|
||||
appliesToWorkloads:
|
||||
- webservice
|
||||
- worker
|
||||
schematic:
|
||||
cue:
|
||||
template: |-
|
||||
patch: {
|
||||
spec: template: metadata: annotations: {
|
||||
for k, v in parameter {
|
||||
"\(k)": v
|
||||
}
|
||||
}
|
||||
}
|
||||
parameter: [string]: string
|
||||
|
||||
51
charts/vela-core/templates/defwithtemplate/cpuhpa.yaml
Normal file
51
charts/vela-core/templates/defwithtemplate/cpuhpa.yaml
Normal file
@@ -0,0 +1,51 @@
|
||||
# Code generated by KubeVela templates. DO NOT EDIT.
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: TraitDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
definition.oam.dev/description: "configure k8s HPA with CPU metrics for Deployment"
|
||||
name: cpuscaler
|
||||
namespace: {{.Values.systemDefinitionNamespace}}
|
||||
spec:
|
||||
appliesToWorkloads:
|
||||
- webservice
|
||||
- worker
|
||||
schematic:
|
||||
cue:
|
||||
template: |
|
||||
outputs: cpuhpa: {
|
||||
apiVersion: "autoscaling/v2beta2"
|
||||
kind: "HorizontalPodAutoscaler"
|
||||
metadata: name: context.name
|
||||
spec: {
|
||||
scaleTargetRef: {
|
||||
apiVersion: "apps/v1"
|
||||
kind: "Deployment"
|
||||
name: context.name
|
||||
}
|
||||
minReplicas: parameter.min
|
||||
maxReplicas: parameter.max
|
||||
metrics: [{
|
||||
type: "Resource"
|
||||
resource: {
|
||||
name: "cpu"
|
||||
target: {
|
||||
type: "Utilization"
|
||||
averageUtilization: parameter.cpuUtil
|
||||
}
|
||||
}
|
||||
}]
|
||||
}
|
||||
}
|
||||
parameter: {
|
||||
|
||||
// +usage=Specify the minimal number of replicas to which the autoscaler can scale down
|
||||
min: *1 | int
|
||||
|
||||
// +usage=Specify the maximum number of of replicas to which the autoscaler can scale up
|
||||
max: *10 | int
|
||||
|
||||
// +usage=Specify the average cpu utilization, for example, 50 means the CPU usage is 50%
|
||||
cpuUtil: *50 | int
|
||||
}
|
||||
|
||||
@@ -25,11 +25,6 @@ spec:
|
||||
schematic:
|
||||
cue:
|
||||
template: |
|
||||
parameter: {
|
||||
domain: string
|
||||
http: [string]: int
|
||||
}
|
||||
|
||||
// trait template can have multiple outputs in one trait
|
||||
outputs: service: {
|
||||
apiVersion: "v1"
|
||||
@@ -72,3 +67,11 @@ spec:
|
||||
}
|
||||
}
|
||||
|
||||
parameter: {
|
||||
// +usage=Specify the domain you want to expose
|
||||
domain: string
|
||||
|
||||
// +usage=Specify the mapping relationship between the http path and the workload port
|
||||
http: [string]: int
|
||||
}
|
||||
|
||||
|
||||
24
charts/vela-core/templates/defwithtemplate/labels.yaml
Normal file
24
charts/vela-core/templates/defwithtemplate/labels.yaml
Normal file
@@ -0,0 +1,24 @@
|
||||
# Code generated by KubeVela templates. DO NOT EDIT.
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: TraitDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
definition.oam.dev/description: "Add labels for your Workload."
|
||||
name: labels
|
||||
namespace: {{.Values.systemDefinitionNamespace}}
|
||||
spec:
|
||||
appliesToWorkloads:
|
||||
- webservice
|
||||
- worker
|
||||
schematic:
|
||||
cue:
|
||||
template: |-
|
||||
patch: {
|
||||
spec: template: metadata: labels: {
|
||||
for k, v in parameter {
|
||||
"\(k)": v
|
||||
}
|
||||
}
|
||||
}
|
||||
parameter: [string]: string
|
||||
|
||||
@@ -3,30 +3,22 @@ apiVersion: core.oam.dev/v1beta1
|
||||
kind: TraitDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
definition.oam.dev/description: "Configures replicas for your service."
|
||||
definition.oam.dev/description: "Configures replicas for your service by patch replicas field."
|
||||
name: scaler
|
||||
namespace: {{.Values.systemDefinitionNamespace}}
|
||||
spec:
|
||||
appliesToWorkloads:
|
||||
- webservice
|
||||
- worker
|
||||
definitionRef:
|
||||
name: manualscalertraits.core.oam.dev
|
||||
workloadRefPath: spec.workloadRef
|
||||
podDisruptive: true
|
||||
podDisruptive: false
|
||||
schematic:
|
||||
cue:
|
||||
template: |
|
||||
outputs: scaler: {
|
||||
apiVersion: "core.oam.dev/v1alpha2"
|
||||
kind: "ManualScalerTrait"
|
||||
spec: {
|
||||
replicaCount: parameter.replicas
|
||||
}
|
||||
patch: {
|
||||
spec: replicas: parameter.replicas
|
||||
}
|
||||
parameter: {
|
||||
//+short=r
|
||||
//+usage=Replicas of the workload
|
||||
// +usage=Specify the number of workload
|
||||
replicas: *1 | int
|
||||
}
|
||||
|
||||
|
||||
48
charts/vela-core/templates/defwithtemplate/sidecar.yaml
Normal file
48
charts/vela-core/templates/defwithtemplate/sidecar.yaml
Normal file
@@ -0,0 +1,48 @@
|
||||
# Code generated by KubeVela templates. DO NOT EDIT.
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: TraitDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
definition.oam.dev/description: "inject a sidecar container into your app"
|
||||
name: sidecar
|
||||
namespace: {{.Values.systemDefinitionNamespace}}
|
||||
spec:
|
||||
appliesToWorkloads:
|
||||
- webservice
|
||||
- worker
|
||||
schematic:
|
||||
cue:
|
||||
template: |-
|
||||
patch: {
|
||||
// +patchKey=name
|
||||
spec: template: spec: containers: [{
|
||||
name: parameter.name
|
||||
image: parameter.image
|
||||
command: parameter.cmd
|
||||
if parameter["volumes"] != _|_ {
|
||||
volumeMounts: [ for v in parameter.volumes {
|
||||
{
|
||||
mountPath: v.path
|
||||
name: v.name
|
||||
}
|
||||
}]
|
||||
}
|
||||
}]
|
||||
}
|
||||
parameter: {
|
||||
// +usage=Specify the name of sidecar container
|
||||
name: string
|
||||
|
||||
// +usage=Specify the image of sidecar container
|
||||
image: string
|
||||
|
||||
// +usage=Specify the commands run in the sidecar
|
||||
cmd?: [...string]
|
||||
|
||||
// +usage=Specify the shared volume path
|
||||
volumes?: [...{
|
||||
name: string
|
||||
path: string
|
||||
}]
|
||||
}
|
||||
|
||||
@@ -34,7 +34,7 @@ spec:
|
||||
}
|
||||
}
|
||||
parameter: {
|
||||
// +usage=specify number of tasks to run in parallel
|
||||
// +usage=Specify number of tasks to run in parallel
|
||||
// +short=c
|
||||
count: *1 | int
|
||||
|
||||
|
||||
@@ -62,7 +62,50 @@ spec:
|
||||
cpu: parameter.cpu
|
||||
}
|
||||
}
|
||||
|
||||
if parameter["volumes"] != _|_ {
|
||||
volumeMounts: [ for v in parameter.volumes {
|
||||
{
|
||||
mountPath: v.mountPath
|
||||
name: v.name
|
||||
}}]
|
||||
}
|
||||
}]
|
||||
|
||||
if parameter["volumes"] != _|_ {
|
||||
volumes: [ for v in parameter.volumes {
|
||||
{
|
||||
name: v.name
|
||||
if v.type == "pvc" {
|
||||
persistentVolumeClaim: {
|
||||
claimName: v.claimName
|
||||
}
|
||||
}
|
||||
if v.type == "configMap" {
|
||||
configMap: {
|
||||
defaultMode: v.defaultMode
|
||||
name: v.cmName
|
||||
if v.items != _|_ {
|
||||
items: v.items
|
||||
}
|
||||
}
|
||||
}
|
||||
if v.type == "secret" {
|
||||
secret: {
|
||||
defaultMode: v.defaultMode
|
||||
secretName: v.secretName
|
||||
if v.items != _|_ {
|
||||
items: v.items
|
||||
}
|
||||
}
|
||||
}
|
||||
if v.type == "emptyDir" {
|
||||
emptyDir: {
|
||||
medium: v.medium
|
||||
}
|
||||
}
|
||||
}}]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -100,5 +143,37 @@ spec:
|
||||
|
||||
// If addRevisionLabel is true, the appRevision label will be added to the underlying pods
|
||||
addRevisionLabel: *false | bool
|
||||
|
||||
// +usage=Declare volumes and volumeMounts
|
||||
volumes?: [...{
|
||||
name: string
|
||||
mountPath: string
|
||||
// +usage=Specify volume type, options: "pvc","configMap","secret","emptyDir"
|
||||
type: "pvc" | "configMap" | "secret" | "emptyDir"
|
||||
if type == "pvc" {
|
||||
claimName: string
|
||||
}
|
||||
if type == "configMap" {
|
||||
defaultMode: *420 | int
|
||||
cmName: string
|
||||
items?: [...{
|
||||
key: string
|
||||
path: string
|
||||
mode: *511 | int
|
||||
}]
|
||||
}
|
||||
if type == "secret" {
|
||||
defaultMode: *420 | int
|
||||
secretName: string
|
||||
items?: [...{
|
||||
key: string
|
||||
path: string
|
||||
mode: *511 | int
|
||||
}]
|
||||
}
|
||||
if type == "emptyDir" {
|
||||
medium: *"" | "Memory"
|
||||
}
|
||||
}]
|
||||
}
|
||||
|
||||
|
||||
@@ -35,7 +35,59 @@ spec:
|
||||
if parameter["cmd"] != _|_ {
|
||||
command: parameter.cmd
|
||||
}
|
||||
|
||||
if parameter["volumes"] != _|_ {
|
||||
volumeMounts: [ for v in parameter.volumes {
|
||||
{
|
||||
mountPath: v.mountPath
|
||||
name: v.name
|
||||
}
|
||||
}]
|
||||
}
|
||||
|
||||
if parameter["volumes"] != _|_ {
|
||||
volumeMounts: [ for v in parameter.volumes {
|
||||
{
|
||||
mountPath: v.mountPath
|
||||
name: v.name
|
||||
}}]
|
||||
}
|
||||
}]
|
||||
|
||||
if parameter["volumes"] != _|_ {
|
||||
volumes: [ for v in parameter.volumes {
|
||||
{
|
||||
name: v.name
|
||||
if v.type == "pvc" {
|
||||
persistentVolumeClaim: {
|
||||
claimName: v.claimName
|
||||
}
|
||||
}
|
||||
if v.type == "configMap" {
|
||||
configMap: {
|
||||
defaultMode: v.defaultMode
|
||||
name: v.cmName
|
||||
if v.items != _|_ {
|
||||
items: v.items
|
||||
}
|
||||
}
|
||||
}
|
||||
if v.type == "secret" {
|
||||
secret: {
|
||||
defaultMode: v.defaultMode
|
||||
secretName: v.secretName
|
||||
if v.items != _|_ {
|
||||
items: v.items
|
||||
}
|
||||
}
|
||||
}
|
||||
if v.type == "emptyDir" {
|
||||
emptyDir: {
|
||||
medium: v.medium
|
||||
}
|
||||
}
|
||||
}}]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -47,5 +99,36 @@ spec:
|
||||
image: string
|
||||
// +usage=Commands to run in the container
|
||||
cmd?: [...string]
|
||||
// +usage=Declare volumes and volumeMounts
|
||||
volumes?: [...{
|
||||
name: string
|
||||
mountPath: string
|
||||
// +usage=Specify volume type, options: "pvc","configMap","secret","emptyDir"
|
||||
type: "pvc" | "configMap" | "secret" | "emptyDir"
|
||||
if type == "pvc" {
|
||||
claimName: string
|
||||
}
|
||||
if type == "configMap" {
|
||||
defaultMode: *420 | int
|
||||
cmName: string
|
||||
items?: [...{
|
||||
key: string
|
||||
path: string
|
||||
mode: *511 | int
|
||||
}]
|
||||
}
|
||||
if type == "secret" {
|
||||
defaultMode: *420 | int
|
||||
secretName: string
|
||||
items?: [...{
|
||||
key: string
|
||||
path: string
|
||||
mode: *511 | int
|
||||
}]
|
||||
}
|
||||
if type == "emptyDir" {
|
||||
medium: *"" | "Memory"
|
||||
}
|
||||
}]
|
||||
}
|
||||
|
||||
|
||||
@@ -121,6 +121,7 @@ spec:
|
||||
- "--disable-caps={{ .Values.disableCaps }}"
|
||||
{{ end }}
|
||||
- "--system-definition-namespace={{ .Values.systemDefinitionNamespace }}"
|
||||
- "--application-revision-limit={{ .Values.applicationRevisionLimit }}"
|
||||
image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
|
||||
imagePullPolicy: {{ quote .Values.image.pullPolicy }}
|
||||
resources:
|
||||
|
||||
60
charts/vela-core/templates/test/test-application.yaml
Normal file
60
charts/vela-core/templates/test/test-application.yaml
Normal file
@@ -0,0 +1,60 @@
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: Application
|
||||
metadata:
|
||||
annotations:
|
||||
helm.sh/hook: test-success
|
||||
name: first-vela-app
|
||||
spec:
|
||||
components:
|
||||
- name: express-server
|
||||
type: webservice
|
||||
properties:
|
||||
image: crccheck/hello-world
|
||||
port: 8000
|
||||
traits:
|
||||
- type: ingress
|
||||
properties:
|
||||
domain: testsvc.example.com
|
||||
http:
|
||||
"/": 8000
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: "{{ .Release.Name }}-application-test"
|
||||
annotations:
|
||||
"helm.sh/hook": test
|
||||
spec:
|
||||
serviceAccountName: kubevela-vela-core
|
||||
containers:
|
||||
- name: {{ .Release.Name }}-application-test
|
||||
image: alpine/k8s:1.18.2
|
||||
imagePullPolicy: IfNotPresent
|
||||
command:
|
||||
- /bin/bash
|
||||
- -ec
|
||||
- |
|
||||
|
||||
set -e
|
||||
|
||||
echo "Waiting application is ready..."
|
||||
|
||||
echo "waiting for application being Applied"
|
||||
kubectl -n vela-system wait --for=condition=Applied application first-vela-app --timeout=3m
|
||||
echo "application being Applied"
|
||||
|
||||
# wait for deploy being created
|
||||
echo "waiting for deployment being available"
|
||||
kubectl -n vela-system wait --for=condition=available deploy express-server --timeout 3m
|
||||
echo "deployment being available"
|
||||
|
||||
# wait for ingress being created
|
||||
while ! [ `kubectl -n vela-system get ing express-server | grep -v NAME | wc -l` = 1 ]; do
|
||||
echo "waiting for ingress being created"
|
||||
sleep 1
|
||||
done
|
||||
|
||||
|
||||
|
||||
echo "Application and its components are created"
|
||||
restartPolicy: Never
|
||||
@@ -80,3 +80,5 @@ admissionWebhooks:
|
||||
|
||||
|
||||
systemDefinitionNamespace: vela-system
|
||||
|
||||
applicationRevisionLimit: 10
|
||||
|
||||
@@ -30,9 +30,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/crossplane/crossplane-runtime/pkg/logging"
|
||||
injectorcontroller "github.com/oam-dev/trait-injector/controllers"
|
||||
"github.com/oam-dev/trait-injector/pkg/injector"
|
||||
"github.com/oam-dev/trait-injector/pkg/plugin"
|
||||
"go.uber.org/zap/zapcore"
|
||||
"gopkg.in/natefinch/lumberjack.v2"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
@@ -70,7 +67,7 @@ func main() {
|
||||
var logRetainDate int
|
||||
var certDir string
|
||||
var webhookPort int
|
||||
var useWebhook, useTraitInjector bool
|
||||
var useWebhook bool
|
||||
var controllerArgs oamcontroller.Args
|
||||
var healthAddr string
|
||||
var disableCaps string
|
||||
@@ -79,7 +76,6 @@ func main() {
|
||||
var applyOnceOnly string
|
||||
|
||||
flag.BoolVar(&useWebhook, "use-webhook", false, "Enable Admission Webhook")
|
||||
flag.BoolVar(&useTraitInjector, "use-trait-injector", false, "Enable TraitInjector")
|
||||
flag.StringVar(&certDir, "webhook-cert-dir", "/k8s-webhook-server/serving-certs", "Admission webhook cert/key dir.")
|
||||
flag.IntVar(&webhookPort, "webhook-port", 9443, "admission webhook listen address")
|
||||
flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.")
|
||||
@@ -93,6 +89,8 @@ func main() {
|
||||
flag.BoolVar(&logDebug, "log-debug", false, "Enable debug logs for development purpose")
|
||||
flag.IntVar(&controllerArgs.RevisionLimit, "revision-limit", 50,
|
||||
"RevisionLimit is the maximum number of revisions that will be maintained. The default value is 50.")
|
||||
flag.IntVar(&controllerArgs.AppRevisionLimit, "application-revision-limit", 10,
|
||||
"application-revision-limit is the maximum number of application useless revisions that will be maintained, if the useless revisions exceed this number, older ones will be GCed first.The default value is 10.")
|
||||
flag.StringVar(&controllerArgs.CustomRevisionHookURL, "custom-revision-hook-url", "",
|
||||
"custom-revision-hook-url is a webhook url which will let KubeVela core to call with applicationConfiguration and component info and return a customized component revision")
|
||||
flag.BoolVar(&controllerArgs.ApplicationConfigurationInstalled, "app-config-installed", true,
|
||||
@@ -217,24 +215,6 @@ func main() {
|
||||
}
|
||||
setupLog.Info("use storage driver", "storageDriver", os.Getenv(system.StorageDriverEnv))
|
||||
|
||||
if useTraitInjector {
|
||||
// register all service injectors
|
||||
plugin.RegisterTargetInjectors(injector.Defaults()...)
|
||||
|
||||
tiWebhook := &injectorcontroller.ServiceBindingReconciler{
|
||||
Client: mgr.GetClient(),
|
||||
Log: ctrl.Log.WithName("controllers").WithName("ServiceBinding"),
|
||||
Scheme: mgr.GetScheme(),
|
||||
Recorder: mgr.GetEventRecorderFor("servicebinding"),
|
||||
}
|
||||
if err = (tiWebhook).SetupWithManager(mgr); err != nil {
|
||||
setupLog.Error(err, "unable to create controller", "controller", "ServiceBinding")
|
||||
os.Exit(1)
|
||||
}
|
||||
// this has hard coded requirement "./ssl/service-injector.pem", "./ssl/service-injector.key"
|
||||
go tiWebhook.ServeAdmission()
|
||||
}
|
||||
|
||||
setupLog.Info("starting the vela controller manager")
|
||||
|
||||
if err := mgr.Start(makeSignalHandler()); err != nil {
|
||||
|
||||
35
cmd/plugin/main.go
Normal file
35
cmd/plugin/main.go
Normal file
@@ -0,0 +1,35 @@
|
||||
/*
|
||||
Copyright 2021. The KubeVela Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/oam-dev/kubevela/pkg/plugin/cli"
|
||||
)
|
||||
|
||||
func main() {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
|
||||
command := cli.NewCommand()
|
||||
|
||||
if err := command.Execute(); err != nil {
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
@@ -51,7 +51,7 @@ There are two distinguished layers:
|
||||
- **API layer**: It defines the API discovery and serving endpoints that Vela APIServer implementation must follow. This is the integration point for external system components (e.g. UI) to contact.
|
||||
- **Storage layer**: It describes the storage systems and objects that Vela APIServer syncs data with behind the scene. There are three types of storage:
|
||||
- **K8s cluster**: Vela APIServer manages multiple k8s clusters with regard to the applications and definitions custom resources.
|
||||
- **Catalog server**: Vela APIServer manages mulitple catalogs which contain COTS application pacakges. Currently in our use case the catalogs resides in Git repos. In the future we can extend this to other catalog storage like file server, object storage.
|
||||
- **Catalog server**: Vela APIServer manages multiple catalogs which contain COTS application pacakges. Currently in our use case the catalogs resides in Git repos. In the future we can extend this to other catalog storage like file server, object storage.
|
||||
- **MySQL database**: Vela APIServer stores global, cross-cluster, cross catalog information in a MySQL database. These data do not exist in k8s or catalog and thus need to be managed by APIServer in a separate database. The database is usually hosted on cloud.
|
||||
|
||||
#### Environment API
|
||||
|
||||
@@ -105,7 +105,7 @@ In the following example, we are assuming the app has deployed v1 now and is upg
|
||||
|
||||
We will make sure the spec works for the following environments:
|
||||
|
||||
- K8s ingress + service (traffic split percetange determined by replica number)
|
||||
- K8s ingress + service (traffic split percentages determined by replica number)
|
||||
- Istio service mesh
|
||||
|
||||
Here is the workflow with Istio:
|
||||
|
||||
@@ -142,7 +142,7 @@ Besides the condition in `apply-once-only`, `apply-once-only-force` has one more
|
||||
|
||||
Three available options are provided to a vela-core runtime setup flag named `apply-one-only`, referring to three modes:
|
||||
|
||||
- off - `apply-once-only` is disabeld, this is the default option
|
||||
- off - `apply-once-only` is disabled, this is the default option
|
||||
- on - `apply-once-only` is enabled
|
||||
- force - `apply-once-only-force` is enabled
|
||||
|
||||
|
||||
@@ -152,7 +152,7 @@ status:
|
||||
# update with original manifest fails
|
||||
# reconciling also fails for cannot applying trait
|
||||
```
|
||||
Additonally, if a trait has no immutable field, update will eliminate all fields set by others.
|
||||
Additionally, if a trait has no immutable field, update will eliminate all fields set by others.
|
||||
```yaml
|
||||
# original trait manifest
|
||||
kind: Bar
|
||||
|
||||
@@ -136,7 +136,7 @@ const cueTemplate appCreateMode = "appFile"
|
||||
```go
|
||||
type appConfigValue struct {
|
||||
appName string `json:"appName"`
|
||||
defintion runtime.RawExtension `json:"defintion"` // the content
|
||||
definition runtime.RawExtension `json:"definition"` // the content
|
||||
definitionName string `json:"definitionName"` // use to find the definition
|
||||
definitionType string `json:"definitionType"`
|
||||
}
|
||||
@@ -161,7 +161,7 @@ const cueTemplate appUpdateMode = "appFile"
|
||||
```go
|
||||
|
||||
type appConfigValue struct {
|
||||
defintion runtime.RawExtension `json:"defintion"` // the content
|
||||
definition runtime.RawExtension `json:"definition"` // the content
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
@@ -179,7 +179,7 @@ just change the application, and the system will pick up the change, then apply
|
||||
depends on the workload type and we will list each in the
|
||||
[rollout with different workload](#Rollout plan work with different type of workloads) section
|
||||
. This special AC logic is also the real magic for the other rollout scenario to work as AC
|
||||
controller is the only entity that is directly responsible for emiting the workload to the k8s.
|
||||
controller is the only entity that is directly responsible for emitting the workload to the k8s.
|
||||
|
||||
|
||||
#### ApplicationDeployment workflow
|
||||
|
||||
@@ -2,14 +2,14 @@
|
||||
|
||||
[Here](https://github.com/oam-dev/kubevela/tree/master/docs) is the source documentation of [Kubevela website](http://kubevela.io/).
|
||||
Any files modified here will trigger the `check-docs` Github action to run and validate the docs could be built successfully into the website.
|
||||
Any changes on these files(`docs/en/*`, `resource/*`, `sidebars.js`) will be submitted to the corresponding locations of the repo
|
||||
Any changes on these files(`docs/en/*`, `docs/en/resource/*`, `sidebars.js`) will be submitted to the corresponding locations of the repo
|
||||
[kubevela.io](https://github.com/oam-dev/kubevela.io). The Github-Action there will parse the document and publish it to the Kubevela Website automatically.
|
||||
|
||||
Please follow our guides below to learn how to write the docs in the right way.
|
||||
|
||||
## Add or Update Docs
|
||||
|
||||
When you add or modify the docs, these three files(`docs/en/`, `resource/` and `sidebars.js`) should be taken into consideration.
|
||||
When you add or modify the docs, these three files(`docs/en/`, `docs/en/resource/` and `sidebars.js`) should be taken into consideration.
|
||||
|
||||
1. `docs/en/`, the main English documentation files are mainly located in this folder. All markdown files need to follow the format,
|
||||
that the title at the beginning should be in the following format:
|
||||
@@ -28,11 +28,11 @@ When you add or modify the docs, these three files(`docs/en/`, `resource/` and `
|
||||
[the definition and template concepts](../platform-engineers/definition-and-templates)
|
||||
```
|
||||
|
||||
2. `resource/`, image files are located in this folder. When you want to use link any image in documentation,
|
||||
2. `docs/en/resource/`, image files are located in this folder. When you want to use link any image in documentation,
|
||||
you should put the image resources here and use a relative path like below:
|
||||
|
||||
```markdown
|
||||

|
||||

|
||||
```
|
||||
|
||||
3. `sidebars.js`, this file contains the navigation information of the KubeVela website.
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||

|
||||

|
||||
|
||||
*Make shipping applications more enjoyable.*
|
||||
|
||||
|
||||
124
docs/en/advanced-install.md
Normal file
124
docs/en/advanced-install.md
Normal file
@@ -0,0 +1,124 @@
|
||||
---
|
||||
title: Advanced Topics for Installation
|
||||
---
|
||||
|
||||
## Install KubeVela with cert-manager
|
||||
|
||||
KubeVela can use cert-manager to generate certs for your application if it's available. Note that you need to install cert-manager **before** the KubeVela chart.
|
||||
|
||||
```shell script
|
||||
helm repo add jetstack https://charts.jetstack.io
|
||||
helm repo update
|
||||
helm install cert-manager jetstack/cert-manager --namespace cert-manager --version v1.2.0 --create-namespace --set installCRDs=true
|
||||
```
|
||||
|
||||
Install KubeVela with cert-manager enabled:
|
||||
```shell script
|
||||
helm install --create-namespace -n vela-system --set admissionWebhooks.certManager.enabled=true kubevela kubevela/vela-core
|
||||
```
|
||||
|
||||
## Install Pre-release
|
||||
|
||||
Add the flag `--devel` to the command `helm search` to choose a pre-release
|
||||
version in format `<next_version>-rc-master`. It means a release candidate version built on the `master` branch,
|
||||
such as `0.4.0-rc-master`.
|
||||
|
||||
```shell script
|
||||
helm search repo kubevela/vela-core -l --devel
|
||||
```
|
||||
```console
|
||||
NAME CHART VERSION APP VERSION DESCRIPTION
|
||||
kubevela/vela-core 0.4.0-rc-master 0.4.0-rc-master A Helm chart for KubeVela core
|
||||
kubevela/vela-core 0.3.2 0.3.2 A Helm chart for KubeVela core
|
||||
kubevela/vela-core 0.3.1 0.3.1 A Helm chart for KubeVela core
|
||||
```
|
||||
|
||||
And try the following command to install it.
|
||||
|
||||
```shell script
|
||||
helm install --create-namespace -n vela-system kubevela kubevela/vela-core --version <next_version>-rc-master
|
||||
```
|
||||
```console
|
||||
NAME: kubevela
|
||||
LAST DEPLOYED: Thu Apr 1 19:41:30 2021
|
||||
NAMESPACE: vela-system
|
||||
STATUS: deployed
|
||||
REVISION: 1
|
||||
NOTES:
|
||||
Welcome to use the KubeVela! Enjoy your shipping application journey!
|
||||
```
|
||||
|
||||
## Upgrade
|
||||
|
||||
### Step 1. Update Helm repo
|
||||
|
||||
|
||||
You can explore the newly released chart versions of KubeVela by running:
|
||||
|
||||
```shell
|
||||
helm repo update
|
||||
helm search repo kubevela/vela-core -l
|
||||
```
|
||||
|
||||
### Step 2. Upgrade KubeVela CRDs
|
||||
|
||||
```shell
|
||||
kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/charts/vela-core/crds/core.oam.dev_componentdefinitions.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/charts/vela-core/crds/core.oam.dev_workloaddefinitions.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/charts/vela-core/crds/core.oam.dev_traitdefinitions.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/charts/vela-core/crds/core.oam.dev_applications.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/charts/vela-core/crds/core.oam.dev_approllouts.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/charts/vela-core/crds/core.oam.dev_applicationrevisions.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/charts/vela-core/crds/core.oam.dev_scopedefinitions.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/charts/vela-core/crds/core.oam.dev_appdeployments.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/charts/vela-core/crds/core.oam.dev_applicationcontexts.yaml
|
||||
```
|
||||
|
||||
> Tips: If you see errors like `* is invalid: spec.scope: Invalid value: "Namespaced": field is immutable`. Please delete the CRD which reports the error and re-apply the kubevela crds.
|
||||
|
||||
```shell
|
||||
kubectl delete crd \
|
||||
scopedefinitions.core.oam.dev \
|
||||
traitdefinitions.core.oam.dev \
|
||||
workloaddefinitions.core.oam.dev
|
||||
```
|
||||
|
||||
### Step 3. Upgrade KubeVela Helm chart
|
||||
|
||||
```shell
|
||||
helm upgrade --install --create-namespace --namespace vela-system kubevela kubevela/vela-core --version <the_new_version>
|
||||
```
|
||||
|
||||
## Clean Up
|
||||
|
||||
Run:
|
||||
|
||||
```shell script
|
||||
helm uninstall -n vela-system kubevela
|
||||
rm -r ~/.vela
|
||||
```
|
||||
|
||||
This will uninstall KubeVela server component and its dependency components.
|
||||
This also cleans up local CLI cache.
|
||||
|
||||
Then clean up CRDs (CRDs are not removed via helm by default):
|
||||
|
||||
```shell script
|
||||
kubectl delete crd \
|
||||
appdeployments.core.oam.dev \
|
||||
applicationconfigurations.core.oam.dev \
|
||||
applicationcontexts.core.oam.dev \
|
||||
applicationdeployments.core.oam.dev \
|
||||
applicationrevisions.core.oam.dev \
|
||||
applications.core.oam.dev \
|
||||
approllouts.core.oam.dev \
|
||||
componentdefinitions.core.oam.dev \
|
||||
components.core.oam.dev \
|
||||
containerizedworkloads.core.oam.dev \
|
||||
healthscopes.core.oam.dev \
|
||||
manualscalertraits.core.oam.dev \
|
||||
podspecworkloads.standard.oam.dev \
|
||||
scopedefinitions.core.oam.dev \
|
||||
traitdefinitions.core.oam.dev \
|
||||
workloaddefinitions.core.oam.dev
|
||||
```
|
||||
@@ -1,35 +1,58 @@
|
||||
---
|
||||
title: Application CRD
|
||||
title: Deploy Application
|
||||
---
|
||||
|
||||
This documentation will walk through how to use `Application` object to define your apps with corresponding operational behaviors in declarative approach.
|
||||
This documentation will walk through a full application deployment workflow on KubeVela platform.
|
||||
|
||||
## Example
|
||||
## Introduction
|
||||
|
||||
The sample application below claimed a `backend` component with *Worker* workload type, and a `frontend` component with *Web Service* workload type.
|
||||
KubeVela is a fully self-service platform. All capabilities an application deployment needs are maintained as building block modules in this platform. Specifically:
|
||||
- Components - deployable/provisionable entities that composed your application deployment
|
||||
  - e.g. a Kubernetes workload, a MySQL database, or an AWS OSS bucket
|
||||
- Traits - attachable operational features per your needs
|
||||
- e.g. autoscaling rules, rollout strategies, ingress rules, sidecars, security policies etc
|
||||
|
||||
Moreover, the `frontend` component claimed `sidecar` and `autoscaler` traits which means the workload will be automatically injected with a `fluentd` sidecar and scale from 1-100 replicas triggered by CPU usage.
|
||||
## Step 1: Check Capabilities in the Platform
|
||||
|
||||
As user of this platform, you could check available components you can deploy, and available traits you can attach.
|
||||
|
||||
```console
|
||||
$ kubectl get componentdefinitions -n vela-system
|
||||
NAME WORKLOAD-KIND DESCRIPTION AGE
|
||||
task Job Describes jobs that run code or a script to completion. 5h52m
|
||||
webservice Deployment Describes long-running, scalable, containerized services that have a stable network endpoint to receive external network traffic from customers. 5h52m
|
||||
worker Deployment Describes long-running, scalable, containerized services that running at backend. They do NOT have network endpoint to receive external network traffic. 5h52m
|
||||
```
|
||||
|
||||
```console
|
||||
$ kubectl get traitdefinitions -n vela-system
|
||||
NAME APPLIES-TO DESCRIPTION AGE
|
||||
ingress ["webservice","worker"] Configures K8s ingress and service to enable web traffic for your service. Please use route trait in cap center for advanced usage. 6h8m
|
||||
cpuscaler ["webservice","worker"] Configure k8s HPA with CPU metrics for Deployment 6h8m
|
||||
```
|
||||
|
||||
To show the specification for given capability, you could use `vela` CLI. For example, `vela show webservice` will return full schema of *Web Service* component and `vela show webservice --web` will open its capability reference documentation in your browser.
|
||||
|
||||
## Step 2: Design and Deploy Application
|
||||
|
||||
In KubeVela, `Application` is the main API to define your application deployment based on available capabilities. Every `Application` could contain multiple components, each of them can be attached with a number of traits per needs.
|
||||
|
||||
Now let's define an application composed by *Web Service* and *Worker* components.
|
||||
|
||||
```yaml
|
||||
# sample.yaml
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: website
|
||||
spec:
|
||||
components:
|
||||
- name: backend
|
||||
type: worker
|
||||
properties:
|
||||
image: busybox
|
||||
cmd:
|
||||
- sleep
|
||||
- '1000'
|
||||
- name: frontend
|
||||
type: webservice
|
||||
properties:
|
||||
image: nginx
|
||||
traits:
|
||||
- type: autoscaler
|
||||
- type: cpuscaler
|
||||
properties:
|
||||
min: 1
|
||||
max: 10
|
||||
@@ -38,14 +61,32 @@ spec:
|
||||
properties:
|
||||
name: "sidecar-test"
|
||||
image: "fluentd"
|
||||
- name: backend
|
||||
type: worker
|
||||
properties:
|
||||
image: busybox
|
||||
cmd:
|
||||
- sleep
|
||||
- '1000'
|
||||
```
|
||||
|
||||
In this sample, we also attached `sidecar` and `cpuscaler` traits to the `frontend` component.
|
||||
So after deployed, the `frontend` component instance (a Kubernetes Deployment workload) will be automatically injected
|
||||
with a `fluentd` sidecar and automatically scale from 1-10 replicas based on CPU usage.
|
||||
|
||||
### Deploy the Application
|
||||
|
||||
Apply application yaml above, then you'll get the application started
|
||||
Apply application YAML to Kubernetes:
|
||||
|
||||
```shell
|
||||
$ kubectl get application -o yaml
|
||||
$ kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/docs/examples/enduser/sample.yaml
|
||||
application.core.oam.dev/website created
|
||||
```
|
||||
|
||||
You'll get the application becomes `running`.
|
||||
|
||||
```shell
|
||||
$ kubectl get application website -o yaml
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: Application
|
||||
metadata:
|
||||
@@ -64,12 +105,31 @@ status:
|
||||
|
||||
```
|
||||
|
||||
You could see a Deployment named `frontend` with a container `fluentd` injected is running.
|
||||
### Verify the Deployment
|
||||
|
||||
You could see a Deployment named `frontend` is running, with port exposed, and with a container `fluentd` injected.
|
||||
|
||||
```shell
|
||||
$ kubectl get deploy frontend
|
||||
NAME READY UP-TO-DATE AVAILABLE AGE
|
||||
frontend 1/1 1 1 100m
|
||||
frontend 1/1 1 1 97s
|
||||
```
|
||||
|
||||
```shell
|
||||
$ kubectl get deploy frontend -o yaml
|
||||
...
|
||||
spec:
|
||||
containers:
|
||||
- image: nginx
|
||||
imagePullPolicy: Always
|
||||
name: frontend
|
||||
ports:
|
||||
- containerPort: 80
|
||||
protocol: TCP
|
||||
- image: fluentd
|
||||
imagePullPolicy: Always
|
||||
name: sidecar-test
|
||||
...
|
||||
```
|
||||
|
||||
Another Deployment is also running named `backend`.
|
||||
@@ -77,161 +137,13 @@ Another Deployment is also running named `backend`.
|
||||
```shell
|
||||
$ kubectl get deploy backend
|
||||
NAME READY UP-TO-DATE AVAILABLE AGE
|
||||
backend 1/1 1 1 100m
|
||||
backend 1/1 1 1 111s
|
||||
```
|
||||
|
||||
An HPA was also created by the `autoscaler` trait.
|
||||
An HPA was also created by the `cpuscaler` trait.
|
||||
|
||||
```shell
|
||||
$ kubectl get HorizontalPodAutoscaler frontend
|
||||
NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE
|
||||
frontend Deployment/frontend <unknown>/50% 1 10 1 101m
|
||||
```
|
||||
|
||||
|
||||
## Under the Hood
|
||||
|
||||
In above sample, the `type: worker` means the specification of this component (claimed in following `properties` section) will be enforced by a `ComponentDefinition` object named `worker` as below:
|
||||
|
||||
```yaml
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: ComponentDefinition
|
||||
metadata:
|
||||
name: worker
|
||||
annotations:
|
||||
definition.oam.dev/description: "Describes long-running, scalable, containerized services that running at backend. They do NOT have network endpoint to receive external network traffic."
|
||||
spec:
|
||||
workload:
|
||||
definition:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
schematic:
|
||||
cue:
|
||||
template: |
|
||||
output: {
|
||||
apiVersion: "apps/v1"
|
||||
kind: "Deployment"
|
||||
spec: {
|
||||
selector: matchLabels: {
|
||||
"app.oam.dev/component": context.name
|
||||
}
|
||||
template: {
|
||||
metadata: labels: {
|
||||
"app.oam.dev/component": context.name
|
||||
}
|
||||
spec: {
|
||||
containers: [{
|
||||
name: context.name
|
||||
image: parameter.image
|
||||
|
||||
if parameter["cmd"] != _|_ {
|
||||
command: parameter.cmd
|
||||
}
|
||||
}]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
parameter: {
|
||||
image: string
|
||||
cmd?: [...string]
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
Hence, the `properties` section of `backend` only supports two parameters: `image` and `cmd`, this is enforced by the `parameter` list of the `.spec.template` field of the definition.
|
||||
|
||||
The similar extensible abstraction mechanism also applies to traits.
|
||||
For example, `type: autoscaler` in `frontend` means its trait specification (i.e. `properties` section)
|
||||
will be enforced by a `TraitDefinition` object named `autoscaler` as below:
|
||||
|
||||
```yaml
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: TraitDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
definition.oam.dev/description: "configure k8s HPA for Deployment"
|
||||
name: hpa
|
||||
spec:
|
||||
appliesToWorkloads:
|
||||
- webservice
|
||||
- worker
|
||||
schematic:
|
||||
cue:
|
||||
template: |
|
||||
outputs: hpa: {
|
||||
apiVersion: "autoscaling/v2beta2"
|
||||
kind: "HorizontalPodAutoscaler"
|
||||
metadata: name: context.name
|
||||
spec: {
|
||||
scaleTargetRef: {
|
||||
apiVersion: "apps/v1"
|
||||
kind: "Deployment"
|
||||
name: context.name
|
||||
}
|
||||
minReplicas: parameter.min
|
||||
maxReplicas: parameter.max
|
||||
metrics: [{
|
||||
type: "Resource"
|
||||
resource: {
|
||||
name: "cpu"
|
||||
target: {
|
||||
type: "Utilization"
|
||||
averageUtilization: parameter.cpuUtil
|
||||
}
|
||||
}
|
||||
}]
|
||||
}
|
||||
}
|
||||
parameter: {
|
||||
min: *1 | int
|
||||
max: *10 | int
|
||||
cpuUtil: *50 | int
|
||||
}
|
||||
```
|
||||
|
||||
The application also have a `sidecar` trait.
|
||||
|
||||
```yaml
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: TraitDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
definition.oam.dev/description: "add sidecar to the app"
|
||||
name: sidecar
|
||||
spec:
|
||||
appliesToWorkloads:
|
||||
- webservice
|
||||
- worker
|
||||
schematic:
|
||||
cue:
|
||||
template: |-
|
||||
patch: {
|
||||
// +patchKey=name
|
||||
spec: template: spec: containers: [parameter]
|
||||
}
|
||||
parameter: {
|
||||
name: string
|
||||
image: string
|
||||
command?: [...string]
|
||||
}
|
||||
```
|
||||
|
||||
All the definition objects are expected to be declared and installed by platform team and end users will only focus on `Application` resource.
|
||||
|
||||
Please note that the end users of KubeVela do NOT need to know about definition objects, they learn how to use a given capability with visualized forms (or the JSON schema of parameters if they prefer). Please check the [Generate Forms from Definitions](/docs/platform-engineers/openapi-v3-json-schema) section about how this is achieved.
|
||||
|
||||
### Conventions and "Standard Contract"
|
||||
|
||||
After the `Application` resource is applied to Kubernetes cluster,
|
||||
the KubeVela runtime will generate and manage the underlying resource instances following the "standard contract" and conventions below.
|
||||
|
||||
|
||||
| Label | Description |
|
||||
| :--: | :---------: |
|
||||
|`workload.oam.dev/type=<component definition name>` | The name of its corresponding `ComponentDefinition` |
|
||||
|`trait.oam.dev/type=<trait definition name>` | The name of its corresponding `TraitDefinition` |
|
||||
|`app.oam.dev/name=<app name>` | The name of the application it belongs to |
|
||||
|`app.oam.dev/component=<component name>` | The name of the component it belongs to |
|
||||
|`trait.oam.dev/resource=<name of trait resource instance>` | The name of trait resource instance |
|
||||
|`app.oam.dev/appRevision=<name of app revision>` | The name of the application revision it belongs to |
|
||||
```
|
||||
@@ -16,11 +16,11 @@ First of all, KubeVela introduces a workflow with separate of concerns as below:
|
||||
|
||||
Below is how this workflow looks like:
|
||||
|
||||

|
||||

|
||||
|
||||
This template-based workflow makes it possible for the platform team to enforce best practices and deployment confidence with a set of Kubernetes CRDs, and gives end users a *PaaS-like* experience (*i.e. app-centric, higher level abstractions, self-service operations etc*) naturally.
|
||||
|
||||

|
||||

|
||||
|
||||
Below are the core concepts in KubeVela that make this happen.
|
||||
|
||||
@@ -64,7 +64,7 @@ spec:
|
||||
|
||||
## Building the Abstraction
|
||||
|
||||
Unlike most of the higher level abstractions, the `Application` resource in KubeVela is a LEGO-style object and does not even have fixed schema. Instead, it is composed by building blocks (app components and traits etc.) that allow you to on-board platform capabilities to this application definition via your own abstractions.
|
||||
The `Application` resource in KubeVela is a LEGO-style object and does not even have fixed schema. Instead, it is composed by building blocks (app components and traits etc.) that allow you to on-board platform capabilities to this application definition via your own abstractions.
|
||||
|
||||
The building blocks used to abstract and model platform capabilities are named `ComponentDefinition` and `TraitDefinition`.
|
||||
|
||||
@@ -93,12 +93,12 @@ Currently, a KubeVela `environment` only maps to a Kubernetes namespace, while t
|
||||
|
||||
The main concepts of KubeVela could be shown as below:
|
||||
|
||||

|
||||

|
||||
|
||||
## Architecture
|
||||
|
||||
The overall architecture of KubeVela is shown as below:
|
||||
|
||||

|
||||

|
||||
|
||||
Specifically, the application controller is responsible for application abstraction and encapsulation (i.e. the controller for `Application` and `Definition`). The rollout controller will handle progressive rollout strategy with the whole application as a unit. The multi-cluster deployment engine is responsible for deploying the application across multiple clusters and environments with traffic shifting and rollout features supported.
|
||||
|
||||
@@ -209,6 +209,7 @@ output: {
|
||||
| Context Variable | Description |
|
||||
| :--: | :---------: |
|
||||
| `context.appRevision` | The revision of the application |
|
||||
| `context.appRevisionNum` | The revision number(`int` type) of the application, e.g., `context.appRevisionNum` will be `1` if `context.appRevision` is `app-v1`|
|
||||
| `context.appName` | The name of the application |
|
||||
| `context.name` | The name of the component of the application |
|
||||
| `context.namespace` | The namespace of the application |
|
||||
|
||||
@@ -4,7 +4,7 @@ title: Define resources located in defferent namespace with application
|
||||
|
||||
In this section, we will introduce how to use cue template create resources (workload/trait) in different namespace with the application.
|
||||
|
||||
By default, the `metadata.namespace` of K8s resource in CuE template is automatically filled with the same namespace of the applicaiton.
|
||||
By default, the `metadata.namespace` of K8s resource in CuE template is automatically filled with the same namespace of the application.
|
||||
|
||||
If you want to create K8s resources running in a specific namespace which is different from the application's, you can set the `metadata.namespace` field.
|
||||
KubeVela will create the resources in the specified namespace, and create a resourceTracker object as the owner of those resources.
|
||||
|
||||
@@ -22,7 +22,7 @@ This command will automatically open the reference documentation for given workl
|
||||
|
||||
Let's take `$ vela show webservice --web` as example. The detailed schema documentation for `Web Service` workload type will show up immediately as below:
|
||||
|
||||

|
||||

|
||||
|
||||
Note that in the section named `Specification`, it even provides you with a full sample for the usage of this workload type with a fake name `my-service-name`.
|
||||
|
||||
@@ -30,7 +30,7 @@ Note that there's in the section named `Specification`, it even provides you wit
|
||||
|
||||
Similarly, we can also do `$ vela show autoscale --web`:
|
||||
|
||||

|
||||

|
||||
|
||||
With these auto-generated reference documentations, we could easily complete the application description by simple copy-paste, for example:
|
||||
|
||||
|
||||
@@ -102,6 +102,6 @@ kubectl --namespace monitoring port-forward `kubectl -n monitoring get pods -l p
|
||||
|
||||
Then access the Prometheus dashboard via http://localhost:9090/targets
|
||||
|
||||

|
||||

|
||||
|
||||
</details>
|
||||
|
||||
@@ -149,14 +149,14 @@ Hello World -- This is rolling 02
|
||||
In detail, the `Rollout` controller will create a canary of your app, and then gradually shift traffic to the canary while measuring key performance indicators like HTTP request success rate at the same time.
|
||||
|
||||
|
||||

|
||||

|
||||
|
||||
In this sample, for every `10s`, `5%` traffic will be shifted to the canary from the primary, until the traffic on the canary reaches `50%`. In the meantime, the number of canary instances will automatically scale to `replicas: 2` as configured in the Appfile.
|
||||
|
||||
|
||||
Based on analysis result of the KPIs during this traffic shifting, a canary will be promoted or aborted if analysis is failed. If promoting, the primary will be upgraded from v1 to v2, and traffic will be fully shifted back to the primary instances. So as result, canary instances will be deleted after the promotion finished.
|
||||
|
||||

|
||||

|
||||
|
||||
> Note: KubeVela's `Rollout` trait is implemented with [Weaveworks Flagger](https://flagger.app/) operator.
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
title: The Reference Documentation of Capabilities
|
||||
title: Overview
|
||||
---
|
||||
|
||||
In this documentation, we will show how to check the detailed schema of a given capability (i.e. component type or trait).
|
||||
@@ -20,13 +20,13 @@ This command will automatically open the reference documentation for given compo
|
||||
|
||||
Let's take `$ vela show webservice --web` as example. The detailed schema documentation for `Web Service` component type will show up immediately as below:
|
||||
|
||||

|
||||

|
||||
|
||||
Note that in the section named `Specification`, it even provides you with a full sample for the usage of this workload type with a fake name `my-service-name`.
|
||||
|
||||
Similarly, we can also do `$ vela show autoscale`:
|
||||
|
||||

|
||||

|
||||
|
||||
With these auto-generated reference documentations, we could easily complete the application description by simple copy-paste, for example:
|
||||
|
||||
|
||||
@@ -11,11 +11,7 @@ Describes jobs that run code or a script to completion.
|
||||
List of all configuration options for a `Task` workload type.
|
||||
|
||||
```yaml
|
||||
name: my-app-name
|
||||
|
||||
services:
|
||||
my-service-name:
|
||||
type: task
|
||||
...
|
||||
image: perl
|
||||
count: 10
|
||||
cmd: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
|
||||
|
||||
@@ -11,11 +11,7 @@ Describes long-running, scalable, containerized services that have a stable netw
|
||||
List of all configuration options for a `Webservice` workload type.
|
||||
|
||||
```yaml
|
||||
name: my-app-name
|
||||
|
||||
services:
|
||||
my-service-name:
|
||||
type: webservice # could be skipped
|
||||
...
|
||||
image: oamdev/testapp:v1
|
||||
cmd: ["node", "server.js"]
|
||||
port: 8080
|
||||
|
||||
@@ -11,11 +11,7 @@ Describes long-running, scalable, containerized services that running at backend
|
||||
List of all configuration options for a `Worker` workload type.
|
||||
|
||||
```yaml
|
||||
name: my-app-name
|
||||
|
||||
services:
|
||||
my-service-name:
|
||||
type: worker
|
||||
...
|
||||
image: oamdev/testapp:v1
|
||||
cmd: ["node", "server.js"]
|
||||
```
|
||||
|
||||
@@ -270,7 +270,7 @@ Metrics server is already enabled.
|
||||
|
||||
Metrics server has to be enabled in `Operations/Add-ons` section of [Alibaba Cloud console](https://cs.console.aliyun.com/) as below.
|
||||
|
||||

|
||||

|
||||
|
||||
Please refer to [metrics server debug guide](https://help.aliyun.com/document_detail/176515.html) if you hit more issue.
|
||||
|
||||
|
||||
@@ -11,22 +11,16 @@ Automatically scales workloads by resource utilization metrics or cron triggers.
|
||||
List of all configuration options for a `Autoscale` trait.
|
||||
|
||||
```yaml
|
||||
name: testapp
|
||||
|
||||
services:
|
||||
express-server:
|
||||
...
|
||||
|
||||
autoscale:
|
||||
min: 1
|
||||
max: 4
|
||||
cron:
|
||||
startAt: "14:00"
|
||||
duration: "2h"
|
||||
days: "Monday, Thursday"
|
||||
replicas: 2
|
||||
timezone: "America/Los_Angeles"
|
||||
cpuPercent: 10
|
||||
...
|
||||
min: 1
|
||||
max: 4
|
||||
cron:
|
||||
startAt: "14:00"
|
||||
duration: "2h"
|
||||
days: "Monday, Thursday"
|
||||
replicas: 2
|
||||
timezone: "America/Los_Angeles"
|
||||
cpuPercent: 10
|
||||
```
|
||||
|
||||
## Properties
|
||||
|
||||
@@ -10,7 +10,12 @@ Configures K8s ingress and service to enable web traffic for your service. Pleas
|
||||
|
||||
List of all configuration options for a `Ingress` trait.
|
||||
|
||||
```yaml```
|
||||
```yaml
|
||||
...
|
||||
domain: testsvc.example.com
|
||||
http:
|
||||
/: 8000
|
||||
```
|
||||
|
||||
## Properties
|
||||
|
||||
|
||||
@@ -11,17 +11,12 @@ Configures monitoring metrics for your service.
|
||||
List of all configuration options for a `Metrics` trait.
|
||||
|
||||
```yaml
|
||||
name: my-app-name
|
||||
|
||||
services:
|
||||
my-service-name:
|
||||
...
|
||||
metrics:
|
||||
format: "prometheus"
|
||||
port: 8080
|
||||
path: "/metrics"
|
||||
scheme: "http"
|
||||
enabled: true
|
||||
...
|
||||
format: "prometheus"
|
||||
port: 8080
|
||||
path: "/metrics"
|
||||
scheme: "http"
|
||||
enabled: true
|
||||
```
|
||||
|
||||
## Properties
|
||||
|
||||
@@ -11,10 +11,7 @@ Configures Canary deployment strategy for your application.
|
||||
List of all configuration options for a `Rollout` trait.
|
||||
|
||||
```yaml
|
||||
servcies:
|
||||
express-server:
|
||||
...
|
||||
|
||||
...
|
||||
rollout:
|
||||
replicas: 2
|
||||
stepWeight: 50
|
||||
|
||||
@@ -11,17 +11,12 @@ Configures external access to your service.
|
||||
List of all configuration options for a `Route` trait.
|
||||
|
||||
```yaml
|
||||
name: my-app-name
|
||||
|
||||
services:
|
||||
my-service-name:
|
||||
...
|
||||
route:
|
||||
domain: example.com
|
||||
issuer: tls
|
||||
rules:
|
||||
- path: /testapp
|
||||
rewriteTarget: /
|
||||
...
|
||||
domain: example.com
|
||||
issuer: tls
|
||||
rules:
|
||||
- path: /testapp
|
||||
rewriteTarget: /
|
||||
```
|
||||
|
||||
## Properties
|
||||
|
||||
@@ -11,11 +11,7 @@ Configures replicas for your service.
|
||||
List of all configuration options for a `Scaler` trait.
|
||||
|
||||
```yaml
|
||||
name: my-app-name
|
||||
|
||||
services:
|
||||
my-service-name:
|
||||
...
|
||||
...
|
||||
scaler:
|
||||
replicas: 100
|
||||
```
|
||||
|
||||
242
docs/en/end-user/cloud-resources.md
Normal file
242
docs/en/end-user/cloud-resources.md
Normal file
@@ -0,0 +1,242 @@
|
||||
---
|
||||
title: Provision and Consume Cloud Resources
|
||||
---
|
||||
|
||||
> ⚠️ This section requires your platform builder has already installed the [cloud resources related capabilities](../platform-engineers/cloud-services).
|
||||
|
||||
## Provision and consume cloud resource in a single application v1 (one cloud resource)
|
||||
|
||||
Check the parameters of cloud resource component:
|
||||
|
||||
```shell
|
||||
$ kubectl vela show alibaba-rds
|
||||
|
||||
# Properties
|
||||
+---------------+------------------------------------------------+--------+----------+--------------------+
|
||||
| NAME | DESCRIPTION | TYPE | REQUIRED | DEFAULT |
|
||||
+---------------+------------------------------------------------+--------+----------+--------------------+
|
||||
| engine | RDS engine | string | true | mysql |
|
||||
| engineVersion | The version of RDS engine | string | true | 8.0 |
|
||||
| instanceClass | The instance class for the RDS | string | true | rds.mysql.c1.large |
|
||||
| username | RDS username | string | true | |
|
||||
| secretName | Secret name which RDS connection will write to | string | true | |
|
||||
+---------------+------------------------------------------------+--------+----------+--------------------+
|
||||
```
|
||||
|
||||
Use the service binding trait to bind cloud resources into workload as ENV.
|
||||
|
||||
Create an application with a cloud resource provisioning component and a consuming component as below.
|
||||
|
||||
```yaml
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: webapp
|
||||
spec:
|
||||
components:
|
||||
- name: express-server
|
||||
type: webservice
|
||||
properties:
|
||||
image: zzxwill/flask-web-application:v0.3.1-crossplane
|
||||
ports: 80
|
||||
traits:
|
||||
- type: service-binding
|
||||
properties:
|
||||
envMappings:
|
||||
# environments refer to db-conn secret
|
||||
DB_PASSWORD:
|
||||
secret: db-conn
|
||||
key: password # 1) If the env name is different from secret key, secret key has to be set.
|
||||
endpoint:
|
||||
secret: db-conn # 2) If the env name is the same as the secret key, secret key can be omitted.
|
||||
username:
|
||||
secret: db-conn
|
||||
|
||||
- name: sample-db
|
||||
type: alibaba-rds
|
||||
properties:
|
||||
name: sample-db
|
||||
engine: mysql
|
||||
engineVersion: "8.0"
|
||||
instanceClass: rds.mysql.c1.large
|
||||
username: oamtest
|
||||
secretName: db-conn
|
||||
|
||||
```
|
||||
|
||||
Apply it and verify the application.
|
||||
|
||||
```shell
|
||||
$ kubectl get application
|
||||
NAME AGE
|
||||
webapp 46m
|
||||
|
||||
$ kubectl port-forward deployment/express-server 80:80
|
||||
Forwarding from 127.0.0.1:80 -> 80
|
||||
Forwarding from [::1]:80 -> 80
|
||||
Handling connection for 80
|
||||
Handling connection for 80
|
||||
```
|
||||
|
||||

|
||||
|
||||
## Provision and consume cloud resource in a single application v2 (two cloud resources)
|
||||
|
||||
Based on the section `Provisioning and consuming cloud resource in a single application v1 (one cloud resource)`,
|
||||
|
||||
Update the application to also consume cloud resource OSS.
|
||||
|
||||
```yaml
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: webapp
|
||||
spec:
|
||||
components:
|
||||
- name: express-server
|
||||
type: webservice
|
||||
properties:
|
||||
image: zzxwill/flask-web-application:v0.3.1-crossplane
|
||||
ports: 80
|
||||
traits:
|
||||
- type: service-binding
|
||||
properties:
|
||||
envMappings:
|
||||
# environments refer to db-conn secret
|
||||
DB_PASSWORD:
|
||||
secret: db-conn
|
||||
key: password # 1) If the env name is different from secret key, secret key has to be set.
|
||||
endpoint:
|
||||
secret: db-conn # 2) If the env name is the same as the secret key, secret key can be omitted.
|
||||
username:
|
||||
secret: db-conn
|
||||
# environments refer to oss-conn secret
|
||||
BUCKET_NAME:
|
||||
secret: oss-conn
|
||||
key: Bucket
|
||||
|
||||
- name: sample-db
|
||||
type: alibaba-rds
|
||||
properties:
|
||||
name: sample-db
|
||||
engine: mysql
|
||||
engineVersion: "8.0"
|
||||
instanceClass: rds.mysql.c1.large
|
||||
username: oamtest
|
||||
secretName: db-conn
|
||||
|
||||
- name: sample-oss
|
||||
type: alibaba-oss
|
||||
properties:
|
||||
name: velaweb
|
||||
secretName: oss-conn
|
||||
```
|
||||
|
||||
Apply it and verify the application.
|
||||
|
||||
```shell
|
||||
$ kubectl port-forward deployment/express-server 80:80
|
||||
Forwarding from 127.0.0.1:80 -> 80
|
||||
Forwarding from [::1]:80 -> 80
|
||||
Handling connection for 80
|
||||
Handling connection for 80
|
||||
```
|
||||
|
||||

|
||||
|
||||
## Provision and consume cloud resource in different applications
|
||||
|
||||
In this section, cloud resource will be provisioned in one application and consumed in another application.
|
||||
|
||||
### Provision Cloud Resource
|
||||
|
||||
Instantiate RDS component with `alibaba-rds` workload type in an [Application](../application.md) to provide cloud resources.
|
||||
|
||||
As we have claimed an RDS instance with ComponentDefinition name `alibaba-rds`.
|
||||
The component in the application should refer to this type.
|
||||
|
||||
```yaml
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: baas-rds
|
||||
spec:
|
||||
components:
|
||||
- name: sample-db
|
||||
type: alibaba-rds
|
||||
properties:
|
||||
name: sample-db
|
||||
engine: mysql
|
||||
engineVersion: "8.0"
|
||||
instanceClass: rds.mysql.c1.large
|
||||
username: oamtest
|
||||
secretName: db-conn
|
||||
```
|
||||
|
||||
Apply the application to Kubernetes and a RDS instance will be automatically provisioned (may take some time, ~2 mins).
|
||||
|
||||
A secret `db-conn` will also be created in the same namespace as that of the application.
|
||||
|
||||
```shell
|
||||
$ kubectl get application
|
||||
NAME AGE
|
||||
baas-rds 9h
|
||||
|
||||
$ kubectl get rdsinstance
|
||||
NAME READY SYNCED STATE ENGINE VERSION AGE
|
||||
sample-db-v1 True True Running mysql 8.0 9h
|
||||
|
||||
$ kubectl get secret
|
||||
NAME TYPE DATA AGE
|
||||
db-conn connection.crossplane.io/v1alpha1 4 9h
|
||||
|
||||
$ ✗ kubectl get secret db-conn -o yaml
|
||||
apiVersion: v1
|
||||
data:
|
||||
endpoint: xxx==
|
||||
password: yyy
|
||||
port: MzMwNg==
|
||||
username: b2FtdGVzdA==
|
||||
kind: Secret
|
||||
```
|
||||
|
||||
### Consume the Cloud Resource
|
||||
|
||||
In this section, we will show how another component consumes the RDS instance.
|
||||
|
||||
> Note: we recommend defining the cloud resource claiming to an independent application if that cloud resource has
|
||||
> standalone lifecycle.
|
||||
|
||||
Now create the Application to consume the data.
|
||||
|
||||
```yaml
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: webapp
|
||||
spec:
|
||||
components:
|
||||
- name: express-server
|
||||
type: webconsumer
|
||||
properties:
|
||||
image: zzxwill/flask-web-application:v0.3.1-crossplane
|
||||
ports: 80
|
||||
dbSecret: db-conn
|
||||
```
|
||||
|
||||
```shell
|
||||
$ kubectl get application
|
||||
NAME AGE
|
||||
baas-rds 10h
|
||||
webapp 14h
|
||||
|
||||
$ kubectl get deployment
|
||||
NAME READY UP-TO-DATE AVAILABLE AGE
|
||||
express-server-v1 1/1 1 1 9h
|
||||
|
||||
$ kubectl port-forward deployment/express-server 80:80
|
||||
```
|
||||
|
||||
We can see the cloud resource is successfully consumed by the application.
|
||||
|
||||

|
||||
376
docs/en/end-user/diagnose.md
Normal file
376
docs/en/end-user/diagnose.md
Normal file
@@ -0,0 +1,376 @@
|
||||
---
|
||||
title: Debug and Test
|
||||
---
|
||||
|
||||
You can make further debug and test for your application by using [vela kubectl plugin](./kubectlplugin).
|
||||
|
||||
## Dry-Run the `Application`
|
||||
|
||||
Dry run will help you to understand what are the real resources which will to be expanded and deployed
|
||||
to the Kubernetes cluster. In other words, it will mock to run the same logic as KubeVela's controller
|
||||
and output the results locally.
|
||||
|
||||
For example, let's dry-run the following application:
|
||||
|
||||
```yaml
|
||||
# app.yaml
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: vela-app
|
||||
spec:
|
||||
components:
|
||||
- name: express-server
|
||||
type: webservice
|
||||
properties:
|
||||
image: crccheck/hello-world
|
||||
port: 8000
|
||||
traits:
|
||||
- type: ingress
|
||||
properties:
|
||||
domain: testsvc.example.com
|
||||
http:
|
||||
"/": 8000
|
||||
```
|
||||
|
||||
```shell
|
||||
kubectl vela dry-run -f app.yaml
|
||||
---
|
||||
# Application(vela-app) -- Comopnent(express-server)
|
||||
---
|
||||
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app.oam.dev/appRevision: ""
|
||||
app.oam.dev/component: express-server
|
||||
app.oam.dev/name: vela-app
|
||||
workload.oam.dev/type: webservice
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app.oam.dev/component: express-server
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.oam.dev/component: express-server
|
||||
spec:
|
||||
containers:
|
||||
- image: crccheck/hello-world
|
||||
name: express-server
|
||||
ports:
|
||||
- containerPort: 8000
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
app.oam.dev/appRevision: ""
|
||||
app.oam.dev/component: express-server
|
||||
app.oam.dev/name: vela-app
|
||||
trait.oam.dev/resource: service
|
||||
trait.oam.dev/type: ingress
|
||||
name: express-server
|
||||
spec:
|
||||
ports:
|
||||
- port: 8000
|
||||
targetPort: 8000
|
||||
selector:
|
||||
app.oam.dev/component: express-server
|
||||
|
||||
---
|
||||
apiVersion: networking.k8s.io/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
labels:
|
||||
app.oam.dev/appRevision: ""
|
||||
app.oam.dev/component: express-server
|
||||
app.oam.dev/name: vela-app
|
||||
trait.oam.dev/resource: ingress
|
||||
trait.oam.dev/type: ingress
|
||||
name: express-server
|
||||
spec:
|
||||
rules:
|
||||
- host: testsvc.example.com
|
||||
http:
|
||||
paths:
|
||||
- backend:
|
||||
serviceName: express-server
|
||||
servicePort: 8000
|
||||
path: /
|
||||
|
||||
---
|
||||
```
|
||||
|
||||
In this example, the definitions(`webservice` and `ingress`) which `vela-app` depends on is the built-in
|
||||
components and traits of KubeVela. You can also use `-d `or `--definitions` to specify your local definition files.
|
||||
|
||||
`-d `or `--definitions` permitting user to provide capability definitions used in the application from local files.
|
||||
`dry-run` cmd will prioritize the provided capabilities than the living ones in the cluster.
|
||||
|
||||
## Live-Diff the `Application`
|
||||
|
||||
Live-diff helps you to have a preview of what would change if you're going to upgrade an application without making any changes
|
||||
to the living cluster.
|
||||
This feature is extremely useful for serious production deployment, and make the upgrade under control
|
||||
|
||||
It basically generates a diff between the specific revision of running instance and the local candidate application.
|
||||
The result shows the changes (added/modified/removed/no_change) of the application as well as its sub-resources,
|
||||
such as components and traits.
|
||||
|
||||
Assume you have just deployed the application in dry-run section.
|
||||
Then you can list the revisions of the Application.
|
||||
|
||||
```shell
|
||||
$ kubectl get apprev -l app.oam.dev/name=vela-app
|
||||
NAME AGE
|
||||
vela-app-v1 50s
|
||||
```
|
||||
|
||||
Assume we're going to upgrade the application like below.
|
||||
|
||||
```yaml
|
||||
# new-app.yaml
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: vela-app
|
||||
spec:
|
||||
components:
|
||||
- name: express-server
|
||||
type: webservice
|
||||
properties:
|
||||
image: crccheck/hello-world
|
||||
port: 8080 # change port
|
||||
cpu: 0.5 # add requests cpu units
|
||||
- name: my-task # add a component
|
||||
type: task
|
||||
properties:
|
||||
image: busybox
|
||||
cmd: ["sleep", "1000"]
|
||||
traits:
|
||||
- type: ingress
|
||||
properties:
|
||||
domain: testsvc.example.com
|
||||
http:
|
||||
"/": 8080 # change port
|
||||
```
|
||||
|
||||
Run live-diff like this:
|
||||
|
||||
```shell
|
||||
kubectl vela live-diff -f new-app.yaml -r vela-app-v1
|
||||
```
|
||||
|
||||
`-r` or `--revision` is a flag that specifies the name of a living ApplicationRevision with which you want to compare the updated application.
|
||||
|
||||
`-c` or `--context` is a flag that specifies the number of lines shown around a change. The unchanged lines
|
||||
which are out of the context of a change will be omitted. It's useful if the diff result contains a lot of unchanged content
|
||||
while you just want to focus on the changed ones.
|
||||
|
||||
<details><summary> Click to view the details of diff result </summary>
|
||||
|
||||
```bash
|
||||
---
|
||||
# Application (vela-app) has been modified(*)
|
||||
---
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: Application
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
name: vela-app
|
||||
namespace: default
|
||||
spec:
|
||||
components:
|
||||
- name: express-server
|
||||
properties:
|
||||
+ cpu: 0.5
|
||||
image: crccheck/hello-world
|
||||
- port: 8000
|
||||
+ port: 8080
|
||||
+ type: webservice
|
||||
+ - name: my-task
|
||||
+ properties:
|
||||
+ cmd:
|
||||
+ - sleep
|
||||
+ - "1000"
|
||||
+ image: busybox
|
||||
traits:
|
||||
- properties:
|
||||
domain: testsvc.example.com
|
||||
http:
|
||||
- /: 8000
|
||||
+ /: 8080
|
||||
type: ingress
|
||||
- type: webservice
|
||||
+ type: task
|
||||
status:
|
||||
batchRollingState: ""
|
||||
currentBatch: 0
|
||||
rollingState: ""
|
||||
upgradedReadyReplicas: 0
|
||||
upgradedReplicas: 0
|
||||
|
||||
---
|
||||
## Component (express-server) has been modified(*)
|
||||
---
|
||||
apiVersion: core.oam.dev/v1alpha2
|
||||
kind: Component
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
app.oam.dev/name: vela-app
|
||||
name: express-server
|
||||
spec:
|
||||
workload:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app.oam.dev/appRevision: ""
|
||||
app.oam.dev/component: express-server
|
||||
app.oam.dev/name: vela-app
|
||||
workload.oam.dev/type: webservice
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app.oam.dev/component: express-server
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.oam.dev/component: express-server
|
||||
spec:
|
||||
containers:
|
||||
- image: crccheck/hello-world
|
||||
name: express-server
|
||||
ports:
|
||||
- - containerPort: 8000
|
||||
+ - containerPort: 8080
|
||||
status:
|
||||
observedGeneration: 0
|
||||
|
||||
---
|
||||
### Component (express-server) / Trait (ingress/service) has been removed(-)
|
||||
---
|
||||
- apiVersion: v1
|
||||
- kind: Service
|
||||
- metadata:
|
||||
- labels:
|
||||
- app.oam.dev/appRevision: ""
|
||||
- app.oam.dev/component: express-server
|
||||
- app.oam.dev/name: vela-app
|
||||
- trait.oam.dev/resource: service
|
||||
- trait.oam.dev/type: ingress
|
||||
- name: express-server
|
||||
- spec:
|
||||
- ports:
|
||||
- - port: 8000
|
||||
- targetPort: 8000
|
||||
- selector:
|
||||
- app.oam.dev/component: express-server
|
||||
|
||||
---
|
||||
### Component (express-server) / Trait (ingress/ingress) has been removed(-)
|
||||
---
|
||||
- apiVersion: networking.k8s.io/v1beta1
|
||||
- kind: Ingress
|
||||
- metadata:
|
||||
- labels:
|
||||
- app.oam.dev/appRevision: ""
|
||||
- app.oam.dev/component: express-server
|
||||
- app.oam.dev/name: vela-app
|
||||
- trait.oam.dev/resource: ingress
|
||||
- trait.oam.dev/type: ingress
|
||||
- name: express-server
|
||||
- spec:
|
||||
- rules:
|
||||
- - host: testsvc.example.com
|
||||
- http:
|
||||
- paths:
|
||||
- - backend:
|
||||
- serviceName: express-server
|
||||
- servicePort: 8000
|
||||
- path: /
|
||||
|
||||
---
|
||||
## Component (my-task) has been added(+)
|
||||
---
|
||||
+ apiVersion: core.oam.dev/v1alpha2
|
||||
+ kind: Component
|
||||
+ metadata:
|
||||
+ creationTimestamp: null
|
||||
+ labels:
|
||||
+ app.oam.dev/name: vela-app
|
||||
+ name: my-task
|
||||
+ spec:
|
||||
+ workload:
|
||||
+ apiVersion: batch/v1
|
||||
+ kind: Job
|
||||
+ metadata:
|
||||
+ labels:
|
||||
+ app.oam.dev/appRevision: ""
|
||||
+ app.oam.dev/component: my-task
|
||||
+ app.oam.dev/name: vela-app
|
||||
+ workload.oam.dev/type: task
|
||||
+ spec:
|
||||
+ completions: 1
|
||||
+ parallelism: 1
|
||||
+ template:
|
||||
+ spec:
|
||||
+ containers:
|
||||
+ - command:
|
||||
+ - sleep
|
||||
+ - "1000"
|
||||
+ image: busybox
|
||||
+ name: my-task
|
||||
+ restartPolicy: Never
|
||||
+ status:
|
||||
+ observedGeneration: 0
|
||||
|
||||
---
|
||||
### Component (my-task) / Trait (ingress/service) has been added(+)
|
||||
---
|
||||
+ apiVersion: v1
|
||||
+ kind: Service
|
||||
+ metadata:
|
||||
+ labels:
|
||||
+ app.oam.dev/appRevision: ""
|
||||
+ app.oam.dev/component: my-task
|
||||
+ app.oam.dev/name: vela-app
|
||||
+ trait.oam.dev/resource: service
|
||||
+ trait.oam.dev/type: ingress
|
||||
+ name: my-task
|
||||
+ spec:
|
||||
+ ports:
|
||||
+ - port: 8080
|
||||
+ targetPort: 8080
|
||||
+ selector:
|
||||
+ app.oam.dev/component: my-task
|
||||
|
||||
---
|
||||
### Component (my-task) / Trait (ingress/ingress) has been added(+)
|
||||
---
|
||||
+ apiVersion: networking.k8s.io/v1beta1
|
||||
+ kind: Ingress
|
||||
+ metadata:
|
||||
+ labels:
|
||||
+ app.oam.dev/appRevision: ""
|
||||
+ app.oam.dev/component: my-task
|
||||
+ app.oam.dev/name: vela-app
|
||||
+ trait.oam.dev/resource: ingress
|
||||
+ trait.oam.dev/type: ingress
|
||||
+ name: my-task
|
||||
+ spec:
|
||||
+ rules:
|
||||
+ - host: testsvc.example.com
|
||||
+ http:
|
||||
+ paths:
|
||||
+ - backend:
|
||||
+ serviceName: my-task
|
||||
+ servicePort: 8080
|
||||
+ path: /
|
||||
```
|
||||
|
||||
</details>
|
||||
184
docs/en/end-user/explore.md
Normal file
184
docs/en/end-user/explore.md
Normal file
@@ -0,0 +1,184 @@
|
||||
---
|
||||
title: Explore Applications
|
||||
---
|
||||
|
||||
We will introduce how to explore application related resources in this section.
|
||||
|
||||
## List Application
|
||||
|
||||
```shell
|
||||
$ kubectl get application
|
||||
NAME COMPONENT TYPE PHASE HEALTHY STATUS AGE
|
||||
app-basic app-basic webservice running true 12d
|
||||
website frontend webservice running true 4m54s
|
||||
```
|
||||
|
||||
You can also use the short name `kubectl get app`.
|
||||
|
||||
### View Application Details
|
||||
|
||||
```shell
|
||||
$ kubectl get app website -o yaml
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: Application
|
||||
metadata:
|
||||
generation: 1
|
||||
name: website
|
||||
namespace: default
|
||||
spec:
|
||||
components:
|
||||
- name: frontend
|
||||
properties:
|
||||
image: nginx
|
||||
traits:
|
||||
- properties:
|
||||
cpuPercent: 60
|
||||
max: 10
|
||||
min: 1
|
||||
type: cpuscaler
|
||||
- properties:
|
||||
image: fluentd
|
||||
name: sidecar-test
|
||||
type: sidecar
|
||||
type: webservice
|
||||
- name: backend
|
||||
properties:
|
||||
cmd:
|
||||
- sleep
|
||||
- "1000"
|
||||
image: busybox
|
||||
type: worker
|
||||
status:
|
||||
...
|
||||
latestRevision:
|
||||
name: website-v1
|
||||
revision: 1
|
||||
revisionHash: e9e062e2cddfe5fb
|
||||
services:
|
||||
- healthy: true
|
||||
name: frontend
|
||||
traits:
|
||||
- healthy: true
|
||||
type: cpuscaler
|
||||
- healthy: true
|
||||
type: sidecar
|
||||
- healthy: true
|
||||
name: backend
|
||||
status: running
|
||||
```
|
||||
|
||||
Here are some highlight information that you need to know:
|
||||
|
||||
1. `status.latestRevision` declares current revision of this application.
|
||||
2. `status.services` declares the component created by this application and the healthy state.
|
||||
3. `status.status` declares the global state of this application.
|
||||
|
||||
### List Application Revisions
|
||||
|
||||
When we update an application, if there's any difference on spec, KubeVela will create a new revision.
|
||||
|
||||
```shell
|
||||
$ kubectl get apprev -l app.oam.dev/name=website
|
||||
NAME AGE
|
||||
website-v1 35m
|
||||
```
|
||||
|
||||
## Explore Components
|
||||
|
||||
You can explore what kinds of component definitions supported in your system.
|
||||
|
||||
```shell
|
||||
kubectl get comp -n vela-system
|
||||
NAME WORKLOAD-KIND DESCRIPTION
|
||||
task Job Describes jobs that run code or a script to completion.
|
||||
webservice Deployment Describes long-running, scalable, containerized services that have a stable network endpoint to receive external network traffic from customers.
|
||||
worker Deployment Describes long-running, scalable, containerized services that running at backend. They do NOT have network endpoint to receive external network traffic.
|
||||
```
|
||||
|
||||
The component definition objects are namespace isolated align with application, while the `vela-system` is a common system namespace of KubeVela,
|
||||
definitions laid here can be used by every application.
|
||||
|
||||
You can use [vela kubectl plugin](./kubectlplugin) to view the detail usage of specific component definition.
|
||||
|
||||
```shell
|
||||
$ kubectl vela show webservice
|
||||
# Properties
|
||||
+------------------+----------------------------------------------------------------------------------+-----------------------+----------+---------+
|
||||
| NAME | DESCRIPTION | TYPE | REQUIRED | DEFAULT |
|
||||
+------------------+----------------------------------------------------------------------------------+-----------------------+----------+---------+
|
||||
| cmd | Commands to run in the container | []string | false | |
|
||||
| env | Define arguments by using environment variables | [[]env](#env) | false | |
|
||||
| addRevisionLabel | | bool | true | false |
|
||||
| image | Which image would you like to use for your service | string | true | |
|
||||
| port | Which port do you want customer traffic sent to | int | true | 80 |
|
||||
| cpu | Number of CPU units for the service, like `0.5` (0.5 CPU core), `1` (1 CPU core) | string | false | |
|
||||
| volumes | Declare volumes and volumeMounts | [[]volumes](#volumes) | false | |
|
||||
+------------------+----------------------------------------------------------------------------------+-----------------------+----------+---------+
|
||||
|
||||
|
||||
##### volumes
|
||||
+-----------+---------------------------------------------------------------------+--------+----------+---------+
|
||||
| NAME | DESCRIPTION | TYPE | REQUIRED | DEFAULT |
|
||||
+-----------+---------------------------------------------------------------------+--------+----------+---------+
|
||||
| name | | string | true | |
|
||||
| mountPath | | string | true | |
|
||||
| type | Specify volume type, options: "pvc","configMap","secret","emptyDir" | string | true | |
|
||||
+-----------+---------------------------------------------------------------------+--------+----------+---------+
|
||||
|
||||
|
||||
## env
|
||||
+-----------+-----------------------------------------------------------+-------------------------+----------+---------+
|
||||
| NAME | DESCRIPTION | TYPE | REQUIRED | DEFAULT |
|
||||
+-----------+-----------------------------------------------------------+-------------------------+----------+---------+
|
||||
| name | Environment variable name | string | true | |
|
||||
| value | The value of the environment variable | string | false | |
|
||||
| valueFrom | Specifies a source the value of this var should come from | [valueFrom](#valueFrom) | false | |
|
||||
+-----------+-----------------------------------------------------------+-------------------------+----------+---------+
|
||||
|
||||
|
||||
### valueFrom
|
||||
+--------------+--------------------------------------------------+-------------------------------+----------+---------+
|
||||
| NAME | DESCRIPTION | TYPE | REQUIRED | DEFAULT |
|
||||
+--------------+--------------------------------------------------+-------------------------------+----------+---------+
|
||||
| secretKeyRef | Selects a key of a secret in the pod's namespace | [secretKeyRef](#secretKeyRef) | true | |
|
||||
+--------------+--------------------------------------------------+-------------------------------+----------+---------+
|
||||
|
||||
|
||||
#### secretKeyRef
|
||||
+------+------------------------------------------------------------------+--------+----------+---------+
|
||||
| NAME | DESCRIPTION | TYPE | REQUIRED | DEFAULT |
|
||||
+------+------------------------------------------------------------------+--------+----------+---------+
|
||||
| name | The name of the secret in the pod's namespace to select from | string | true | |
|
||||
| key | The key of the secret to select from. Must be a valid secret key | string | true | |
|
||||
+------+------------------------------------------------------------------+--------+----------+---------+
|
||||
```
|
||||
|
||||
## Explore Traits
|
||||
|
||||
You can explore what kinds of trait definitions supported in your system.
|
||||
|
||||
```shell
|
||||
$ kubectl get trait -n vela-system
|
||||
NAME APPLIES-TO DESCRIPTION
|
||||
cpuscaler [webservice worker] configure k8s HPA with CPU metrics for Deployment
|
||||
ingress [webservice worker] Configures K8s ingress and service to enable web traffic for your service. Please use route trait in cap center for advanced usage.
|
||||
scaler [webservice worker] Configures replicas for your service.
|
||||
sidecar [webservice worker] inject a sidecar container into your app
|
||||
```
|
||||
|
||||
The trait definition objects are namespace isolated align with application, while the `vela-system` is a common system namespace of KubeVela,
|
||||
definitions laid here can be used by every application.
|
||||
|
||||
You can use `kubectl vela show` to see the usage of specific trait definition.
|
||||
|
||||
```shell
|
||||
$ kubectl vela show sidecar
|
||||
# Properties
|
||||
+---------+-----------------------------------------+----------+----------+---------+
|
||||
| NAME | DESCRIPTION | TYPE | REQUIRED | DEFAULT |
|
||||
+---------+-----------------------------------------+----------+----------+---------+
|
||||
| name | Specify the name of sidecar container | string | true | |
|
||||
| image | Specify the image of sidecar container | string | true | |
|
||||
| command | Specify the commands run in the sidecar | []string | false | |
|
||||
+---------+-----------------------------------------+----------+----------+---------+
|
||||
```
|
||||
98
docs/en/end-user/expose.md
Normal file
98
docs/en/end-user/expose.md
Normal file
@@ -0,0 +1,98 @@
|
||||
---
|
||||
title: Expose Application
|
||||
---
|
||||
|
||||
> ⚠️ This section requires your cluster has a working ingress.
|
||||
|
||||
To expose your application publicly, you just need to add an `ingress` trait.
|
||||
|
||||
View ingress schema by [vela kubectl plugin](./kubectlplugin).
|
||||
|
||||
```shell
|
||||
$ kubectl vela show ingress
|
||||
# Properties
|
||||
+--------+------------------------------------------------------------------------------+----------------+----------+---------+
|
||||
| NAME | DESCRIPTION | TYPE | REQUIRED | DEFAULT |
|
||||
+--------+------------------------------------------------------------------------------+----------------+----------+---------+
|
||||
| http | Specify the mapping relationship between the http path and the workload port | map[string]int | true | |
|
||||
| domain | Specify the domain you want to expose | string | true | |
|
||||
+--------+------------------------------------------------------------------------------+----------------+----------+---------+
|
||||
```
|
||||
|
||||
Then modify and deploy this application.
|
||||
|
||||
```yaml
|
||||
# vela-app.yaml
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: first-vela-app
|
||||
spec:
|
||||
components:
|
||||
- name: express-server
|
||||
type: webservice
|
||||
properties:
|
||||
image: crccheck/hello-world
|
||||
port: 8000
|
||||
traits:
|
||||
- type: ingress
|
||||
properties:
|
||||
domain: testsvc.example.com
|
||||
http:
|
||||
"/": 8000
|
||||
```
|
||||
|
||||
```bash
|
||||
$ kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/docs/examples/vela-app.yaml
|
||||
application.core.oam.dev/first-vela-app created
|
||||
```
|
||||
|
||||
Check the status until we see `status` is `running` and services are `healthy`:
|
||||
|
||||
```bash
|
||||
$ kubectl get application first-vela-app -w
|
||||
NAME COMPONENT TYPE PHASE HEALTHY STATUS AGE
|
||||
first-vela-app express-server webservice healthChecking 14s
|
||||
first-vela-app express-server webservice running true 42s
|
||||
```
|
||||
|
||||
You can also see the trait detail for the visiting url:
|
||||
|
||||
```shell
|
||||
$ kubectl get application first-vela-app -o yaml
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: first-vela-app
|
||||
namespace: default
|
||||
spec:
|
||||
...
|
||||
services:
|
||||
- healthy: true
|
||||
name: express-server
|
||||
traits:
|
||||
- healthy: true
|
||||
message: 'Visiting URL: testsvc.example.com, IP: 47.111.233.220'
|
||||
type: ingress
|
||||
status: running
|
||||
...
|
||||
```
|
||||
|
||||
Then you will be able to visit this service.
|
||||
|
||||
```
|
||||
$ curl -H "Host:testsvc.example.com" http://<your ip address>/
|
||||
<xmp>
|
||||
Hello World
|
||||
|
||||
|
||||
## .
|
||||
## ## ## ==
|
||||
## ## ## ## ## ===
|
||||
/""""""""""""""""\___/ ===
|
||||
~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~
|
||||
\______ o _,/
|
||||
\ \ _,'
|
||||
`'--.._\..--''
|
||||
</xmp>
|
||||
```
|
||||
46
docs/en/end-user/kubectlplugin.md
Normal file
46
docs/en/end-user/kubectlplugin.md
Normal file
@@ -0,0 +1,46 @@
|
||||
---
|
||||
title: Install kubectl plugin
|
||||
---
|
||||
|
||||
Install vela kubectl plugin can help you to ship applications more easily!
|
||||
|
||||
## Installation
|
||||
|
||||
You can install kubectl plugin `kubectl vela` by:
|
||||
|
||||
**macOS/Linux**
|
||||
```shell script
|
||||
curl -fsSl https://kubevela.io/script/install-kubectl-vela.sh | bash
|
||||
```
|
||||
|
||||
You can also download the binary from [release pages ( >= v1.0.3)](https://github.com/oam-dev/kubevela/releases) manually.
|
||||
Kubectl will discover it from your system path automatically.
|
||||
|
||||
## Usage
|
||||
|
||||
```shell
|
||||
$ kubectl vela -h
|
||||
A Highly Extensible Platform Engine based on Kubernetes and Open Application Model.
|
||||
|
||||
Usage:
|
||||
kubectl vela [flags]
|
||||
kubectl vela [command]
|
||||
|
||||
Available Commands:
|
||||
|
||||
Flags:
|
||||
-h, --help help for vela
|
||||
|
||||
dry-run Dry Run an application, and output the K8s resources as
|
||||
result to stdout, only CUE template supported for now
|
||||
live-diff Dry-run an application, and do diff on a specific app
|
||||
revison. The provided capability definitions will be used
|
||||
during Dry-run. If any capabilities used in the app are not
|
||||
found in the provided ones, it will try to find from
|
||||
cluster.
|
||||
show Show the reference doc for a workload type or trait
|
||||
version Prints out build version information
|
||||
|
||||
|
||||
Use "kubectl vela [command] --help" for more information about a command.
|
||||
```
|
||||
74
docs/en/end-user/labels.md
Normal file
74
docs/en/end-user/labels.md
Normal file
@@ -0,0 +1,74 @@
|
||||
---
|
||||
title: Labels and Annotations
|
||||
---
|
||||
|
||||
We will introduce how to add labels and annotations to your Application.
|
||||
|
||||
## List Traits
|
||||
|
||||
```bash
|
||||
$ kubectl get trait -n vela-system
|
||||
NAME APPLIES-TO DESCRIPTION
|
||||
annotations ["webservice","worker"] Add annotations for your Workload.
|
||||
cpuscaler ["webservice","worker"] configure k8s HPA with CPU metrics for Deployment
|
||||
ingress ["webservice","worker"] Configures K8s ingress and service to enable web traffic for your service. Please use route trait in cap center for advanced usage.
|
||||
labels ["webservice","worker"] Add labels for your Workload.
|
||||
scaler ["webservice","worker"] Configures replicas for your service by patch replicas field.
|
||||
sidecar ["webservice","worker"] inject a sidecar container into your app
|
||||
```
|
||||
|
||||
You can use `label` and `annotations` traits to add labels and annotations for your workload.
|
||||
|
||||
## Apply Application
|
||||
|
||||
Let's use `label` and `annotations` traits in your Application.
|
||||
|
||||
```shell
|
||||
# myapp.yaml
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: myapp
|
||||
spec:
|
||||
components:
|
||||
- name: express-server
|
||||
type: webservice
|
||||
properties:
|
||||
image: crccheck/hello-world
|
||||
port: 8000
|
||||
traits:
|
||||
- type: labels
|
||||
properties:
|
||||
"release": "stable"
|
||||
- type: annotations
|
||||
properties:
|
||||
"description": "web application"
|
||||
```
|
||||
|
||||
Apply this Application.
|
||||
|
||||
```shell
|
||||
kubectl apply -f myapp.yaml
|
||||
```
|
||||
|
||||
Check the workload has been created successfully.
|
||||
|
||||
```bash
|
||||
$ kubectl get deployments
|
||||
NAME READY UP-TO-DATE AVAILABLE AGE
|
||||
express-server 1/1 1 1 15s
|
||||
```
|
||||
|
||||
Check the `labels` trait.
|
||||
|
||||
```bash
|
||||
$ kubectl get deployments express-server -o jsonpath='{.spec.template.metadata.labels}'
|
||||
{"app.oam.dev/component":"express-server","release": "stable"}
|
||||
```
|
||||
|
||||
Check the `annotations` trait.
|
||||
|
||||
```bash
|
||||
$ kubectl get deployments express-server -o jsonpath='{.spec.template.metadata.annotations}'
|
||||
{"description":"web application"}
|
||||
```
|
||||
9
docs/en/end-user/monitoring.md
Normal file
9
docs/en/end-user/monitoring.md
Normal file
@@ -0,0 +1,9 @@
|
||||
---
|
||||
title: Monitoring
|
||||
---
|
||||
|
||||
TBD, Content Overview:
|
||||
|
||||
1. We will move all installation scripts to a separate doc may be named Install Capability Providers (e.g. https://knative.dev/docs/install/install-extensions/)Install monitoring trait(along with prometheus/grafana controller).
|
||||
2. Add monitoring trait into Application.
|
||||
3. View it with grafana.
|
||||
8
docs/en/end-user/pipeline.md
Normal file
8
docs/en/end-user/pipeline.md
Normal file
@@ -0,0 +1,8 @@
|
||||
---
|
||||
title: Build CI/CD Pipeline
|
||||
---
|
||||
|
||||
TBD, Content Overview:
|
||||
|
||||
1. install argo/tekton.
|
||||
2. run the pipeline example: https://github.com/oam-dev/kubevela/tree/master/docs/examples/argo
|
||||
68
docs/en/end-user/scale.md
Normal file
68
docs/en/end-user/scale.md
Normal file
@@ -0,0 +1,68 @@
|
||||
---
|
||||
title: Scale
|
||||
---
|
||||
|
||||
In the [Deploy Application](../application) section, we use `cpuscaler` trait as an auto-scaler for the sample application.
|
||||
|
||||
## Manual Scale
|
||||
|
||||
You can scale your application manually by using the `scaler` trait.
|
||||
|
||||
```shell
|
||||
$ kubectl vela show scaler
|
||||
# Properties
|
||||
+----------+--------------------------------+------+----------+---------+
|
||||
| NAME | DESCRIPTION | TYPE | REQUIRED | DEFAULT |
|
||||
+----------+--------------------------------+------+----------+---------+
|
||||
| replicas | Specify replicas of workload | int | true | 1 |
|
||||
+----------+--------------------------------+------+----------+---------+
|
||||
```
|
||||
|
||||
Deploy the application.
|
||||
|
||||
```yaml
|
||||
# sample-manual.yaml
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: website
|
||||
spec:
|
||||
components:
|
||||
- name: frontend
|
||||
type: webservice
|
||||
properties:
|
||||
image: nginx
|
||||
traits:
|
||||
- type: scaler
|
||||
properties:
|
||||
replicas: 2
|
||||
- type: sidecar
|
||||
properties:
|
||||
name: "sidecar-test"
|
||||
image: "fluentd"
|
||||
- name: backend
|
||||
type: worker
|
||||
properties:
|
||||
image: busybox
|
||||
cmd:
|
||||
- sleep
|
||||
- '1000'
|
||||
```
|
||||
|
||||
Change and Apply the sample application:
|
||||
|
||||
```shell
|
||||
$ kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/docs/examples/enduser/sample-manual.yaml
|
||||
application.core.oam.dev/website configured
|
||||
```
|
||||
|
||||
After a while, you can see the underlying deployment of `frontend` component has two replicas now.
|
||||
|
||||
```shell
|
||||
$ kubectl get deploy -l app.oam.dev/name=website
|
||||
NAME READY UP-TO-DATE AVAILABLE AGE
|
||||
backend 1/1 1 1 19h
|
||||
frontend 2/2 2 2 19h
|
||||
```
|
||||
|
||||
To scale up or scale down, you can just modify the `replicas` field of `scaler` trait and apply the application again.
|
||||
102
docs/en/end-user/sidecar.md
Normal file
102
docs/en/end-user/sidecar.md
Normal file
@@ -0,0 +1,102 @@
|
||||
---
|
||||
title: Attach Sidecar
|
||||
---
|
||||
|
||||
In this section, we will show you how to use `sidecar` trait to collect logs.
|
||||
|
||||
## Show the Usage of Sidecar
|
||||
|
||||
```shell
|
||||
$ kubectl vela show sidecar
|
||||
# Properties
|
||||
+---------+-----------------------------------------+-----------------------+----------+---------+
|
||||
| NAME | DESCRIPTION | TYPE | REQUIRED | DEFAULT |
|
||||
+---------+-----------------------------------------+-----------------------+----------+---------+
|
||||
| name | Specify the name of sidecar container | string | true | |
|
||||
| cmd | Specify the commands run in the sidecar | []string | false | |
|
||||
| image | Specify the image of sidecar container | string | true | |
|
||||
| volumes | Specify the shared volume path | [[]volumes](#volumes) | false | |
|
||||
+---------+-----------------------------------------+-----------------------+----------+---------+
|
||||
|
||||
|
||||
## volumes
|
||||
+-----------+-------------+--------+----------+---------+
|
||||
| NAME | DESCRIPTION | TYPE | REQUIRED | DEFAULT |
|
||||
+-----------+-------------+--------+----------+---------+
|
||||
| name | | string | true | |
|
||||
| path | | string | true | |
|
||||
+-----------+-------------+--------+----------+---------+
|
||||
```
|
||||
|
||||
## Apply the Application
|
||||
|
||||
In this Application, component `log-gen-worker` and sidecar share the data volume that saves the logs.
|
||||
The sidecar will re-output the log to stdout.
|
||||
|
||||
```yaml
|
||||
# app.yaml
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: vela-app-with-sidecar
|
||||
spec:
|
||||
components:
|
||||
- name: log-gen-worker
|
||||
type: worker
|
||||
properties:
|
||||
image: busybox
|
||||
cmd:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- >
|
||||
i=0;
|
||||
while true;
|
||||
do
|
||||
echo "$i: $(date)" >> /var/log/date.log;
|
||||
i=$((i+1));
|
||||
sleep 1;
|
||||
done
|
||||
volumes:
|
||||
- name: varlog
|
||||
mountPath: /var/log
|
||||
type: emptyDir
|
||||
traits:
|
||||
- type: sidecar
|
||||
properties:
|
||||
name: count-log
|
||||
image: busybox
|
||||
cmd: [ /bin/sh, -c, 'tail -n+1 -f /var/log/date.log']
|
||||
volumes:
|
||||
- name: varlog
|
||||
path: /var/log
|
||||
```
|
||||
|
||||
Apply this Application.
|
||||
|
||||
```shell
|
||||
kubectl apply -f app.yaml
|
||||
```
|
||||
|
||||
Check the workload generated by the Application.
|
||||
|
||||
```shell
|
||||
$ kubectl get pod
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
log-gen-worker-76945f458b-k7n9k 2/2 Running 0 90s
|
||||
```
|
||||
|
||||
Check the output of sidecar.
|
||||
|
||||
```shell
|
||||
$ kubectl logs -f log-gen-worker-76945f458b-k7n9k count-log
|
||||
0: Fri Apr 16 11:08:45 UTC 2021
|
||||
1: Fri Apr 16 11:08:46 UTC 2021
|
||||
2: Fri Apr 16 11:08:47 UTC 2021
|
||||
3: Fri Apr 16 11:08:48 UTC 2021
|
||||
4: Fri Apr 16 11:08:49 UTC 2021
|
||||
5: Fri Apr 16 11:08:50 UTC 2021
|
||||
6: Fri Apr 16 11:08:51 UTC 2021
|
||||
7: Fri Apr 16 11:08:52 UTC 2021
|
||||
8: Fri Apr 16 11:08:53 UTC 2021
|
||||
9: Fri Apr 16 11:08:54 UTC 2021
|
||||
```
|
||||
8
docs/en/end-user/volumes.md
Normal file
8
docs/en/end-user/volumes.md
Normal file
@@ -0,0 +1,8 @@
|
||||
---
|
||||
title: Attach Volumes
|
||||
---
|
||||
|
||||
TBD, Content Overview:
|
||||
|
||||
1. attach basic volume for application.
|
||||
2. Extend custom volume types and attach.
|
||||
@@ -8,7 +8,7 @@ In this section, it will introduce how to declare Helm charts as app components
|
||||
|
||||
## Prerequisite
|
||||
|
||||
* [fluxcd/flux2](../install#3-optional-install-flux2), make sure you have installed the flux2 in the [installation guide](/docs/install).
|
||||
* Make sure you have enabled Helm support in the [installation guide](/docs/install).
|
||||
|
||||
## Declare `ComponentDefinition`
|
||||
|
||||
|
||||
@@ -5,17 +5,17 @@ title: Installation
|
||||
import Tabs from '@theme/Tabs';
|
||||
import TabItem from '@theme/TabItem';
|
||||
|
||||
If you have installed the kubevela chart before, please read the [Upgrade](#upgrade) step directly.
|
||||
> For upgrading existing KubeVela, please read the [upgrade guide](./advanced-install#upgrade).
|
||||
|
||||
## 1. Setup Kubernetes cluster
|
||||
## 1. Choose Kubernetes Cluster
|
||||
|
||||
Requirements:
|
||||
- Kubernetes cluster >= v1.15.0
|
||||
- kubectl installed and configured
|
||||
- `kubectl` installed and configured
|
||||
|
||||
If you don't have K8s cluster from Cloud Provider, you may pick either Minikube or KinD as local cluster testing option.
|
||||
KubeVela is a simple custom controller that can be installed on any Kubernetes cluster including managed offerings or your own clusters. The only requirement is please ensure [ingress-nginx](https://kubernetes.github.io/ingress-nginx/deploy/) is installed and enabled.
|
||||
|
||||
> NOTE: If you are not using minikube or kind, please make sure to [install or enable ingress-nginx](https://kubernetes.github.io/ingress-nginx/deploy/) by yourself.
|
||||
For local deployment and testing, you can use `minikube` or `kind`.
|
||||
|
||||
<Tabs
|
||||
className="unique-tabs"
|
||||
@@ -28,7 +28,7 @@ values={[
|
||||
|
||||
Follow the minikube [installation guide](https://minikube.sigs.k8s.io/docs/start/).
|
||||
|
||||
Once minikube is installed, create a cluster:
|
||||
Then spin up a minikube cluster
|
||||
|
||||
```shell script
|
||||
minikube start
|
||||
@@ -80,8 +80,6 @@ kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/mast
|
||||
|
||||
## 2. Install KubeVela Controller
|
||||
|
||||
These steps will install KubeVela controller and its dependency.
|
||||
|
||||
1. Add helm chart repo for KubeVela
|
||||
```shell script
|
||||
helm repo add kubevela https://kubevelacharts.oss-cn-hangzhou.aliyuncs.com/core
|
||||
@@ -96,75 +94,43 @@ These steps will install KubeVela controller and its dependency.
|
||||
```shell script
|
||||
helm install --create-namespace -n vela-system kubevela kubevela/vela-core
|
||||
```
|
||||
By default, it will enable the webhook with a self-signed certificate provided by [kube-webhook-certgen](https://github.com/jet/kube-webhook-certgen)
|
||||
|
||||
If you want to try the latest master branch, add flag `--devel` in command `helm search` to choose a pre-release
|
||||
version in format `<next_version>-rc-master` which means the next release candidate version build on `master` branch,
|
||||
like `0.4.0-rc-master`.
|
||||
|
||||
By default, it will enable the webhook with a self-signed certificate provided by [kube-webhook-certgen](https://github.com/jet/kube-webhook-certgen).
|
||||
You can also [install it with `cert-manager`](./advanced-install#install-kubevela-with-cert-manager).
|
||||
|
||||
4. Verify chart installed successfully
|
||||
```shell script
|
||||
helm search repo kubevela/vela-core -l --devel
|
||||
helm test kubevela -n vela-system
|
||||
```
|
||||
```console
|
||||
NAME CHART VERSION APP VERSION DESCRIPTION
|
||||
kubevela/vela-core 0.4.0-rc-master 0.4.0-rc-master A Helm chart for KubeVela core
|
||||
kubevela/vela-core 0.3.2 0.3.2 A Helm chart for KubeVela core
|
||||
kubevela/vela-core 0.3.1 0.3.1 A Helm chart for KubeVela core
|
||||
```
|
||||
|
||||
And try the following command to install it.
|
||||
|
||||
```shell script
|
||||
helm install --create-namespace -n vela-system kubevela kubevela/vela-core --version <next_version>-rc-master
|
||||
```
|
||||
```console
|
||||
|
||||
<details> <summary> Click to see the expected output of helm test </summary>
|
||||
|
||||
```shell
|
||||
Pod kubevela-application-test pending
|
||||
Pod kubevela-application-test pending
|
||||
Pod kubevela-application-test running
|
||||
Pod kubevela-application-test succeeded
|
||||
NAME: kubevela
|
||||
LAST DEPLOYED: Thu Apr 1 19:41:30 2021
|
||||
LAST DEPLOYED: Tue Apr 13 18:42:20 2021
|
||||
NAMESPACE: vela-system
|
||||
STATUS: deployed
|
||||
REVISION: 1
|
||||
TEST SUITE: kubevela-application-test
|
||||
Last Started: Fri Apr 16 20:49:10 2021
|
||||
Last Completed: Fri Apr 16 20:50:04 2021
|
||||
Phase: Succeeded
|
||||
TEST SUITE: first-vela-app
|
||||
Last Started: Fri Apr 16 20:49:10 2021
|
||||
Last Completed: Fri Apr 16 20:49:10 2021
|
||||
Phase: Succeeded
|
||||
NOTES:
|
||||
Welcome to use the KubeVela! Enjoy your shipping application journey!
|
||||
```
|
||||
|
||||
4. Install Kubevela with cert-manager (optional)
|
||||
|
||||
If cert-manager has been installed, it can be used to take care of generating certs.
|
||||
|
||||
You need to install cert-manager before the kubevela chart.
|
||||
```shell script
|
||||
helm repo add jetstack https://charts.jetstack.io
|
||||
helm repo update
|
||||
helm install cert-manager jetstack/cert-manager --namespace cert-manager --version v1.2.0 --create-namespace --set installCRDs=true
|
||||
```
|
||||
|
||||
Install KubeVela with cert-manager enabled:
|
||||
```shell script
|
||||
helm install --create-namespace -n vela-system --set admissionWebhooks.certManager.enabled=true kubevela kubevela/vela-core
|
||||
```
|
||||
|
||||
## 3. (Optional) Install flux2
|
||||
</details>
|
||||
|
||||
This installation step is optional, it's required if you want to register [Helm Chart](https://helm.sh/) as KubeVela capabilities.
|
||||
## 3. Get KubeVela CLI
|
||||
|
||||
KubeVela relies on several CRDs and controllers from [fluxcd/flux2](https://github.com/fluxcd/flux2).
|
||||
|
||||
| CRD | Controller Image |
|
||||
| ----------- | ----------- |
|
||||
| helmrepositories.source.toolkit.fluxcd.io | fluxcd/source-controller:v0.9.0 |
|
||||
| helmcharts.source.toolkit.fluxcd.io | - |
|
||||
| buckets.source.toolkit.fluxcd.io | - |
|
||||
| gitrepositories.source.toolkit.fluxcd.io | - |
|
||||
| helmreleases.helm.toolkit.fluxcd.io | fluxcd/helm-controller:v0.8.0 |
|
||||
|
||||
You can install the whole flux2 from their [official website](https://github.com/fluxcd/flux2)
|
||||
or install the chart with minimal parts provided by KubeVela:
|
||||
|
||||
```shell
|
||||
$ helm install --create-namespace -n flux-system helm-flux http://oam.dev/catalog/helm-flux2-0.1.0.tgz
|
||||
```
|
||||
|
||||
## 4. (Optional) Get KubeVela CLI
|
||||
Using the KubeVela CLI gives you a simplified workflow with optimized output compared to using `kubectl`. It is not mandatory though.
|
||||
|
||||
Here are three ways to get the KubeVela CLI:
|
||||
|
||||
@@ -222,9 +188,23 @@ sudo mv ./vela /usr/local/bin/vela
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
## 5. (Optional) Sync Capability from Cluster
|
||||
|
||||
If you want to run application from `vela` cli, then you should sync capabilities first like below:
|
||||
## 4. Enable Helm Support
|
||||
|
||||
KubeVela leverages Helm controller from [Flux v2](https://github.com/fluxcd/flux2) to deploy [Helm](https://helm.sh/) based components.
|
||||
|
||||
You can enable this feature by installing a minimal Flux v2 chart as below:
|
||||
|
||||
```shell
|
||||
$ helm install --create-namespace -n flux-system helm-flux http://oam.dev/catalog/helm-flux2-0.1.0.tgz
|
||||
```
|
||||
|
||||
Or you could install full Flux v2 following its own guide of course.
|
||||
|
||||
|
||||
## 5. Verify
|
||||
|
||||
Checking available application components and traits by `vela` CLI tool:
|
||||
|
||||
```shell script
|
||||
vela components
|
||||
@@ -240,84 +220,8 @@ worker vela-system deployments.apps Describes long-running, scalable, contai
|
||||
to receive external network traffic.
|
||||
```
|
||||
|
||||
## 6. (Optional) Clean Up
|
||||
These capabilities are built-in so they are ready to use if they show up. KubeVela is designed to be programmable and fully self-service, so the assumption is more capabilities will be added later per your own needs.
|
||||
|
||||
<details>
|
||||
Run:
|
||||
Also, whenever new capabilities are added in the platform, you will immediately see them in above output.
|
||||
|
||||
```shell script
|
||||
helm uninstall -n vela-system kubevela
|
||||
rm -r ~/.vela
|
||||
```
|
||||
|
||||
This will uninstall KubeVela server component and its dependency components.
|
||||
This also cleans up local CLI cache.
|
||||
|
||||
Then clean up CRDs (CRDs are not removed via helm by default):
|
||||
|
||||
```shell script
|
||||
kubectl delete crd \
|
||||
appdeployments.core.oam.dev \
|
||||
applicationconfigurations.core.oam.dev \
|
||||
applicationcontexts.core.oam.dev \
|
||||
applicationdeployments.core.oam.dev \
|
||||
applicationrevisions.core.oam.dev \
|
||||
applications.core.oam.dev \
|
||||
approllouts.core.oam.dev \
|
||||
componentdefinitions.core.oam.dev \
|
||||
components.core.oam.dev \
|
||||
containerizedworkloads.core.oam.dev \
|
||||
healthscopes.core.oam.dev \
|
||||
manualscalertraits.core.oam.dev \
|
||||
podspecworkloads.standard.oam.dev \
|
||||
scopedefinitions.core.oam.dev \
|
||||
traitdefinitions.core.oam.dev \
|
||||
workloaddefinitions.core.oam.dev
|
||||
```
|
||||
</details>
|
||||
|
||||
# Upgrade
|
||||
|
||||
If you have already installed KubeVela and want to upgrade to the new version, you can follow the instructions below.
|
||||
|
||||
## Step 1. Update helm repo
|
||||
|
||||
```shell
|
||||
helm repo update
|
||||
```
|
||||
|
||||
You can get the new version of the KubeVela chart by running:
|
||||
|
||||
```shell
|
||||
helm search repo kubevela/vela-core -l
|
||||
```
|
||||
|
||||
## Step 2. Upgrade KubeVela CRDs
|
||||
|
||||
```shell
|
||||
kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/charts/vela-core/crds/core.oam.dev_componentdefinitions.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/charts/vela-core/crds/core.oam.dev_workloaddefinitions.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/charts/vela-core/crds/core.oam.dev_traitdefinitions.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/charts/vela-core/crds/core.oam.dev_applications.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/charts/vela-core/crds/core.oam.dev_approllouts.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/charts/vela-core/crds/core.oam.dev_applicationrevisions.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/charts/vela-core/crds/core.oam.dev_scopedefinitions.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/charts/vela-core/crds/core.oam.dev_appdeployments.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/charts/vela-core/crds/core.oam.dev_applicationcontexts.yaml
|
||||
```
|
||||
|
||||
> Tips: If you meet errors like `* is invalid: spec.scope: Invalid value: "Namespaced": field is immutable`. Please delete the crd with the error.
|
||||
and re-apply the kubevela crds.
|
||||
|
||||
```shell
|
||||
kubectl delete crd \
|
||||
scopedefinitions.core.oam.dev \
|
||||
traitdefinitions.core.oam.dev \
|
||||
workloaddefinitions.core.oam.dev
|
||||
```
|
||||
|
||||
## Step 3. Upgrade KubeVela helm chart
|
||||
|
||||
```shell
|
||||
helm upgrade --install --create-namespace --namespace vela-system kubevela kubevela/vela-core --version <the_new_version>
|
||||
```
|
||||
> See the [advanced installation guide](./advanced-install) to learn more about installation details.
|
||||
@@ -4,7 +4,7 @@ slug: /
|
||||
|
||||
---
|
||||
|
||||

|
||||

|
||||
|
||||
## Motivation
|
||||
|
||||
@@ -22,25 +22,27 @@ In the end, developers complain those platforms are too rigid and slow in respon
|
||||
|
||||
For platform builders, KubeVela serves as a framework that relieves the pains of building developer focused platforms by doing the following:
|
||||
|
||||
- Developer Centric. KubeVela abstracts away the infrastructure level primitives by introducing the *Application* concept to capture a full deployment of microservices, and then building operational features around the applications' needs only.
|
||||
- Developer Centric. KubeVela introduces the Application as the main API to capture a full deployment of microservices, and builds features around the application needs only. Progressive rollout and multi-cluster deployment are provided out-of-box. No infrastructure level concerns, simply deploy.
|
||||
|
||||
- Extending Natively. The *Application* is composed of modularized building blocks that support [CUELang](https://github.com/cuelang/cue) and [Helm](https://helm.sh) as template engines. This enable you to abstract Kubernetes capabilities in LEGO-style and ship them to end users via plain `kubectl apply -f`. Changes made to the abstraction templates take effect at runtime, neither recompilation nor redeployment of KubeVela is required.
|
||||
- Extending Natively. In KubeVela, all platform features (such as workloads, operational behaviors, and cloud services) are defined as reusable [CUE](https://github.com/cuelang/cue) and/or [Helm](https://helm.sh) components, per needs of the application deployment. And when application's needs grow, your platform capabilities expand naturally in a programmable approach.
|
||||
|
||||
- Simple yet Reliable Abstraction Mechanism. Unlike most IaC (Infrastructure-as-Code) solutions, the abstractions in KubeVela is built with [Kubernetes Control Loop](https://kubernetes.io/docs/concepts/architecture/controller/) so they will never leave *Configuration Drift* in your cluster. As a [Kubernetes Custom Resource](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/), KubeVela works with any CI/CD or GitOps tools seamlessly, no integration effort needed.
|
||||
- Simple yet Reliable. Perfect in flexibility though, X-as-Code may lead to configuration drift (i.e. the running instances are not in line with the expected configuration). KubeVela solves this by modeling its capabilities as code but enforce them via Kubernetes control loop which will never leave inconsistency in your clusters. This also makes KubeVela work with any CI/CD or GitOps tools via declarative API without integration burden.
|
||||
|
||||
With KubeVela, the platform builders finally have the tooling supports to design easy-to-use abstractions and ship them to end-users with high confidence and low turn around time.
|
||||
|
||||
For end-users (e.g. app developers), such abstractions built with KubeVela will enable them to design and ship applications to Kubernetes with minimal effort - instead of managing a handful infrastructure details, a simple application definition that can be easily integrated with any CI/CD pipeline is all they need.
|
||||
For end-users (e.g. app developers and operators), these abstractions will enable them to design and ship applications to Kubernetes clusters with minimal effort, and instead of managing a handful infrastructure details, a simple application definition that can be easily integrated with any CI/CD pipeline is all they need.
|
||||
|
||||
## Comparisons
|
||||
|
||||
### KubeVela vs. Platform-as-a-Service (PaaS)
|
||||
|
||||
The typical examples are Heroku and Cloud Foundry. They provide full application management capabilities and aim to improve developer experience and efficiency. In this context, KubeVela can provide similar experience.
|
||||
The typical examples are Heroku and Cloud Foundry. They provide full application management capabilities and aim to improve developer experience and efficiency. In this context, KubeVela shares the same goal.
|
||||
|
||||
Though the biggest difference lies in **flexibility**.
|
||||
|
||||
KubeVela is a Kubernetes plug-in that enabling you to serve end users with simplicity by defining your own abstractions, and this is achieved by templating Kubernetes API resources as application-centric abstractions in your cluster. Comparing to this mechanism, most existing PaaS systems are highly restricted and inflexible, i.e. they have to enforce constraints in the type of supported applications and capabilities, and as application needs grows, they always outgrow the capabilities of a PaaS system - this will never happen in KubeVela.
|
||||
KubeVela is a Kubernetes add-on that enabling you to serve end users with programmable building blocks which are fully flexible and coded by yourself. Comparing to this mechanism, traditional PaaS systems are highly restricted, i.e. they have to enforce constraints in the type of supported applications and capabilities, and as application needs grows, you always outgrow the capabilities of the PaaS system - this will never happen in KubeVela platform.
|
||||
|
||||
So think of KubeVela as a Heroku that is fully extensible to serve your needs as you grow.
|
||||
|
||||
### KubeVela vs. Serverless
|
||||
|
||||
@@ -52,17 +54,17 @@ Kubernetes based serverless platforms such as Knative, OpenFaaS can be easily in
|
||||
|
||||
The typical example is Hashicorp's Waypoint. Waypoint is a developer facing tool which introduces a consistent workflow (i.e., build, deploy, release) to ship applications on top of different platforms.
|
||||
|
||||
KubeVela can be integrated into Waypoint as a supported platform. In this case, developers could use the Waypoint workflow to manage applications with leverage of abstractions (e.g. application, rollout, ingress, autoscaling etc) you built via KubeVela.
|
||||
KubeVela can be integrated into such tools as an application platform. In this case, developers could use the Waypoint workflow to manage applications with leverage of abstractions (e.g. application, rollout, ingress, autoscaling etc) you built via KubeVela.
|
||||
|
||||
### KubeVela vs. Helm
|
||||
|
||||
Helm is a package manager for Kubernetes that provides package, install, and upgrade a set of YAML files for Kubernetes as a unit. KubeVela leverages Helm heavily to package the capability dependencies and Helm is also one of the core templating engines behind *Application* abstraction.
|
||||
Helm is a package manager for Kubernetes that provides package, install, and upgrade a set of YAML files for Kubernetes as a unit. KubeVela can patch, deploy and rollout Helm packaged application components, and it also leverages Helm to manage the capability dependencies in system level.
|
||||
|
||||
Though KubeVela itself is not a package manager, it's a core engine for platform builders to create upper layer platforms in easy and repeatable approach.
|
||||
Though KubeVela itself is not a package manager, it's a core engine for platform builders to create developer-centric deployment system with easy and repeatable approach.
|
||||
|
||||
### KubeVela vs. Kubernetes
|
||||
|
||||
KubeVela is a Kubernetes plugin for building higher level abstractions. It leverages [Open Application Model](https://github.com/oam-dev/spec) and the native Kubernetes extensibility to resolve a hard problem - making shipping applications enjoyable on Kubernetes.
|
||||
KubeVela is a Kubernetes add-on for building developer-centric deployment system. It leverages [Open Application Model](https://github.com/oam-dev/spec) and the native Kubernetes extensibility to resolve a hard problem - making shipping applications enjoyable on Kubernetes.
|
||||
|
||||
## Getting Started
|
||||
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
---
|
||||
title: Define and Consume Cloud Resource
|
||||
title: Crossplane
|
||||
---
|
||||
|
||||
KubeVela provides unified abstraction even for cloud services.
|
||||
Cloud services is also part of your application deployment.
|
||||
|
||||
## Should a Cloud Service be a Component or Trait?
|
||||
|
||||
@@ -17,7 +17,7 @@ In a single application, they are in form of Traits, and in multiple application
|
||||
|
||||
## Install and Configure Crossplane
|
||||
|
||||
KubeVela uses [Crossplane](https://crossplane.io/) as the cloud service operator. Please Refer to [Installation](https://github.com/crossplane/provider-alibaba/releases/tag/v0.5.0)
|
||||
This guide will use [Crossplane](https://crossplane.io/) as the cloud service provider. Please Refer to [Installation](https://github.com/crossplane/provider-alibaba/releases/tag/v0.5.0)
|
||||
to install Crossplane Alibaba provider v0.5.0.
|
||||
|
||||
If you'd like to configure any other Crossplane providers, please refer to [Crossplane Select a Getting Started Configuration](https://crossplane.io/docs/v1.1/getting-started/install-configure.html#select-a-getting-started-configuration).
|
||||
@@ -57,11 +57,11 @@ spec:
|
||||
Note: We currently just use Crossplane Alibaba provider. But we are about to use [Crossplane](https://crossplane.io/) as the
|
||||
cloud resource operator for Kubernetes in the near future.
|
||||
|
||||
## Provisioning and consuming cloud resource in a single application v1 (one cloud resource)
|
||||
## Register ComponentDefinition and TraitDefinition
|
||||
|
||||
### Step 1: Register ComponentDefinition `alibaba-rds` as RDS cloud resource producer
|
||||
### Register ComponentDefinition `alibaba-rds` as RDS cloud resource producer
|
||||
|
||||
First, register the `alibaba-rds` workload type to KubeVela.
|
||||
Register the `alibaba-rds` workload type to KubeVela.
|
||||
|
||||
```yaml
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
@@ -102,149 +102,22 @@ spec:
|
||||
}
|
||||
}
|
||||
parameter: {
|
||||
engine: *"mysql" | string
|
||||
// +usage=RDS engine
|
||||
engine: *"mysql" | string
|
||||
// +usage=The version of RDS engine
|
||||
engineVersion: *"8.0" | string
|
||||
// +usage=The instance class for the RDS
|
||||
instanceClass: *"rds.mysql.c1.large" | string
|
||||
username: string
|
||||
secretName: string
|
||||
// +usage=RDS username
|
||||
username: string
|
||||
// +usage=Secret name which RDS connection will write to
|
||||
secretName: string
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
### Step 2: Prepare TraitDefinition `service-binding` to do env-secret mapping
|
||||
|
||||
As for data binding in Application, KubeVela recommends defining a trait to finish the job. We have prepared a common
|
||||
trait for convenience. This trait works well for binding resources' info into pod spec Env.
|
||||
|
||||
```yaml
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: TraitDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
definition.oam.dev/description: "binding cloud resource secrets to pod env"
|
||||
name: service-binding
|
||||
spec:
|
||||
appliesToWorkloads:
|
||||
- webservice
|
||||
- worker
|
||||
schematic:
|
||||
cue:
|
||||
template: |
|
||||
patch: {
|
||||
spec: template: spec: {
|
||||
// +patchKey=name
|
||||
containers: [{
|
||||
name: context.name
|
||||
// +patchKey=name
|
||||
env: [
|
||||
for envName, v in parameter.envMappings {
|
||||
name: envName
|
||||
valueFrom: {
|
||||
secretKeyRef: {
|
||||
name: v.secret
|
||||
if v["key"] != _|_ {
|
||||
key: v.key
|
||||
}
|
||||
if v["key"] == _|_ {
|
||||
key: envName
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
]
|
||||
}]
|
||||
}
|
||||
}
|
||||
|
||||
parameter: {
|
||||
envMappings: [string]: [string]: string
|
||||
}
|
||||
```
|
||||
|
||||
With the help of this `service-binding` trait, developers can explicitly set parameter `envMappings` to mapping all environment names with secret key. Here is an example.
|
||||
|
||||
```yaml
|
||||
...
|
||||
traits:
|
||||
- type: service-binding
|
||||
properties:
|
||||
envMappings:
|
||||
# environments refer to db-conn secret
|
||||
DB_PASSWORD:
|
||||
secret: db-conn
|
||||
key: password # 1) If the env name is different from secret key, secret key has to be set.
|
||||
endpoint:
|
||||
secret: db-conn # 2) If the env name is the same as the secret key, secret key can be omitted.
|
||||
username:
|
||||
secret: db-conn
|
||||
# environments refer to oss-conn secret
|
||||
BUCKET_NAME:
|
||||
secret: oss-conn
|
||||
key: Bucket
|
||||
...
|
||||
```
|
||||
|
||||
### Step 3: Create an application to provision and consume cloud resource
|
||||
|
||||
Create an application with a cloud resource provisioning component and a consuming component as below.
|
||||
|
||||
```yaml
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: webapp
|
||||
spec:
|
||||
components:
|
||||
- name: express-server
|
||||
type: webservice
|
||||
properties:
|
||||
image: zzxwill/flask-web-application:v0.3.1-crossplane
|
||||
ports: 80
|
||||
traits:
|
||||
- type: service-binding
|
||||
properties:
|
||||
envMappings:
|
||||
# environments refer to db-conn secret
|
||||
DB_PASSWORD:
|
||||
secret: db-conn
|
||||
key: password # 1) If the env name is different from secret key, secret key has to be set.
|
||||
endpoint:
|
||||
secret: db-conn # 2) If the env name is the same as the secret key, secret key can be omitted.
|
||||
username:
|
||||
secret: db-conn
|
||||
|
||||
- name: sample-db
|
||||
type: alibaba-rds
|
||||
properties:
|
||||
name: sample-db
|
||||
engine: mysql
|
||||
engineVersion: "8.0"
|
||||
instanceClass: rds.mysql.c1.large
|
||||
username: oamtest
|
||||
secretName: db-conn
|
||||
|
||||
```
|
||||
|
||||
Apply it and verify the application.
|
||||
|
||||
```shell
|
||||
$ kubectl get application
|
||||
NAME AGE
|
||||
webapp 46m
|
||||
|
||||
$ kubectl port-forward deployment/express-server 80:80
|
||||
Forwarding from 127.0.0.1:80 -> 80
|
||||
Forwarding from [::1]:80 -> 80
|
||||
Handling connection for 80
|
||||
Handling connection for 80
|
||||
```
|
||||
|
||||

|
||||
|
||||
## Provisioning and consuming cloud resource in a single application v2 (two cloud resources)
|
||||
|
||||
Based on the section `Provisioning and consuming cloud resource in a single application v1 (one cloud resource)`, register
|
||||
one more cloud resource workload type `alibaba-oss` to KubeVela.
|
||||
### Register ComponentDefinition `alibaba-oss` as OSS cloud resource producer
|
||||
|
||||
```yaml
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
@@ -281,138 +154,21 @@ spec:
|
||||
}
|
||||
}
|
||||
parameter: {
|
||||
name: string
|
||||
acl: *"private" | string
|
||||
storageClass: *"Standard" | string
|
||||
// +usage=OSS bucket name
|
||||
name: string
|
||||
// +usage=The access control list of the OSS bucket
|
||||
acl: *"private" | string
|
||||
// +usage=The storage type of OSS bucket
|
||||
storageClass: *"Standard" | string
|
||||
// +usage=The data Redundancy type of OSS bucket
|
||||
dataRedundancyType: *"LRS" | string
|
||||
secretName: string
|
||||
// +usage=Secret name which RDS connection will write to
|
||||
secretName: string
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
Update the application to also consume cloud resource OSS.
|
||||
|
||||
```yaml
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: webapp
|
||||
spec:
|
||||
components:
|
||||
- name: express-server
|
||||
type: webservice
|
||||
properties:
|
||||
image: zzxwill/flask-web-application:v0.3.1-crossplane
|
||||
ports: 80
|
||||
traits:
|
||||
- type: service-binding
|
||||
properties:
|
||||
envMappings:
|
||||
# environments refer to db-conn secret
|
||||
DB_PASSWORD:
|
||||
secret: db-conn
|
||||
key: password # 1) If the env name is different from secret key, secret key has to be set.
|
||||
endpoint:
|
||||
secret: db-conn # 2) If the env name is the same as the secret key, secret key can be omitted.
|
||||
username:
|
||||
secret: db-conn
|
||||
# environments refer to oss-conn secret
|
||||
BUCKET_NAME:
|
||||
secret: oss-conn
|
||||
key: Bucket
|
||||
|
||||
- name: sample-db
|
||||
type: alibaba-rds
|
||||
properties:
|
||||
name: sample-db
|
||||
engine: mysql
|
||||
engineVersion: "8.0"
|
||||
instanceClass: rds.mysql.c1.large
|
||||
username: oamtest
|
||||
secretName: db-conn
|
||||
|
||||
- name: sample-oss
|
||||
type: alibaba-oss
|
||||
properties:
|
||||
name: velaweb
|
||||
secretName: oss-conn
|
||||
```
|
||||
|
||||
Apply it and verify the application.
|
||||
|
||||
```shell
|
||||
$ kubectl port-forward deployment/express-server 80:80
|
||||
Forwarding from 127.0.0.1:80 -> 80
|
||||
Forwarding from [::1]:80 -> 80
|
||||
Handling connection for 80
|
||||
Handling connection for 80
|
||||
```
|
||||
|
||||

|
||||
|
||||
## Provisioning and consuming cloud resource in different applications
|
||||
|
||||
In this section, cloud resource will be provisioned in one application and consumed in another application.
|
||||
|
||||
### Provision Cloud Resource
|
||||
|
||||
Instantiate RDS component with `alibaba-rds` workload type in an [Application](../application.md) to provide cloud resources.
|
||||
|
||||
As we have claimed an RDS instance with ComponentDefinition name `alibaba-rds`.
|
||||
The component in the application should refer to this type.
|
||||
|
||||
```yaml
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: baas-rds
|
||||
spec:
|
||||
components:
|
||||
- name: sample-db
|
||||
type: alibaba-rds
|
||||
properties:
|
||||
name: sample-db
|
||||
engine: mysql
|
||||
engineVersion: "8.0"
|
||||
instanceClass: rds.mysql.c1.large
|
||||
username: oamtest
|
||||
secretName: db-conn
|
||||
```
|
||||
|
||||
Apply the application to Kubernetes and a RDS instance will be automatically provisioned (may take some time, ~2 mins).
|
||||
|
||||
A secret `db-conn` will also be created in the same namespace as that of the application.
|
||||
|
||||
```shell
|
||||
$ kubectl get application
|
||||
NAME AGE
|
||||
baas-rds 9h
|
||||
|
||||
$ kubectl get rdsinstance
|
||||
NAME READY SYNCED STATE ENGINE VERSION AGE
|
||||
sample-db-v1 True True Running mysql 8.0 9h
|
||||
|
||||
$ kubectl get secret
|
||||
NAME TYPE DATA AGE
|
||||
db-conn connection.crossplane.io/v1alpha1 4 9h
|
||||
|
||||
$ ✗ kubectl get secret db-conn -o yaml
|
||||
apiVersion: v1
|
||||
data:
|
||||
endpoint: xxx==
|
||||
password: yyy
|
||||
port: MzMwNg==
|
||||
username: b2FtdGVzdA==
|
||||
kind: Secret
|
||||
```
|
||||
|
||||
### Consuming the Cloud Resource
|
||||
|
||||
In this section, we will show how another component consumes the RDS instance.
|
||||
|
||||
> Note: we recommend defining the cloud resource claiming to an independent application if that cloud resource has
|
||||
> standalone lifecycle.
|
||||
|
||||
#### Step 1: Define a ComponentDefinition with Secret Reference
|
||||
### Register ComponentDefinition `webconsumer` with Secret Reference
|
||||
|
||||
```yaml
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
@@ -517,39 +273,83 @@ spec:
|
||||
The key point is the annotation `//+insertSecretTo=dbConn`, KubeVela will know the parameter is a K8s secret, it will parse
|
||||
the secret and bind the data into the CUE struct `dbConn`.
|
||||
|
||||
Then the `output` can reference the `dbConn` struct for the data value. The name `dbConn` can be any name.
|
||||
Then the `output` can reference the `dbConn` struct for the data value. The name `dbConn` can be any name.
|
||||
It's just an example in this case. The `+insertSecretTo` is keyword, it defines the data binding mechanism.
|
||||
|
||||
Now create the Application to consume the data.
|
||||
### Prepare TraitDefinition `service-binding` to do env-secret mapping
|
||||
|
||||
As for data binding in Application, KubeVela recommends defining a trait to finish the job. We have prepared a common
|
||||
trait for convenience. This trait works well for binding resources' info into pod spec Env.
|
||||
|
||||
```yaml
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: Application
|
||||
kind: TraitDefinition
|
||||
metadata:
|
||||
name: webapp
|
||||
annotations:
|
||||
definition.oam.dev/description: "binding cloud resource secrets to pod env"
|
||||
name: service-binding
|
||||
spec:
|
||||
components:
|
||||
- name: express-server
|
||||
type: webconsumer
|
||||
properties:
|
||||
image: zzxwill/flask-web-application:v0.3.1-crossplane
|
||||
ports: 80
|
||||
dbSecret: db-conn
|
||||
appliesToWorkloads:
|
||||
- webservice
|
||||
- worker
|
||||
schematic:
|
||||
cue:
|
||||
template: |
|
||||
patch: {
|
||||
spec: template: spec: {
|
||||
// +patchKey=name
|
||||
containers: [{
|
||||
name: context.name
|
||||
// +patchKey=name
|
||||
env: [
|
||||
for envName, v in parameter.envMappings {
|
||||
name: envName
|
||||
valueFrom: {
|
||||
secretKeyRef: {
|
||||
name: v.secret
|
||||
if v["key"] != _|_ {
|
||||
key: v.key
|
||||
}
|
||||
if v["key"] == _|_ {
|
||||
key: envName
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
]
|
||||
}]
|
||||
}
|
||||
}
|
||||
|
||||
parameter: {
|
||||
// +usage=The mapping of environment variables to secret
|
||||
envMappings: [string]: [string]: string
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
```shell
|
||||
$ kubectl get application
|
||||
NAME AGE
|
||||
baas-rds 10h
|
||||
webapp 14h
|
||||
With the help of this `service-binding` trait, developers can explicitly set parameter `envMappings` to mapping all
|
||||
environment names with secret key. Here is an example.
|
||||
|
||||
$ kubectl get deployment
|
||||
NAME READY UP-TO-DATE AVAILABLE AGE
|
||||
express-server-v1 1/1 1 1 9h
|
||||
|
||||
$ kubectl port-forward deployment/express-server 80:80
|
||||
```yaml
|
||||
...
|
||||
traits:
|
||||
- type: service-binding
|
||||
properties:
|
||||
envMappings:
|
||||
# environments refer to db-conn secret
|
||||
DB_PASSWORD:
|
||||
secret: db-conn
|
||||
key: password # 1) If the env name is different from secret key, secret key has to be set.
|
||||
endpoint:
|
||||
secret: db-conn # 2) If the env name is the same as the secret key, secret key can be omitted.
|
||||
username:
|
||||
secret: db-conn
|
||||
# environments refer to oss-conn secret
|
||||
BUCKET_NAME:
|
||||
secret: oss-conn
|
||||
key: Bucket
|
||||
...
|
||||
```
|
||||
|
||||
We can see the cloud resource is successfully consumed by the application.
|
||||
|
||||

|
||||
You can see [the end user usage workflow](../end-user/cloud-resources) to know how it used.
|
||||
@@ -571,7 +571,7 @@ context:
|
||||
|
||||
## Dry-Run the `Application`
|
||||
|
||||
When CUE template is good, we can use `vela system dry-run` to dry run and check the rendered resources in real Kubernetes cluster. This command will exactly execute the same render logic in KubeVela's `Application` Controller adn output the result for you.
|
||||
When CUE template is good, we can use `vela system dry-run` to dry run and check the rendered resources in real Kubernetes cluster. This command will exactly execute the same render logic in KubeVela's `Application` Controller and output the result for you.
|
||||
|
||||
First, we need use `mergedef.sh` to merge the definition and cue files.
|
||||
|
||||
@@ -664,3 +664,485 @@ spec:
|
||||
|
||||
---
|
||||
```
|
||||
|
||||
`-d` or `--definitions` is a useful flag permitting user to provide capability
|
||||
definitions used in the application from local files.
|
||||
`dry-run` cmd will prioritize the provided capabilities than the living
|
||||
ones in the cluster.
|
||||
If the capability is not found in local files and cluster, it will raise an error.
|
||||
|
||||
## Live-Diff the `Application`
|
||||
|
||||
`vela system live-diff` allows users to have a preview of what would change if
|
||||
upgrade an application.
|
||||
It basically generates a diff between the specific revision of an application
|
||||
and the result of `vela system dry-run`.
|
||||
The result shows the changes (added/modified/removed/no_change) of the application as well as its sub-resources, such as components and traits.
|
||||
`live-diff` will not make any changes to the living cluster, so it's very
|
||||
helpful if you want to update an application but worry about the unknown results
|
||||
that may be produced.
|
||||
|
||||
Let's prepare an application and deploy it.
|
||||
|
||||
> ComponentDefinitions and TraitDefinitions used in this sample are stored in
|
||||
`./doc/examples/live-diff/definitions`.
|
||||
|
||||
```yaml
|
||||
# app.yaml
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: livediff-demo
|
||||
spec:
|
||||
components:
|
||||
- name: myweb-1
|
||||
type: myworker
|
||||
properties:
|
||||
image: "busybox"
|
||||
cmd:
|
||||
- sleep
|
||||
- "1000"
|
||||
lives: "3"
|
||||
enemies: "alien"
|
||||
traits:
|
||||
- type: myingress
|
||||
properties:
|
||||
domain: "www.example.com"
|
||||
http:
|
||||
"/": 80
|
||||
- type: myscaler
|
||||
properties:
|
||||
replicas: 2
|
||||
- name: myweb-2
|
||||
type: myworker
|
||||
properties:
|
||||
image: "busybox"
|
||||
cmd:
|
||||
- sleep
|
||||
- "1000"
|
||||
lives: "3"
|
||||
enemies: "alien"
|
||||
```
|
||||
|
||||
```shell
|
||||
kubectl apply ./doc/examples/live-diff/definitions
|
||||
kubectl apply ./doc/examples/live-diff/app.yaml
|
||||
```
|
||||
|
||||
Then, assume we want to update the application with below configuration.
|
||||
To preview changes brought by updating while not really apply updated
|
||||
configuration into the cluster, we can use `live-diff` here.
|
||||
|
||||
```yaml
|
||||
# app-updated.yaml
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: livediff-demo
|
||||
spec:
|
||||
components:
|
||||
- name: myweb-1
|
||||
type: myworker
|
||||
properties:
|
||||
image: "busybox"
|
||||
cmd:
|
||||
- sleep
|
||||
- "2000" # change a component property
|
||||
lives: "3"
|
||||
enemies: "alien"
|
||||
traits:
|
||||
- type: myingress
|
||||
properties:
|
||||
domain: "www.example.com"
|
||||
http:
|
||||
"/": 90 # change a trait
|
||||
# - type: myscaler # remove a trait
|
||||
# properties:
|
||||
# replicas: 2
|
||||
- name: myweb-2
|
||||
type: myworker
|
||||
properties: # no change on component property
|
||||
image: "busybox"
|
||||
cmd:
|
||||
- sleep
|
||||
- "1000"
|
||||
lives: "3"
|
||||
enemies: "alien"
|
||||
traits:
|
||||
- type: myingress # add a trait
|
||||
properties:
|
||||
domain: "www.example.com"
|
||||
http:
|
||||
"/": 90
|
||||
- name: myweb-3 # add a component
|
||||
type: myworker
|
||||
properties:
|
||||
image: "busybox"
|
||||
cmd:
|
||||
- sleep
|
||||
- "1000"
|
||||
lives: "3"
|
||||
enemies: "alien"
|
||||
traits:
|
||||
- type: myingress
|
||||
properties:
|
||||
domain: "www.example.com"
|
||||
http:
|
||||
"/": 90
|
||||
```
|
||||
|
||||
```shell
|
||||
vela system live-diff -f ./doc/examples/live-diff/app-modified.yaml -r livediff-demo-v1
|
||||
```
|
||||
|
||||
`-r` or `--revision` is a flag that specifies the name of a living
|
||||
`ApplicationRevision` with which you want to compare the updated application.
|
||||
|
||||
`-c` or `--context` is a flag that specifies the number of lines shown around a
|
||||
change.
|
||||
The unchanged lines which are out of the context of a change will be omitted.
|
||||
It's useful if the diff result contains a lot of unchanged content while
|
||||
you just want to focus on the changed ones.
|
||||
|
||||
<details><summary> Click to view diff result </summary>
|
||||
|
||||
```shell
|
||||
---
|
||||
# Application (application-sample) has been modified(*)
|
||||
---
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: Application
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
- name: application-sample
|
||||
+ name: livediff-demo
|
||||
namespace: default
|
||||
spec:
|
||||
components:
|
||||
- name: myweb-1
|
||||
+ properties:
|
||||
+ cmd:
|
||||
+ - sleep
|
||||
+ - "2000"
|
||||
+ enemies: alien
|
||||
+ image: busybox
|
||||
+ lives: "3"
|
||||
+ traits:
|
||||
+ - properties:
|
||||
+ domain: www.example.com
|
||||
+ http:
|
||||
+ /: 90
|
||||
+ type: myingress
|
||||
+ type: myworker
|
||||
+ - name: myweb-2
|
||||
properties:
|
||||
cmd:
|
||||
- sleep
|
||||
- "1000"
|
||||
enemies: alien
|
||||
image: busybox
|
||||
lives: "3"
|
||||
traits:
|
||||
- properties:
|
||||
domain: www.example.com
|
||||
http:
|
||||
- /: 80
|
||||
+ /: 90
|
||||
type: myingress
|
||||
- - properties:
|
||||
- replicas: 2
|
||||
- type: myscaler
|
||||
type: myworker
|
||||
- - name: myweb-2
|
||||
+ - name: myweb-3
|
||||
properties:
|
||||
cmd:
|
||||
- sleep
|
||||
- "1000"
|
||||
enemies: alien
|
||||
image: busybox
|
||||
lives: "3"
|
||||
+ traits:
|
||||
+ - properties:
|
||||
+ domain: www.example.com
|
||||
+ http:
|
||||
+ /: 90
|
||||
+ type: myingress
|
||||
type: myworker
|
||||
status:
|
||||
batchRollingState: ""
|
||||
currentBatch: 0
|
||||
rollingState: ""
|
||||
upgradedReadyReplicas: 0
|
||||
upgradedReplicas: 0
|
||||
|
||||
---
|
||||
## Component (myweb-1) has been modified(*)
|
||||
---
|
||||
apiVersion: core.oam.dev/v1alpha2
|
||||
kind: Component
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
- app.oam.dev/name: application-sample
|
||||
+ app.oam.dev/name: livediff-demo
|
||||
name: myweb-1
|
||||
spec:
|
||||
workload:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app.oam.dev/appRevision: ""
|
||||
app.oam.dev/component: myweb-1
|
||||
- app.oam.dev/name: application-sample
|
||||
+ app.oam.dev/name: livediff-demo
|
||||
workload.oam.dev/type: myworker
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app.oam.dev/component: myweb-1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.oam.dev/component: myweb-1
|
||||
spec:
|
||||
containers:
|
||||
- command:
|
||||
- sleep
|
||||
- - "1000"
|
||||
+ - "2000"
|
||||
image: busybox
|
||||
name: myweb-1
|
||||
status:
|
||||
observedGeneration: 0
|
||||
|
||||
---
|
||||
### Component (myweb-1) / Trait (myingress/ingress) has been modified(*)
|
||||
---
|
||||
apiVersion: networking.k8s.io/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
labels:
|
||||
app.oam.dev/appRevision: ""
|
||||
app.oam.dev/component: myweb-1
|
||||
- app.oam.dev/name: application-sample
|
||||
+ app.oam.dev/name: livediff-demo
|
||||
trait.oam.dev/resource: ingress
|
||||
trait.oam.dev/type: myingress
|
||||
name: myweb-1
|
||||
spec:
|
||||
rules:
|
||||
- host: www.example.com
|
||||
http:
|
||||
paths:
|
||||
- backend:
|
||||
serviceName: myweb-1
|
||||
- servicePort: 80
|
||||
+ servicePort: 90
|
||||
path: /
|
||||
|
||||
---
|
||||
### Component (myweb-1) / Trait (myingress/service) has been modified(*)
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
app.oam.dev/appRevision: ""
|
||||
app.oam.dev/component: myweb-1
|
||||
- app.oam.dev/name: application-sample
|
||||
+ app.oam.dev/name: livediff-demo
|
||||
trait.oam.dev/resource: service
|
||||
trait.oam.dev/type: myingress
|
||||
name: myweb-1
|
||||
spec:
|
||||
ports:
|
||||
- - port: 80
|
||||
- targetPort: 80
|
||||
+ - port: 90
|
||||
+ targetPort: 90
|
||||
selector:
|
||||
app.oam.dev/component: myweb-1
|
||||
|
||||
---
|
||||
### Component (myweb-1) / Trait (myscaler/scaler) has been removed(-)
|
||||
---
|
||||
- apiVersion: core.oam.dev/v1alpha2
|
||||
- kind: ManualScalerTrait
|
||||
- metadata:
|
||||
- labels:
|
||||
- app.oam.dev/appRevision: ""
|
||||
- app.oam.dev/component: myweb-1
|
||||
- app.oam.dev/name: application-sample
|
||||
- trait.oam.dev/resource: scaler
|
||||
- trait.oam.dev/type: myscaler
|
||||
- spec:
|
||||
- replicaCount: 2
|
||||
|
||||
---
|
||||
## Component (myweb-2) has been modified(*)
|
||||
---
|
||||
apiVersion: core.oam.dev/v1alpha2
|
||||
kind: Component
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
- app.oam.dev/name: application-sample
|
||||
+ app.oam.dev/name: livediff-demo
|
||||
name: myweb-2
|
||||
spec:
|
||||
workload:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app.oam.dev/appRevision: ""
|
||||
app.oam.dev/component: myweb-2
|
||||
- app.oam.dev/name: application-sample
|
||||
+ app.oam.dev/name: livediff-demo
|
||||
workload.oam.dev/type: myworker
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app.oam.dev/component: myweb-2
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.oam.dev/component: myweb-2
|
||||
spec:
|
||||
containers:
|
||||
- command:
|
||||
- sleep
|
||||
- "1000"
|
||||
image: busybox
|
||||
name: myweb-2
|
||||
status:
|
||||
observedGeneration: 0
|
||||
|
||||
---
|
||||
### Component (myweb-2) / Trait (myingress/ingress) has been added(+)
|
||||
---
|
||||
+ apiVersion: networking.k8s.io/v1beta1
|
||||
+ kind: Ingress
|
||||
+ metadata:
|
||||
+ labels:
|
||||
+ app.oam.dev/appRevision: ""
|
||||
+ app.oam.dev/component: myweb-2
|
||||
+ app.oam.dev/name: livediff-demo
|
||||
+ trait.oam.dev/resource: ingress
|
||||
+ trait.oam.dev/type: myingress
|
||||
+ name: myweb-2
|
||||
+ spec:
|
||||
+ rules:
|
||||
+ - host: www.example.com
|
||||
+ http:
|
||||
+ paths:
|
||||
+ - backend:
|
||||
+ serviceName: myweb-2
|
||||
+ servicePort: 90
|
||||
+ path: /
|
||||
|
||||
---
|
||||
### Component (myweb-2) / Trait (myingress/service) has been added(+)
|
||||
---
|
||||
+ apiVersion: v1
|
||||
+ kind: Service
|
||||
+ metadata:
|
||||
+ labels:
|
||||
+ app.oam.dev/appRevision: ""
|
||||
+ app.oam.dev/component: myweb-2
|
||||
+ app.oam.dev/name: livediff-demo
|
||||
+ trait.oam.dev/resource: service
|
||||
+ trait.oam.dev/type: myingress
|
||||
+ name: myweb-2
|
||||
+ spec:
|
||||
+ ports:
|
||||
+ - port: 90
|
||||
+ targetPort: 90
|
||||
+ selector:
|
||||
+ app.oam.dev/component: myweb-2
|
||||
|
||||
---
|
||||
## Component (myweb-3) has been added(+)
|
||||
---
|
||||
+ apiVersion: core.oam.dev/v1alpha2
|
||||
+ kind: Component
|
||||
+ metadata:
|
||||
+ creationTimestamp: null
|
||||
+ labels:
|
||||
+ app.oam.dev/name: livediff-demo
|
||||
+ name: myweb-3
|
||||
+ spec:
|
||||
+ workload:
|
||||
+ apiVersion: apps/v1
|
||||
+ kind: Deployment
|
||||
+ metadata:
|
||||
+ labels:
|
||||
+ app.oam.dev/appRevision: ""
|
||||
+ app.oam.dev/component: myweb-3
|
||||
+ app.oam.dev/name: livediff-demo
|
||||
+ workload.oam.dev/type: myworker
|
||||
+ spec:
|
||||
+ selector:
|
||||
+ matchLabels:
|
||||
+ app.oam.dev/component: myweb-3
|
||||
+ template:
|
||||
+ metadata:
|
||||
+ labels:
|
||||
+ app.oam.dev/component: myweb-3
|
||||
+ spec:
|
||||
+ containers:
|
||||
+ - command:
|
||||
+ - sleep
|
||||
+ - "1000"
|
||||
+ image: busybox
|
||||
+ name: myweb-3
|
||||
+ status:
|
||||
+ observedGeneration: 0
|
||||
|
||||
---
|
||||
### Component (myweb-3) / Trait (myingress/ingress) has been added(+)
|
||||
---
|
||||
+ apiVersion: networking.k8s.io/v1beta1
|
||||
+ kind: Ingress
|
||||
+ metadata:
|
||||
+ labels:
|
||||
+ app.oam.dev/appRevision: ""
|
||||
+ app.oam.dev/component: myweb-3
|
||||
+ app.oam.dev/name: livediff-demo
|
||||
+ trait.oam.dev/resource: ingress
|
||||
+ trait.oam.dev/type: myingress
|
||||
+ name: myweb-3
|
||||
+ spec:
|
||||
+ rules:
|
||||
+ - host: www.example.com
|
||||
+ http:
|
||||
+ paths:
|
||||
+ - backend:
|
||||
+ serviceName: myweb-3
|
||||
+ servicePort: 90
|
||||
+ path: /
|
||||
|
||||
---
|
||||
### Component (myweb-3) / Trait (myingress/service) has been added(+)
|
||||
---
|
||||
+ apiVersion: v1
|
||||
+ kind: Service
|
||||
+ metadata:
|
||||
+ labels:
|
||||
+ app.oam.dev/appRevision: ""
|
||||
+ app.oam.dev/component: myweb-3
|
||||
+ app.oam.dev/name: livediff-demo
|
||||
+ trait.oam.dev/resource: service
|
||||
+ trait.oam.dev/type: myingress
|
||||
+ name: myweb-3
|
||||
+ spec:
|
||||
+ ports:
|
||||
+ - port: 90
|
||||
+ targetPort: 90
|
||||
+ selector:
|
||||
+ app.oam.dev/component: myweb-3
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
@@ -1,14 +1,12 @@
|
||||
---
|
||||
title: Definition CRD
|
||||
title: Programmable Building Blocks
|
||||
---
|
||||
|
||||
This documentation explains `ComponentDefinition` and `TraitDefinition` in detail.
|
||||
|
||||
> All definition objects are expected to be maintained and installed by platform team, think them as *capability providers* in your platform.
|
||||
|
||||
## Overview
|
||||
|
||||
Essentially, a definition object in KubeVela is consisted by three section:
|
||||
Essentially, a definition object in KubeVela is a programmable build block. A definition object normally includes several information to model a certain platform capability that would used in further application deployment:
|
||||
- **Capability Indicator**
|
||||
- `ComponentDefinition` uses `spec.workload` to indicate the workload type of this component.
|
||||
- `TraitDefinition` uses `spec.definitionRef` to indicate the provider of this trait.
|
||||
@@ -132,7 +130,7 @@ Please take care of this field, it's really important and useful for serious lar
|
||||
|
||||
### Capability Encapsulation and Abstraction
|
||||
|
||||
The templating and parameterizing of given capability are defined in `spec.schematic` field. For example, below is the full definition of *Web Service* type in KubeVela:
|
||||
The programmable template of given capability are defined in `spec.schematic` field. For example, below is the full definition of *Web Service* type in KubeVela:
|
||||
|
||||
<details>
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
title: Defining KEDA as Autoscaling Trait
|
||||
title: KEDA as Autoscaling Trait
|
||||
---
|
||||
|
||||
> Before continue, make sure you have learned about the concepts of [Definition Objects](definition-and-templates) and [Defining Traits with CUE](/docs/cue/trait) section.
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
title: Generate Forms from Definitions
|
||||
title: Generating UI Forms
|
||||
---
|
||||
|
||||
For any capabilities installed via [Definition Objects](./definition-and-templates),
|
||||
@@ -60,7 +60,7 @@ You can render above schema into a form by [form-render](https://github.com/alib
|
||||
|
||||
Below is a form rendered with `form-render`:
|
||||
|
||||

|
||||

|
||||
|
||||
# What's Next
|
||||
|
||||
|
||||
@@ -1,20 +1,12 @@
|
||||
---
|
||||
title: Overview
|
||||
title: Resource Model
|
||||
---
|
||||
|
||||
This documentation will explain what is `Application` object and why you need it.
|
||||
|
||||
## Motivation
|
||||
|
||||
Encapsulation based abstraction is probably the mostly widely used approach to enable easier developer experience and allow users to deliver the whole application resources as one unit. For example, many tools today encapsulate Kubernetes *Deployment* and *Service* into a *Web Service* module, and then instantiate this module by simply providing parameters such as *image=foo* and *ports=80*. This pattern can be found in cdk8s (e.g. [`web-service.ts` ](https://github.com/awslabs/cdk8s/blob/master/examples/typescript/web-service/web-service.ts)), CUE (e.g. [`kube.cue`](https://github.com/cuelang/cue/blob/b8b489251a3f9ea318830788794c1b4a753031c0/doc/tutorial/kubernetes/quick/services/kube.cue#L70)), and many widely used Helm charts (e.g. [Web Service](https://docs.bitnami.com/tutorials/create-your-first-helm-chart/)).
|
||||
|
||||
Despite the efficiency and extensibility in defining abstractions, both DSL tools (e.g. cdk8s , CUE and Helm templating) are mostly used as client side tools and can be barely used as a platform level building block. This leaves platform builders either have to create restricted/inextensible abstractions, or re-invent the wheels of what DSL/templating has already been doing great.
|
||||
|
||||
KubeVela allows platform teams to create developer-centric abstractions with DSL/templating but maintain them with the battle tested [Kubernetes Control Loop](https://kubernetes.io/docs/concepts/architecture/controller/).
|
||||
This documentation will explain the core resource model of KubeVela which is fully powered by Open Application Model (OAM).
|
||||
|
||||
## Application
|
||||
|
||||
First of all, KubeVela introduces an `Application` CRD as its main abstraction that could capture a full application deployment. To model the modern microservices, every application is composed by multiple components with attached traits (operational behaviors). For example:
|
||||
KubeVela introduces an `Application` CRD as its main API that captures a full application deployment. Every application is composed by multiple components with attachable operational behaviors (traits). For example:
|
||||
|
||||
```yaml
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
@@ -43,29 +35,168 @@ spec:
|
||||
bucket: "my-bucket"
|
||||
```
|
||||
|
||||
The schema of *component* and *trait* specification in this application is actually enforced by another set of building block objects named *"definitions"*, for example, `ComponentDefinition` and `TraitDefinition`.
|
||||
Though the application object doesn't have fixed schema, it is a composition object assembled by several *programmable building blocks* as shown below.
|
||||
|
||||
`XxxDefinition` resources are designed to leverage encapsulation solutions such as `CUE`, `Helm` and `Terraform modules` to template and parameterize Kubernetes resources as well as cloud services. This enables users to assemble templated capabilities into an `Application` by simply setting parameters. In the `application-sample` above, it models a Kubernetes Deployment (component `foo`) to run a container and an Alibaba Cloud OSS bucket (component `bar`) alongside.
|
||||
## Component
|
||||
|
||||
This abstraction mechanism is the key for KubeVela to provide *PaaS-like* experience (*i.e. app-centric, higher level abstractions, self-service operations etc*) to end users, with benefits highlighted as below.
|
||||
The component model in KubeVela is designed to allow *component providers* to encapsulate deployable/provisionable entities by leveraging widely adopted tools such as CUE, Helm etc, and gives developers an easier path to deploy complicated microservices.
|
||||
|
||||
### No "Juggling" Approach to Manage Kubernetes Objects
|
||||
Template-based encapsulation is probably the most widely used approach to enable efficient application deployment and expose easier interfaces to end users. For example, many tools today encapsulate Kubernetes *Deployment* and *Service* into a *Web Service* module, and then instantiate this module by simply providing parameters such as *image=foo* and *ports=80*. This pattern can be found in cdk8s (e.g. [`web-service.ts` ](https://github.com/awslabs/cdk8s/blob/master/examples/typescript/web-service/web-service.ts)), CUE (e.g. [`kube.cue`](https://github.com/cuelang/cue/blob/b8b489251a3f9ea318830788794c1b4a753031c0/doc/tutorial/kubernetes/quick/services/kube.cue#L70)), and many widely used Helm charts (e.g. [Web Service](https://docs.bitnami.com/tutorials/create-your-first-helm-chart/)).
|
||||
|
||||
For example, as the platform team we want to leverage Istio as the Service Mesh layer to control the traffic to certain `Deployment` instances. But this could be really painful today because we have to enforce end users to define and manage a set of Kubernetes resources in a "juggling" approach. For example, in a simple canary rollout case, the end users have to carefully manage a primary *Deployment*, a primary *Service*, a *root Service*, a canary *Deployment*, a canary *Service*, and have to probably rename the *Deployment* instance after canary promotion (this is actually unacceptable in production because renaming will lead to the app restart). What's worse, we have to expect the users properly set the labels and selectors on those objects carefully because they are the key to ensure proper accessibility of every app instance and the only revision mechanism our Istio controller could count on.
|
||||
> Hence, a component provider could be anyone who packages software components in the form of Helm charts or CUE modules. Think about a 3rd-party software distributor, a DevOps team, or even your CI pipeline.
|
||||
|
||||
The issue above could be even more painful if the component instance is not a *Deployment*, but a *StatefulSet* or custom workload type. For example, normally it doesn't make sense to replicate a *StatefulSet* instance during rollout; this means the users have to maintain the name, revision, label, selector, and app instances in a totally different approach from *Deployment*.
|
||||
In the above example, it describes an application composed of a Kubernetes stateless workload (component `foo`) and an Alibaba Cloud OSS bucket (component `bar`) alongside.
|
||||
|
||||
#### Standard Contract Behind The Abstraction
|
||||
### How it Works?
|
||||
|
||||
KubeVela is designed to relieve such burden of managing versionized Kubernetes resources manually. In a nutshell, all the needed Kubernetes resources for an app are now encapsulated in a single abstraction, and KubeVela will maintain the instance name, revisions, labels and selector by the battle-tested reconcile loop automation, not by human hand. In the meantime, the existence of definition objects allows the platform team to customize the details of all the above metadata behind the abstraction, and even control the behavior of how to do revision.
|
||||
In the above example, `type: worker` means the specification of this component (claimed in the following `properties` section) will be enforced by a `ComponentDefinition` object named `worker` as below:
|
||||
|
||||
Thus, all those metadata now become a standard contract that any "day 2" operation controller such as Istio or rollout can rely on. This is the key to ensure our platform could provide user friendly experience but keep "transparent" to the operational behaviors.
|
||||
```yaml
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: ComponentDefinition
|
||||
metadata:
|
||||
name: worker
|
||||
annotations:
|
||||
definition.oam.dev/description: "Describes long-running, scalable, containerized services that running at backend. They do NOT have network endpoint to receive external network traffic."
|
||||
spec:
|
||||
workload:
|
||||
definition:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
schematic:
|
||||
cue:
|
||||
template: |
|
||||
output: {
|
||||
apiVersion: "apps/v1"
|
||||
kind: "Deployment"
|
||||
spec: {
|
||||
selector: matchLabels: {
|
||||
"app.oam.dev/component": context.name
|
||||
}
|
||||
template: {
|
||||
metadata: labels: {
|
||||
"app.oam.dev/component": context.name
|
||||
}
|
||||
spec: {
|
||||
containers: [{
|
||||
name: context.name
|
||||
image: parameter.image
|
||||
|
||||
### No Configuration Drift
|
||||
if parameter["cmd"] != _|_ {
|
||||
command: parameter.cmd
|
||||
}
|
||||
}]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
parameter: {
|
||||
image: string
|
||||
cmd?: [...string]
|
||||
}
|
||||
```
|
||||
|
||||
Lightweight and flexible in defining abstractions, most of the existing encapsulation solutions today work at the client side, for example, DSL/IaC (Infrastructure as Code) tools and Helm. This approach is easy to adopt and is less invasive to the user cluster.
|
||||
|
||||
But client side abstractions always lead to an issue called *Infrastructure/Configuration Drift*, i.e. the generated component instances are not in line with the expected configuration. This could be caused by incomplete coverage, less-than-perfect processes or emergency changes.
|
||||
Hence, the `properties` section of `backend` only exposes two parameters to fill: `image` and `cmd`; this is enforced by the `parameter` list of the `.spec.template` field of the definition.
|
||||
|
||||
Hence, all abstractions in KubeVela are designed to be maintained with the [Kubernetes Control Loop](https://kubernetes.io/docs/concepts/architecture/controller/) and leverage the Kubernetes control plane to eliminate the issue of configuration drifting, while still keeping the flexibility and velocity enabled by existing encapsulation solutions (e.g. DSL/IaC and templating).
|
||||
|
||||
## Traits
|
||||
|
||||
Traits are operational features that can be attached to components as needed. Traits are normally considered platform features and maintained by the platform team. In the above example, `type: autoscaler` in `frontend` means the specification (i.e. `properties` section)
|
||||
of this trait will be enforced by a `TraitDefinition` object named `autoscaler` as below:
|
||||
|
||||
```yaml
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: TraitDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
definition.oam.dev/description: "configure k8s HPA for Deployment"
|
||||
name: hpa
|
||||
spec:
|
||||
appliesToWorkloads:
|
||||
- webservice
|
||||
- worker
|
||||
schematic:
|
||||
cue:
|
||||
template: |
|
||||
outputs: hpa: {
|
||||
apiVersion: "autoscaling/v2beta2"
|
||||
kind: "HorizontalPodAutoscaler"
|
||||
metadata: name: context.name
|
||||
spec: {
|
||||
scaleTargetRef: {
|
||||
apiVersion: "apps/v1"
|
||||
kind: "Deployment"
|
||||
name: context.name
|
||||
}
|
||||
minReplicas: parameter.min
|
||||
maxReplicas: parameter.max
|
||||
metrics: [{
|
||||
type: "Resource"
|
||||
resource: {
|
||||
name: "cpu"
|
||||
target: {
|
||||
type: "Utilization"
|
||||
averageUtilization: parameter.cpuUtil
|
||||
}
|
||||
}
|
||||
}]
|
||||
}
|
||||
}
|
||||
parameter: {
|
||||
min: *1 | int
|
||||
max: *10 | int
|
||||
cpuUtil: *50 | int
|
||||
}
|
||||
```
|
||||
|
||||
The application also has a `sidecar` trait.
|
||||
|
||||
```yaml
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: TraitDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
definition.oam.dev/description: "add sidecar to the app"
|
||||
name: sidecar
|
||||
spec:
|
||||
appliesToWorkloads:
|
||||
- webservice
|
||||
- worker
|
||||
schematic:
|
||||
cue:
|
||||
template: |-
|
||||
patch: {
|
||||
// +patchKey=name
|
||||
spec: template: spec: containers: [parameter]
|
||||
}
|
||||
parameter: {
|
||||
name: string
|
||||
image: string
|
||||
command?: [...string]
|
||||
}
|
||||
```
|
||||
|
||||
Please note that the end users of KubeVela do NOT need to know about definition objects, they learn how to use a given capability with visualized forms (or the JSON schema of parameters if they prefer). Please check the [Generate Forms from Definitions](/docs/platform-engineers/openapi-v3-json-schema) section about how this is achieved.
|
||||
|
||||
## Standard Contract Behind The Abstractions
|
||||
|
||||
Once the application is deployed, KubeVela will index and manage the underlying instances with name, revisions, labels, selector, etc. in an automatic approach. These metadata are shown as below.
|
||||
|
||||
| Label | Description |
|
||||
| :--: | :---------: |
|
||||
|`workload.oam.dev/type=<component definition name>` | The name of its corresponding `ComponentDefinition` |
|
||||
|`trait.oam.dev/type=<trait definition name>` | The name of its corresponding `TraitDefinition` |
|
||||
|`app.oam.dev/name=<app name>` | The name of the application it belongs to |
|
||||
|`app.oam.dev/component=<component name>` | The name of the component it belongs to |
|
||||
|`trait.oam.dev/resource=<name of trait resource instance>` | The name of trait resource instance |
|
||||
|`app.oam.dev/appRevision=<name of app revision>` | The name of the application revision it belongs to |
|
||||
|
||||
|
||||
Consider these metadata as a standard contract for any "day 2" operation controller such as rollout controller to work on KubeVela deployed applications. This is the key to ensure the interoperability for KubeVela based platform as well.
|
||||
|
||||
## No Configuration Drift
|
||||
|
||||
Despite the efficiency and extensibility in abstracting application deployment, IaC (Infrastructure-as-Code) tools may lead to an issue called *Infrastructure/Configuration Drift*, i.e. the generated component instances are not in line with the expected configuration. This could be caused by incomplete coverage, less-than-perfect processes or emergency changes. This makes them barely usable as platform-level building blocks.
|
||||
|
||||
Hence, KubeVela is designed to maintain all these programmable capabilities with the [Kubernetes Control Loop](https://kubernetes.io/docs/concepts/architecture/controller/) and leverage the Kubernetes control plane to eliminate the issue of configuration drifting, while still keeping the flexibility and velocity enabled by IaC.
|
||||
|
||||
|
Before Width: | Height: | Size: 82 KiB After Width: | Height: | Size: 82 KiB |
|
Before Width: | Height: | Size: 94 KiB After Width: | Height: | Size: 94 KiB |
@@ -1,16 +1,16 @@
|
||||
---
|
||||
title: Multi-Cluster Application Deployment
|
||||
title: Multi-Cluster Deployment
|
||||
---
|
||||
|
||||
# Introduction
|
||||
## Introduction
|
||||
|
||||
Modern application infrastructure involves multiple clusters to ensure high availability and maximize service throughput. In this section, we will introduce how to use KubeVela to achieve application deployment across multiple clusters with following features supported:
|
||||
- Rolling Upgrade: To continuously deploy apps requires to rollout in a safe manner which usually involves step by step rollout batches and analysis.
|
||||
- Traffic shifting: When rolling upgrade an app, it needs to split the traffic onto both the old and new revisions to verify the new version while preserving service availability.
|
||||
|
||||
## AppDeployment CRD
|
||||
### AppDeployment
|
||||
|
||||
The `AppDeployment` CRD has been provided to satisfy such requirements. Here's an overview of the API:
|
||||
The `AppDeployment` API in KubeVela is provided to satisfy such requirements. Here's an overview of the API:
|
||||
|
||||
```yaml
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
@@ -72,7 +72,7 @@ spec:
|
||||
replicas: 5
|
||||
```
|
||||
|
||||
## Cluster CRD
|
||||
### Cluster
|
||||
|
||||
The clusters selected in the `placement` part from above is defined in Cluster CRD. Here's what it looks like:
|
||||
|
||||
@@ -99,7 +99,7 @@ data:
|
||||
config: ... # kubeconfig data
|
||||
```
|
||||
|
||||
# Quickstart
|
||||
## Quickstart
|
||||
|
||||
Here's a step-by-step tutorial for you to try out. All of the yaml files are from [`docs/examples/appdeployment/`](https://github.com/oam-dev/kubevela/tree/master/docs/examples/appdeployment).
|
||||
You must run all commands in that directory.
|
||||
|
||||
@@ -25,7 +25,7 @@ We design KubeVela rollout solutions with the following principles in mind
|
||||
in a production environment including Blue/Green, Canary and A/B testing.
|
||||
|
||||
|
||||
## AppRollout Example
|
||||
## AppRollout
|
||||
Here is a simple `AppRollout` that upgrades an application from v1 to v2 in three batches. The
|
||||
first batch contains only 1 pod while the rest of the batches split the rest.
|
||||
|
||||
@@ -48,218 +48,113 @@ spec:
|
||||
batchPartition: 1
|
||||
```
|
||||
|
||||
## User Experience Workflow
|
||||
## User Workflow
|
||||
Here is the end to end user experience based on [CloneSet](https://openkruise.io/en-us/docs/cloneset.html)
|
||||
|
||||
1. Install CloneSet and create a `ComponentDefinition` for it.
|
||||
```shell
|
||||
helm install kruise https://github.com/openkruise/kruise/releases/download/v0.7.0/kruise-chart.tgz
|
||||
```
|
||||
|
||||
```yaml
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: ComponentDefinition
|
||||
metadata:
|
||||
name: clonesetservice
|
||||
namespace: vela-system
|
||||
annotations:
|
||||
definition.oam.dev/description: "Describes long-running, scalable, containerized services that have a stable network endpoint to receive external network traffic from customers.
|
||||
If workload type is skipped for any service defined in Appfile, it will be defaulted to `webservice` type."
|
||||
spec:
|
||||
workload:
|
||||
definition:
|
||||
apiVersion: apps.kruise.io/v1alpha1
|
||||
kind: CloneSet
|
||||
schematic:
|
||||
cue:
|
||||
template: |
|
||||
output: {
|
||||
apiVersion: "apps.kruise.io/v1alpha1"
|
||||
kind: "CloneSet"
|
||||
metadata: labels: {
|
||||
"app.oam.dev/component": context.name
|
||||
}
|
||||
spec: {
|
||||
if parameter["replicas"] != _|_ {
|
||||
replicas: parameter.replicas
|
||||
}
|
||||
selector: matchLabels: {
|
||||
"app.oam.dev/component": context.name
|
||||
}
|
||||
1. Install CloneSet and its `ComponentDefinition`.
|
||||
|
||||
template: {
|
||||
metadata: labels: {
|
||||
"app.oam.dev/component": context.name
|
||||
}
|
||||
Since CloneSet is a customized workload for Kubernetes, we need to install its controller and component definition manually to the KubeVela platform.
|
||||
|
||||
spec: {
|
||||
containers: [{
|
||||
name: context.name
|
||||
image: parameter.image
|
||||
|
||||
if parameter["cmd"] != _|_ {
|
||||
command: parameter.cmd
|
||||
}
|
||||
|
||||
if parameter["env"] != _|_ {
|
||||
env: parameter.env
|
||||
}
|
||||
|
||||
if context["config"] != _|_ {
|
||||
env: context.config
|
||||
}
|
||||
|
||||
ports: [{
|
||||
containerPort: parameter.port
|
||||
}]
|
||||
|
||||
if parameter["cpu"] != _|_ {
|
||||
resources: {
|
||||
limits:
|
||||
cpu: parameter.cpu
|
||||
requests:
|
||||
cpu: parameter.cpu
|
||||
}
|
||||
}
|
||||
}]
|
||||
}
|
||||
}
|
||||
if parameter["updateStrategyType"] != _|_ {
|
||||
updateStrategy: {
|
||||
type: parameter.updateStrategyType
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
parameter: {
|
||||
// +usage=Which image would you like to use for your service
|
||||
// +short=i
|
||||
image: string
|
||||
|
||||
// +usage=Commands to run in the container
|
||||
cmd?: [...string]
|
||||
|
||||
// +usage=Which port do you want customer traffic sent to
|
||||
// +short=p
|
||||
port: *80 | int
|
||||
// +usage=Define arguments by using environment variables
|
||||
env?: [...{
|
||||
// +usage=Environment variable name
|
||||
name: string
|
||||
// +usage=The value of the environment variable
|
||||
value?: string
|
||||
// +usage=Specifies a source the value of this var should come from
|
||||
valueFrom?: {
|
||||
// +usage=Selects a key of a secret in the pod's namespace
|
||||
secretKeyRef: {
|
||||
// +usage=The name of the secret in the pod's namespace to select from
|
||||
name: string
|
||||
// +usage=The key of the secret to select from. Must be a valid secret key
|
||||
key: string
|
||||
}
|
||||
}
|
||||
}]
|
||||
// +usage=Number of CPU units for the service, like `0.5` (0.5 CPU core), `1` (1 CPU core)
|
||||
cpu?: string
|
||||
// +usage=Cloneset updateStrategy, candidates are `ReCreate`/`InPlaceIfPossible`/`InPlaceOnly`
|
||||
updateStrategyType?: string
|
||||
// +usage=Number of pods in the cloneset
|
||||
replicas?: int
|
||||
}
|
||||
```shell
|
||||
helm install kruise https://github.com/openkruise/kruise/releases/download/v0.7.0/kruise-chart.tgz
|
||||
```
|
||||
|
||||
2. Apply an application to the cluster
|
||||
```yaml
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: test-rolling
|
||||
annotations:
|
||||
"app.oam.dev/rolling-components": "metrics-provider"
|
||||
"app.oam.dev/rollout-template": "true"
|
||||
spec:
|
||||
components:
|
||||
- name: metrics-provider
|
||||
type: clonesetservice
|
||||
properties:
|
||||
cmd:
|
||||
- ./podinfo
|
||||
- stress-cpu=1
|
||||
image: stefanprodan/podinfo:4.0.6
|
||||
port: 8080
|
||||
updateStrategyType: InPlaceIfPossible
|
||||
replicas: 5
|
||||
```
|
||||
```shell
|
||||
kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/docs/examples/cloneset-rollout/clonesetDefinition.yaml
|
||||
```
|
||||
|
||||
3. Apply the following rollout to upgrade the application to v1
|
||||
```yaml
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: AppRollout
|
||||
metadata:
|
||||
name: rolling-example
|
||||
spec:
|
||||
# application (revision) reference
|
||||
targetAppRevisionName: test-rolling-v1
|
||||
componentList:
|
||||
- metrics-provider
|
||||
rolloutPlan:
|
||||
rolloutStrategy: "IncreaseFirst"
|
||||
rolloutBatches:
|
||||
- replicas: 10%
|
||||
- replicas: 40%
|
||||
- replicas: 50%
|
||||
```
|
||||
Users can check the status of the ApplicationRollout and wait for the rollout to complete.
|
||||
2. Deploy application to the cluster
|
||||
```yaml
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: test-rolling
|
||||
annotations:
|
||||
"app.oam.dev/rolling-components": "metrics-provider"
|
||||
"app.oam.dev/rollout-template": "true"
|
||||
spec:
|
||||
components:
|
||||
- name: metrics-provider
|
||||
type: clonesetservice
|
||||
properties:
|
||||
cmd:
|
||||
- ./podinfo
|
||||
- stress-cpu=1
|
||||
image: stefanprodan/podinfo:4.0.6
|
||||
port: 8080
|
||||
updateStrategyType: InPlaceIfPossible
|
||||
replicas: 5
|
||||
```
|
||||
|
||||
3. Attach the following rollout plan to upgrade the application to v1
|
||||
```yaml
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: AppRollout
|
||||
metadata:
|
||||
name: rolling-example
|
||||
spec:
|
||||
# application (revision) reference
|
||||
targetAppRevisionName: test-rolling-v1
|
||||
componentList:
|
||||
- metrics-provider
|
||||
rolloutPlan:
|
||||
rolloutStrategy: "IncreaseFirst"
|
||||
rolloutBatches:
|
||||
- replicas: 10%
|
||||
- replicas: 40%
|
||||
- replicas: 50%
|
||||
```
|
||||
Users can check the status of the ApplicationRollout and wait for the rollout to complete.
|
||||
|
||||
4. User can continue to modify the application image tag and apply
|
||||
```yaml
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: test-rolling
|
||||
annotations:
|
||||
"app.oam.dev/rolling-components": "metrics-provider"
|
||||
"app.oam.dev/rollout-template": "true"
|
||||
spec:
|
||||
components:
|
||||
- name: metrics-provider
|
||||
type: clonesetservice
|
||||
properties:
|
||||
cmd:
|
||||
- ./podinfo
|
||||
- stress-cpu=1
|
||||
image: stefanprodan/podinfo:5.0.2
|
||||
port: 8080
|
||||
updateStrategyType: InPlaceIfPossible
|
||||
replicas: 5
|
||||
```
|
||||
```yaml
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: test-rolling
|
||||
annotations:
|
||||
"app.oam.dev/rolling-components": "metrics-provider"
|
||||
"app.oam.dev/rollout-template": "true"
|
||||
spec:
|
||||
components:
|
||||
- name: metrics-provider
|
||||
type: clonesetservice
|
||||
properties:
|
||||
cmd:
|
||||
- ./podinfo
|
||||
- stress-cpu=1
|
||||
image: stefanprodan/podinfo:5.0.2
|
||||
port: 8080
|
||||
updateStrategyType: InPlaceIfPossible
|
||||
replicas: 5
|
||||
```
|
||||
|
||||
5. Apply the application rollout that upgrades the application from v1 to v2
|
||||
```yaml
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: AppRollout
|
||||
metadata:
|
||||
name: rolling-example
|
||||
spec:
|
||||
# application (revision) reference
|
||||
sourceAppRevisionName: test-rolling-v1
|
||||
targetAppRevisionName: test-rolling-v2
|
||||
componentList:
|
||||
- metrics-provider
|
||||
rolloutPlan:
|
||||
rolloutStrategy: "IncreaseFirst"
|
||||
rolloutBatches:
|
||||
- replicas: 1
|
||||
- replicas: 2
|
||||
- replicas: 2
|
||||
```
|
||||
User can check the status of the ApplicationRollout and see the rollout completes, and the
|
||||
ApplicationRollout's "Rolling State" becomes `rolloutSucceed`
|
||||
```yaml
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: AppRollout
|
||||
metadata:
|
||||
name: rolling-example
|
||||
spec:
|
||||
# application (revision) reference
|
||||
sourceAppRevisionName: test-rolling-v1
|
||||
targetAppRevisionName: test-rolling-v2
|
||||
componentList:
|
||||
- metrics-provider
|
||||
rolloutPlan:
|
||||
rolloutStrategy: "IncreaseFirst"
|
||||
rolloutBatches:
|
||||
- replicas: 1
|
||||
- replicas: 2
|
||||
- replicas: 2
|
||||
```
|
||||
User can check the status of the ApplicationRollout and see the rollout completes, and the
|
||||
ApplicationRollout's "Rolling State" becomes `rolloutSucceed`
|
||||
|
||||
## State Transition
|
||||
Here is the high level state transition graph
|
||||
|
||||

|
||||

|
||||
|
||||
## Roadmap
|
||||
|
||||
|
||||
38
docs/examples/app-with-volumes/app-websvc-volumes.yaml
Normal file
38
docs/examples/app-with-volumes/app-websvc-volumes.yaml
Normal file
@@ -0,0 +1,38 @@
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: app-webservice
|
||||
spec:
|
||||
components:
|
||||
- name: mywebsvc
|
||||
type: webservice
|
||||
properties:
|
||||
image: "busybox"
|
||||
cmd:
|
||||
- sleep
|
||||
- "1000"
|
||||
volumes:
|
||||
- name: "my-pvc"
|
||||
mountPath: "/var/www/html1"
|
||||
type: "pvc"
|
||||
claimName: "myclaim"
|
||||
- name: "my-cm"
|
||||
mountPath: "/var/www/html2"
|
||||
type: "configMap"
|
||||
cmName: "myCmName"
|
||||
items:
|
||||
- key: "k1"
|
||||
path: "./a1"
|
||||
- key: "k2"
|
||||
path: "./a2"
|
||||
- name: "my-cm-noitems"
|
||||
mountPath: "/var/www/html22"
|
||||
type: "configMap"
|
||||
cmName: "myCmName2"
|
||||
- name: "mysecret"
|
||||
type: "secret"
|
||||
mountPath: "/var/www/html3"
|
||||
secretName: "mysecret"
|
||||
- name: "my-empty-dir"
|
||||
type: "emptyDir"
|
||||
mountPath: "/var/www/html4"
|
||||
38
docs/examples/app-with-volumes/app-worker-volumes.yaml
Normal file
38
docs/examples/app-with-volumes/app-worker-volumes.yaml
Normal file
@@ -0,0 +1,38 @@
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: app-worker
|
||||
spec:
|
||||
components:
|
||||
- name: myworker
|
||||
type: worker
|
||||
properties:
|
||||
image: "busybox"
|
||||
cmd:
|
||||
- sleep
|
||||
- "1000"
|
||||
volumes:
|
||||
- name: "my-pvc"
|
||||
mountPath: "/var/www/html1"
|
||||
type: "pvc"
|
||||
claimName: "myclaim"
|
||||
- name: "my-cm"
|
||||
mountPath: "/var/www/html2"
|
||||
type: "configMap"
|
||||
cmName: "myCmName"
|
||||
items:
|
||||
- key: "k1"
|
||||
path: "./a1"
|
||||
- key: "k2"
|
||||
path: "./a2"
|
||||
- name: "my-cm-noitems"
|
||||
mountPath: "/var/www/html22"
|
||||
type: "configMap"
|
||||
cmName: "myCmName2"
|
||||
- name: "mysecret"
|
||||
type: "secret"
|
||||
mountPath: "/var/www/html3"
|
||||
secretName: "mysecret"
|
||||
- name: "my-empty-dir"
|
||||
type: "emptyDir"
|
||||
mountPath: "/var/www/html4"
|
||||
@@ -8,10 +8,7 @@ spec:
|
||||
componentList:
|
||||
- metrics-provider
|
||||
rolloutPlan:
|
||||
targetSize: 6
|
||||
rolloutStrategy: "IncreaseFirst"
|
||||
targetSize: 5
|
||||
rolloutBatches:
|
||||
- replicas: 1
|
||||
- replicas: 20%
|
||||
- replicas: 40%
|
||||
- replicas: 40%
|
||||
- replicas: 3
|
||||
25
docs/examples/enduser/sample-manual.yaml
Normal file
25
docs/examples/enduser/sample-manual.yaml
Normal file
@@ -0,0 +1,25 @@
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: website
|
||||
spec:
|
||||
components:
|
||||
- name: frontend
|
||||
type: webservice
|
||||
properties:
|
||||
image: nginx
|
||||
traits:
|
||||
- type: scaler
|
||||
properties:
|
||||
replicas: 2
|
||||
- type: sidecar
|
||||
properties:
|
||||
name: "sidecar-test"
|
||||
image: "fluentd"
|
||||
- name: backend
|
||||
type: worker
|
||||
properties:
|
||||
image: busybox
|
||||
cmd:
|
||||
- sleep
|
||||
- '1000'
|
||||
27
docs/examples/enduser/sample.yaml
Normal file
27
docs/examples/enduser/sample.yaml
Normal file
@@ -0,0 +1,27 @@
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: website
|
||||
spec:
|
||||
components:
|
||||
- name: frontend
|
||||
type: webservice
|
||||
properties:
|
||||
image: nginx
|
||||
traits:
|
||||
- type: cpuscaler
|
||||
properties:
|
||||
min: 1
|
||||
max: 10
|
||||
cpuPercent: 60
|
||||
- type: sidecar
|
||||
properties:
|
||||
name: "sidecar-test"
|
||||
image: "fluentd"
|
||||
- name: backend
|
||||
type: worker
|
||||
properties:
|
||||
image: busybox
|
||||
cmd:
|
||||
- sleep
|
||||
- '1000'
|
||||
54
docs/examples/live-diff/app-modified.yaml
Normal file
54
docs/examples/live-diff/app-modified.yaml
Normal file
@@ -0,0 +1,54 @@
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: livediff-demo
|
||||
spec:
|
||||
components:
|
||||
- name: myweb-1
|
||||
type: myworker
|
||||
properties:
|
||||
image: "busybox"
|
||||
cmd:
|
||||
- sleep
|
||||
- "2000" # change a component property
|
||||
lives: "3"
|
||||
enemies: "alien"
|
||||
traits:
|
||||
- type: myingress
|
||||
properties:
|
||||
domain: "www.example.com"
|
||||
http:
|
||||
"/": 90 # change a trait
|
||||
# - type: myscaler # remove a trait
|
||||
# properties:
|
||||
# replicas: 2
|
||||
- name: myweb-2
|
||||
type: myworker
|
||||
properties: # no change on component property
|
||||
image: "busybox"
|
||||
cmd:
|
||||
- sleep
|
||||
- "1000"
|
||||
lives: "3"
|
||||
enemies: "alien"
|
||||
traits:
|
||||
- type: myingress # add a trait
|
||||
properties:
|
||||
domain: "www.example.com"
|
||||
http:
|
||||
"/": 90
|
||||
- name: myweb-3 # add a component
|
||||
type: myworker
|
||||
properties:
|
||||
image: "busybox"
|
||||
cmd:
|
||||
- sleep
|
||||
- "1000"
|
||||
lives: "3"
|
||||
enemies: "alien"
|
||||
traits:
|
||||
- type: myingress
|
||||
properties:
|
||||
domain: "www.example.com"
|
||||
http:
|
||||
"/": 90
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user