Mirror of https://github.com/kubevela/kubevela.git (synced 2026-02-23 22:33:58 +00:00)

Compare commits
25 Commits

| Author | SHA1 | Date |
|---|---|---|
|  | 657a374ded |  |
|  | dfe12cd9ca |  |
|  | cd42f67848 |  |
|  | 61d2c588e3 |  |
|  | b3dad698a5 |  |
|  | ec5159c2ca |  |
|  | a7b2b221e0 |  |
|  | caa495a5d9 |  |
|  | 15bea4fb64 |  |
|  | 1a094a4eea |  |
|  | 65b6f47330 |  |
|  | 3a4cd2dca6 |  |
|  | 9fabd950e5 |  |
|  | 0a012b4d34 |  |
|  | 7c231e6c48 |  |
|  | 36b6c3e7b5 |  |
|  | 4cc019722c |  |
|  | b040ae65da |  |
|  | f0fb4ed099 |  |
|  | 3c61bcb8f0 |  |
|  | a14b536fd1 |  |
|  | ba5a726854 |  |
|  | ffb9d06427 |  |
|  | 819dc26ace |  |

(Author and date columns were not preserved in the mirror.)
.github/workflows/chart.yaml (vendored, new file, 89 lines)

New file (@@ -0,0 +1,89 @@):

```yaml
name: Publish Chart

on:
  push:
    tags:
      - "v*"
  workflow_dispatch: { }

env:
  BUCKET: ${{ secrets.OSS_BUCKET }}
  ENDPOINT: ${{ secrets.OSS_ENDPOINT }}
  ACCESS_KEY: ${{ secrets.OSS_ACCESS_KEY }}
  ACCESS_KEY_SECRET: ${{ secrets.OSS_ACCESS_KEY_SECRET }}
  ARTIFACT_HUB_REPOSITORY_ID: ${{ secrets.ARTIFACT_HUB_REPOSITORY_ID }}

jobs:
  publish-charts:
    env:
      HELM_CHARTS_DIR: charts
      HELM_CHART: charts/vela-core
      MINIMAL_HELM_CHART: charts/vela-minimal
      LEGACY_HELM_CHART: legacy/charts/vela-core-legacy
      VELA_ROLLOUT_HELM_CHART: runtime/rollout/charts
      LOCAL_OSS_DIRECTORY: .oss/
    runs-on: ubuntu-20.04
    steps:
      - uses: actions/checkout@master
      - name: Get git revision
        id: vars
        shell: bash
        run: |
          echo "::set-output name=git_revision::$(git rev-parse --short HEAD)"
      - name: Install Helm
        uses: azure/setup-helm@v1
        with:
          version: v3.4.0
      - name: Setup node
        uses: actions/setup-node@v2
        with:
          node-version: '14'
      - name: Generate helm doc
        run: |
          make helm-doc-gen
      - name: Prepare legacy chart
        run: |
          rsync -r $LEGACY_HELM_CHART $HELM_CHARTS_DIR
          rsync -r $HELM_CHART/* $LEGACY_HELM_CHART --exclude=Chart.yaml --exclude=crds
      - name: Prepare vela chart
        run: |
          rsync -r $VELA_ROLLOUT_HELM_CHART $HELM_CHARTS_DIR
      - name: Get the version
        id: get_version
        run: |
          VERSION=${GITHUB_REF#refs/tags/}
          echo ::set-output name=VERSION::${VERSION}
      - name: Tag helm chart image
        run: |
          image_tag=${{ steps.get_version.outputs.VERSION }}
          chart_version=${{ steps.get_version.outputs.VERSION }}
          sed -i "s/latest/${image_tag}/g" $HELM_CHART/values.yaml
          sed -i "s/latest/${image_tag}/g" $MINIMAL_HELM_CHART/values.yaml
          sed -i "s/latest/${image_tag}/g" $LEGACY_HELM_CHART/values.yaml
          sed -i "s/latest/${image_tag}/g" $VELA_ROLLOUT_HELM_CHART/values.yaml
          chart_smever=${chart_version#"v"}
          sed -i "s/0.1.0/$chart_smever/g" $HELM_CHART/Chart.yaml
          sed -i "s/0.1.0/$chart_smever/g" $MINIMAL_HELM_CHART/Chart.yaml
          sed -i "s/0.1.0/$chart_smever/g" $LEGACY_HELM_CHART/Chart.yaml
          sed -i "s/0.1.0/$chart_smever/g" $VELA_ROLLOUT_HELM_CHART/Chart.yaml
      - name: Install ossutil
        run: wget http://gosspublic.alicdn.com/ossutil/1.7.0/ossutil64 && chmod +x ossutil64 && mv ossutil64 ossutil
      - name: Configure Alibaba Cloud OSSUTIL
        run: ./ossutil --config-file .ossutilconfig config -i ${ACCESS_KEY} -k ${ACCESS_KEY_SECRET} -e ${ENDPOINT} -c .ossutilconfig
      - name: sync cloud to local
        run: ./ossutil --config-file .ossutilconfig sync oss://$BUCKET/core $LOCAL_OSS_DIRECTORY
      - name: add artifacthub stuff to the repo
        run: |
          rsync $HELM_CHART/README.md $LEGACY_HELM_CHART/README.md
          rsync $HELM_CHART/README.md $VELA_ROLLOUT_HELM_CHART/README.md
          sed -i "s/ARTIFACT_HUB_REPOSITORY_ID/$ARTIFACT_HUB_REPOSITORY_ID/g" hack/artifacthub/artifacthub-repo.yml
          rsync hack/artifacthub/artifacthub-repo.yml $LOCAL_OSS_DIRECTORY
      - name: Package helm charts
        run: |
          helm package $HELM_CHART --destination $LOCAL_OSS_DIRECTORY
          helm package $MINIMAL_HELM_CHART --destination $LOCAL_OSS_DIRECTORY
          helm package $LEGACY_HELM_CHART --destination $LOCAL_OSS_DIRECTORY
          helm package $VELA_ROLLOUT_HELM_CHART --destination $LOCAL_OSS_DIRECTORY
          helm repo index --url https://$BUCKET.$ENDPOINT/core $LOCAL_OSS_DIRECTORY
      - name: sync local to cloud
        run: ./ossutil --config-file .ossutilconfig sync $LOCAL_OSS_DIRECTORY oss://$BUCKET/core -f
```
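The tag-to-version handling in the `Tag helm chart image` step is easy to misread in shell. Below is a minimal Go sketch of the same derivation (the function name and the sample tag are illustrative, not from the workflow): the image tag keeps the `v` prefix, while the chart version drops it so Helm accepts it as SemVer.

```go
package main

import (
	"fmt"
	"strings"
)

// deriveVersions mirrors the shell in the workflow above:
//   VERSION=${GITHUB_REF#refs/tags/}   -> image tag, e.g. "v1.2.3"
//   chart_smever=${chart_version#"v"}  -> chart version, e.g. "1.2.3"
func deriveVersions(githubRef string) (imageTag, chartVersion string) {
	imageTag = strings.TrimPrefix(githubRef, "refs/tags/")
	chartVersion = strings.TrimPrefix(imageTag, "v")
	return imageTag, chartVersion
}

func main() {
	tag, chart := deriveVersions("refs/tags/v1.2.3")
	fmt.Println(tag, chart) // v1.2.3 1.2.3
}
```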
.github/workflows/registry.yml (vendored, 103 lines changed)

```diff
@@ -8,11 +8,8 @@ on:
   workflow_dispatch: {}
 
 env:
   BUCKET: ${{ secrets.OSS_BUCKET }}
   ENDPOINT: ${{ secrets.OSS_ENDPOINT }}
   ACCESS_KEY: ${{ secrets.OSS_ACCESS_KEY }}
   ACCESS_KEY_SECRET: ${{ secrets.OSS_ACCESS_KEY_SECRET }}
-  ARTIFACT_HUB_REPOSITORY_ID: ${{ secrets.ARTIFACT_HUB_REPOSITORY_ID }}
 
 jobs:
   publish-core-images:
@@ -47,8 +44,8 @@ jobs:
       - name: Login Alibaba Cloud ACR
         uses: docker/login-action@v1
         with:
-          registry: kubevela-registry.cn-hangzhou.cr.aliyuncs.com
-          username: ${{ secrets.ACR_USERNAME }}@aliyun-inner.com
+          registry: ${{ secrets.ACR_DOMAIN }}
+          username: ${{ secrets.ACR_USERNAME }}
           password: ${{ secrets.ACR_PASSWORD }}
       - uses: docker/setup-qemu-action@v1
       - uses: docker/setup-buildx-action@v1
@@ -72,7 +69,7 @@ jobs:
           tags: |-
            docker.io/oamdev/vela-core:${{ steps.get_version.outputs.VERSION }}
            ghcr.io/${{ github.repository_owner }}/oamdev/vela-core:${{ steps.get_version.outputs.VERSION }}
-           kubevela-registry.cn-hangzhou.cr.aliyuncs.com/oamdev/vela-core:${{ steps.get_version.outputs.VERSION }}
+           ${{ secrets.ACR_DOMAIN }}/oamdev/vela-core:${{ steps.get_version.outputs.VERSION }}
 
       - uses: docker/build-push-action@v2
         name: Build & Pushing CLI for Dockerhub, GHCR and ACR
@@ -91,7 +88,7 @@ jobs:
           tags: |-
            docker.io/oamdev/vela-cli:${{ steps.get_version.outputs.VERSION }}
            ghcr.io/${{ github.repository_owner }}/oamdev/vela-cli:${{ steps.get_version.outputs.VERSION }}
-           kubevela-registry.cn-hangzhou.cr.aliyuncs.com/oamdev/vela-cli:${{ steps.get_version.outputs.VERSION }}
+           ${{ secrets.ACR_DOMAIN }}/oamdev/vela-cli:${{ steps.get_version.outputs.VERSION }}
 
   publish-addon-images:
     runs-on: ubuntu-latest
@@ -125,8 +122,8 @@ jobs:
       - name: Login Alibaba Cloud ACR
         uses: docker/login-action@v1
         with:
-          registry: kubevela-registry.cn-hangzhou.cr.aliyuncs.com
-          username: ${{ secrets.ACR_USERNAME }}@aliyun-inner.com
+          registry: ${{ secrets.ACR_DOMAIN }}
+          username: ${{ secrets.ACR_USERNAME }}
           password: ${{ secrets.ACR_PASSWORD }}
       - uses: docker/setup-qemu-action@v1
       - uses: docker/setup-buildx-action@v1
@@ -150,7 +147,7 @@ jobs:
           tags: |-
            docker.io/oamdev/vela-apiserver:${{ steps.get_version.outputs.VERSION }}
            ghcr.io/${{ github.repository_owner }}/oamdev/vela-apiserver:${{ steps.get_version.outputs.VERSION }}
-           kubevela-registry.cn-hangzhou.cr.aliyuncs.com/oamdev/vela-apiserver:${{ steps.get_version.outputs.VERSION }}
+           ${{ secrets.ACR_DOMAIN }}/oamdev/vela-apiserver:${{ steps.get_version.outputs.VERSION }}
 
       - uses: docker/build-push-action@v2
         name: Build & Pushing runtime rollout Dockerhub, GHCR and ACR
@@ -169,91 +166,7 @@ jobs:
           tags: |-
            docker.io/oamdev/vela-rollout:${{ steps.get_version.outputs.VERSION }}
            ghcr.io/${{ github.repository_owner }}/oamdev/vela-rollout:${{ steps.get_version.outputs.VERSION }}
-           kubevela-registry.cn-hangzhou.cr.aliyuncs.com/oamdev/vela-rollout:${{ steps.get_version.outputs.VERSION }}
+           ${{ secrets.ACR_DOMAIN }}/oamdev/vela-rollout:${{ steps.get_version.outputs.VERSION }}
 
-  publish-charts:
-    env:
-      HELM_CHARTS_DIR: charts
-      HELM_CHART: charts/vela-core
-      MINIMAL_HELM_CHART: charts/vela-minimal
-      LEGACY_HELM_CHART: legacy/charts/vela-core-legacy
-      VELA_ROLLOUT_HELM_CHART: runtime/rollout/charts
-      LOCAL_OSS_DIRECTORY: .oss/
-    runs-on: ubuntu-20.04
-    steps:
-      - uses: actions/checkout@master
-      - name: Get git revision
-        id: vars
-        shell: bash
-        run: |
-          echo "::set-output name=git_revision::$(git rev-parse --short HEAD)"
-      - name: Install Helm
-        uses: azure/setup-helm@v1
-        with:
-          version: v3.4.0
-      - name: Setup node
-        uses: actions/setup-node@v2
-        with:
-          node-version: '14'
-      - name: Generate helm doc
-        run: |
-          make helm-doc-gen
-      - name: Prepare legacy chart
-        run: |
-          rsync -r $LEGACY_HELM_CHART $HELM_CHARTS_DIR
-          rsync -r $HELM_CHART/* $LEGACY_HELM_CHART --exclude=Chart.yaml --exclude=crds
-      - name: Prepare vela chart
-        run: |
-          rsync -r $VELA_ROLLOUT_HELM_CHART $HELM_CHARTS_DIR
-      - uses: oprypin/find-latest-tag@v1
-        with:
-          repository: oam-dev/kubevela
-          releases-only: true
-        id: latest_tag
-      - name: Tag helm chart image
-        run: |
-          latest_repo_tag=${{ steps.latest_tag.outputs.tag }}
-          sub="."
-          major="$(cut -d"$sub" -f1 <<<"$latest_repo_tag")"
-          minor="$(cut -d"$sub" -f2 <<<"$latest_repo_tag")"
-          patch="0"
-          current_repo_tag="$major.$minor.$patch"
-          image_tag=${GITHUB_REF#refs/tags/}
-          chart_version=$latest_repo_tag
-          if [[ ${GITHUB_REF} == "refs/heads/master" ]]; then
-            image_tag=latest
-            chart_version=${current_repo_tag}-nightly-build
-          fi
-          sed -i "s/latest/${image_tag}/g" $HELM_CHART/values.yaml
-          sed -i "s/latest/${image_tag}/g" $MINIMAL_HELM_CHART/values.yaml
-          sed -i "s/latest/${image_tag}/g" $LEGACY_HELM_CHART/values.yaml
-          sed -i "s/latest/${image_tag}/g" $VELA_ROLLOUT_HELM_CHART/values.yaml
-          chart_smever=${chart_version#"v"}
-          sed -i "s/0.1.0/$chart_smever/g" $HELM_CHART/Chart.yaml
-          sed -i "s/0.1.0/$chart_smever/g" $MINIMAL_HELM_CHART/Chart.yaml
-          sed -i "s/0.1.0/$chart_smever/g" $LEGACY_HELM_CHART/Chart.yaml
-          sed -i "s/0.1.0/$chart_smever/g" $VELA_ROLLOUT_HELM_CHART/Chart.yaml
-      - name: Install ossutil
-        run: wget http://gosspublic.alicdn.com/ossutil/1.7.0/ossutil64 && chmod +x ossutil64 && mv ossutil64 ossutil
-      - name: Configure Alibaba Cloud OSSUTIL
-        run: ./ossutil --config-file .ossutilconfig config -i ${ACCESS_KEY} -k ${ACCESS_KEY_SECRET} -e ${ENDPOINT} -c .ossutilconfig
-      - name: sync cloud to local
-        run: ./ossutil --config-file .ossutilconfig sync oss://$BUCKET/core $LOCAL_OSS_DIRECTORY
-      - name: add artifacthub stuff to the repo
-        run: |
-          rsync $HELM_CHART/README.md $LEGACY_HELM_CHART/README.md
-          rsync $HELM_CHART/README.md $VELA_ROLLOUT_HELM_CHART/README.md
-          sed -i "s/ARTIFACT_HUB_REPOSITORY_ID/$ARTIFACT_HUB_REPOSITORY_ID/g" hack/artifacthub/artifacthub-repo.yml
-          rsync hack/artifacthub/artifacthub-repo.yml $LOCAL_OSS_DIRECTORY
-      - name: Package helm charts
-        run: |
-          helm package $HELM_CHART --destination $LOCAL_OSS_DIRECTORY
-          helm package $MINIMAL_HELM_CHART --destination $LOCAL_OSS_DIRECTORY
-          helm package $LEGACY_HELM_CHART --destination $LOCAL_OSS_DIRECTORY
-          helm package $VELA_ROLLOUT_HELM_CHART --destination $LOCAL_OSS_DIRECTORY
-          helm repo index --url https://$BUCKET.$ENDPOINT/core $LOCAL_OSS_DIRECTORY
-      - name: sync local to cloud
-        run: ./ossutil --config-file .ossutilconfig sync $LOCAL_OSS_DIRECTORY oss://$BUCKET/core -f
 
   publish-capabilities:
     env:
```
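The removed `publish-charts` job (now living in chart.yaml above) also carried nightly-build logic for pushes to master: it took the latest release tag, reset the patch component to zero, and suffixed `-nightly-build`. A small Go sketch of that arithmetic, under the assumption that release tags look like `vMAJOR.MINOR.PATCH`:

```go
package main

import (
	"fmt"
	"strings"
)

// nightlyChartVersion reproduces the shell from the removed job:
//   major="$(cut -d. -f1)"; minor="$(cut -d. -f2)"; patch="0"
//   chart_version=${current_repo_tag}-nightly-build, then the leading "v"
//   is stripped so the result is valid SemVer for Helm.
func nightlyChartVersion(latestTag string) string {
	parts := strings.SplitN(latestTag, ".", 3)
	current := fmt.Sprintf("%s.%s.0", parts[0], parts[1]) // major.minor.0
	return strings.TrimPrefix(current, "v") + "-nightly-build"
}

func main() {
	fmt.Println(nightlyChartVersion("v1.4.3")) // 1.4.0-nightly-build
}
```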
.github/workflows/release.yml (vendored, 5 lines changed)

```diff
@@ -123,6 +123,11 @@ jobs:
       - name: sync the latest version file
         if: ${{ !contains(env.VELA_VERSION,'alpha') && !contains(env.VELA_VERSION,'beta') }}
         run: |
+          LATEST_VERSION=$(curl -fsSl https://static.kubevela.net/binary/vela/latest_version)
+          verlte() {
+            [ "$1" = "`echo -e "$1\n$2" | sort -V | head -n1`" ]
+          }
+          verlte ${{ env.VELA_VERSION }} $LATEST_VERSION && echo "${{ env.VELA_VERSION }} <= $LATEST_VERSION, skip update" && exit 0
           echo ${{ env.VELA_VERSION }} > ./latest_version
           ./ossutil --config-file .ossutilconfig cp -u ./latest_version oss://$BUCKET/binary/vela/latest_version
```
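The `verlte` helper leans on `sort -V`: if `$1` still sorts first, it is less than or equal to `$2`, and the upload is skipped. A Go sketch of the same guard, assuming tags carry the `v` prefix that `golang.org/x/mod/semver` requires (note that `sort -V` is slightly laxer than strict SemVer):

```go
package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

// verLTE answers the same question as the shell verlte(): should the
// latest_version file be left alone because the new tag is not newer?
func verLTE(v1, v2 string) bool {
	return semver.Compare(v1, v2) <= 0
}

func main() {
	fmt.Println(verLTE("v1.3.5", "v1.4.0")) // true  -> skip the upload
	fmt.Println(verLTE("v1.4.1", "v1.4.0")) // false -> overwrite latest_version
}
```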
TraitDefinition `affinity` (description fix):

```diff
@@ -4,7 +4,7 @@ apiVersion: core.oam.dev/v1beta1
 kind: TraitDefinition
 metadata:
   annotations:
-    definition.oam.dev/description: affinity specify affinity and tolerationon K8s pod for your workload which follows the pod spec in path 'spec.template'.
+    definition.oam.dev/description: Affinity specifies affinity and toleration K8s pod for your workload which follows the pod spec in path 'spec.template'.
   labels:
     custom.definition.oam.dev/ui-hidden: "true"
   name: affinity
```
Component definition env vars, CUE template (`secretKeyRef` and `configMapKeyRef` become optional):

```diff
@@ -196,14 +196,14 @@ spec:
             // +usage=Specifies a source the value of this var should come from
             valueFrom?: {
               // +usage=Selects a key of a secret in the pod's namespace
-              secretKeyRef: {
+              secretKeyRef?: {
                 // +usage=The name of the secret in the pod's namespace to select from
                 name: string
                 // +usage=The key of the secret to select from. Must be a valid secret key
                 key: string
               }
               // +usage=Selects a key of a config map in the pod's namespace
-              configMapKeyRef: {
+              configMapKeyRef?: {
                 // +usage=The name of the config map in the pod's namespace to select from
                 name: string
                 // +usage=The key of the config map to select from. Must be a valid secret key
```
Init-container trait (extra volume mounts):

```diff
@@ -43,7 +43,7 @@ spec:
           volumeMounts: [{
             name:      parameter.mountName
             mountPath: parameter.initMountPath
-          }]
+          }] + parameter.extraVolumeMounts
         }]
         // +patchKey=name
         volumes: [{
@@ -97,5 +97,13 @@ spec:
 
         // +usage=Specify the mount path of init container
         initMountPath: string
+
+        // +usage=Specify the extra volume mounts for the init container
+        extraVolumeMounts: [...{
+          // +usage=The name of the volume to be mounted
+          name: string
+          // +usage=The mountPath for mount in the init container
+          mountPath: string
+        }]
       }
```
`read-object` workflow step (`#K8sObject` schema loosened to optional reference fields):

```diff
@@ -14,12 +14,12 @@ spec:
     cue:
       template: |
         #K8sObject: {
-          apiVersion: string
-          kind:       string
-          metadata: {
-            name: string
-            ...
-          }
+          resource?:  string
+          group?:     string
+          name?:      string
+          namespace?: string
+          cluster?:   string
+          labelSelector?: [string]: string
           ...
         }
         output: parameter.objects[0]
```
`service-account` trait (new CUE template generating ServiceAccount, Role/ClusterRole and bindings from declared privileges):

```diff
@@ -14,10 +14,114 @@ spec:
   schematic:
     cue:
       template: |
+        #Privileges: {
+          // +usage=Specify the verbs to be allowed for the resource
+          verbs: [...string]
+          // +usage=Specify the apiGroups of the resource
+          apiGroups?: [...string]
+          // +usage=Specify the resources to be allowed
+          resources?: [...string]
+          // +usage=Specify the resourceNames to be allowed
+          resourceNames?: [...string]
+          // +usage=Specify the resource url to be allowed
+          nonResourceURLs?: [...string]
+          // +usage=Specify the scope of the privileges, default to be namespace scope
+          scope: *"namespace" | "cluster"
+        }
+        parameter: {
+          // +usage=Specify the name of ServiceAccount
+          name: string
+          // +usage=Specify whether to create new ServiceAccount or not
+          create: *false | bool
+          // +usage=Specify the privileges of the ServiceAccount, if not empty, RoleBindings(ClusterRoleBindings) will be created
+          privileges?: [...#Privileges]
+        }
+        // +patchStrategy=retainKeys
+        patch: spec: template: spec: serviceAccountName: parameter.name
+        _clusterPrivileges: [ for p in parameter.privileges if p.scope == "cluster" {p}]
+        _namespacePrivileges: [ for p in parameter.privileges if p.scope == "namespace" {p}]
+        outputs: {
+          if parameter.create {
+            "service-account": {
+              apiVersion: "v1"
+              kind:       "ServiceAccount"
+              metadata: name: parameter.name
+            }
+          }
+          if parameter.privileges != _|_ {
+            if len(_clusterPrivileges) > 0 {
+              "cluster-role": {
+                apiVersion: "rbac.authorization.k8s.io/v1"
+                kind:       "ClusterRole"
+                metadata: name: "\(context.namespace):\(parameter.name)"
+                rules: [ for p in _clusterPrivileges {
+                  verbs: p.verbs
+                  if p.apiGroups != _|_ {
+                    apiGroups: p.apiGroups
+                  }
+                  if p.resources != _|_ {
+                    resources: p.resources
+                  }
+                  if p.resourceNames != _|_ {
+                    resourceNames: p.resourceNames
+                  }
+                  if p.nonResourceURLs != _|_ {
+                    nonResourceURLs: p.nonResourceURLs
+                  }
+                }]
+              }
+              "cluster-role-binding": {
+                apiVersion: "rbac.authorization.k8s.io/v1"
+                kind:       "ClusterRoleBinding"
+                metadata: name: "\(context.namespace):\(parameter.name)"
+                roleRef: {
+                  apiGroup: "rbac.authorization.k8s.io"
+                  kind:     "ClusterRole"
+                  name:     "\(context.namespace):\(parameter.name)"
+                }
+                subjects: [{
+                  kind:      "ServiceAccount"
+                  name:      parameter.name
+                  namespace: "\(context.namespace)"
+                }]
+              }
+            }
+            if len(_namespacePrivileges) > 0 {
+              role: {
+                apiVersion: "rbac.authorization.k8s.io/v1"
+                kind:       "Role"
+                metadata: name: parameter.name
+                rules: [ for p in _namespacePrivileges {
+                  verbs: p.verbs
+                  if p.apiGroups != _|_ {
+                    apiGroups: p.apiGroups
+                  }
+                  if p.resources != _|_ {
+                    resources: p.resources
+                  }
+                  if p.resourceNames != _|_ {
+                    resourceNames: p.resourceNames
+                  }
+                  if p.nonResourceURLs != _|_ {
+                    nonResourceURLs: p.nonResourceURLs
+                  }
+                }]
+              }
+              "role-binding": {
+                apiVersion: "rbac.authorization.k8s.io/v1"
+                kind:       "RoleBinding"
+                metadata: name: parameter.name
+                roleRef: {
+                  apiGroup: "rbac.authorization.k8s.io"
+                  kind:     "Role"
+                  name:     parameter.name
+                }
+                subjects: [{
+                  kind: "ServiceAccount"
+                  name: parameter.name
+                }]
+              }
+            }
+          }
+        }
```
Env vars (new `fieldRef` source):

```diff
@@ -82,6 +82,11 @@ spec:
               // +usage=The key of the config map to select from. Must be a valid secret key
               key: string
             }
+            // +usage=Specify the field reference for env
+            fieldRef?: {
+              // +usage=Specify the field path for env
+              fieldPath: string
+            }
           }
         }]
```
Same optional-key change in another definition's env template:

```diff
@@ -149,14 +149,14 @@ spec:
             // +usage=Specifies a source the value of this var should come from
             valueFrom?: {
               // +usage=Selects a key of a secret in the pod's namespace
-              secretKeyRef: {
+              secretKeyRef?: {
                 // +usage=The name of the secret in the pod's namespace to select from
                 name: string
                 // +usage=The key of the secret to select from. Must be a valid secret key
                 key: string
               }
               // +usage=Selects a key of a config map in the pod's namespace
-              configMapKeyRef: {
+              configMapKeyRef?: {
                 // +usage=The name of the config map in the pod's namespace to select from
                 name: string
                 // +usage=The key of the config map to select from. Must be a valid secret key
```
Helm Deployment template (controller-name label added):

```diff
@@ -122,6 +122,7 @@ metadata:
   name: {{ include "kubevela.fullname" . }}
   namespace: {{ .Release.Namespace }}
   labels:
+    controller.oam.dev/name: vela-core
     {{- include "kubevela.labels" . | nindent 4 }}
 spec:
   replicas: {{ .Values.replicaCount }}
```
The same eight hunks are applied a second time to a parallel set of template files, apparently the legacy-chart copies that the `Prepare legacy chart` step keeps in sync. Only the final Deployment hunk differs, landing three lines lower:

```diff
@@ -125,6 +125,7 @@ metadata:
   name: {{ include "kubevela.fullname" . }}
   namespace: {{ .Release.Namespace }}
   labels:
+    controller.oam.dev/name: vela-core
     {{- include "kubevela.labels" . | nindent 4 }}
 spec:
   replicas: {{ .Values.replicaCount }}
```
Addon installer (new `skipVersionValidate` flag and variadic options):

```diff
@@ -1058,21 +1058,22 @@ func Convert2SecName(name string) string {
 
 // Installer helps addon enable, dependency-check, dispatch resources
 type Installer struct {
-	ctx          context.Context
-	config       *rest.Config
-	addon        *InstallPackage
-	cli          client.Client
-	apply        apply.Applicator
-	r            *Registry
-	registryMeta map[string]SourceMeta
-	args         map[string]interface{}
-	cache        *Cache
-	dc           *discovery.DiscoveryClient
+	ctx                 context.Context
+	config              *rest.Config
+	addon               *InstallPackage
+	cli                 client.Client
+	apply               apply.Applicator
+	r                   *Registry
+	registryMeta        map[string]SourceMeta
+	args                map[string]interface{}
+	cache               *Cache
+	dc                  *discovery.DiscoveryClient
+	skipVersionValidate bool
 }
 
 // NewAddonInstaller will create an installer for addon
-func NewAddonInstaller(ctx context.Context, cli client.Client, discoveryClient *discovery.DiscoveryClient, apply apply.Applicator, config *rest.Config, r *Registry, args map[string]interface{}, cache *Cache) Installer {
-	return Installer{
+func NewAddonInstaller(ctx context.Context, cli client.Client, discoveryClient *discovery.DiscoveryClient, apply apply.Applicator, config *rest.Config, r *Registry, args map[string]interface{}, cache *Cache, opts ...InstallOption) Installer {
+	i := Installer{
 		ctx:    ctx,
 		config: config,
 		cli:    cli,
@@ -1082,14 +1083,21 @@ func NewAddonInstaller(ctx context.Context, cli client.Client, discoveryClient *
 		cache: cache,
 		dc:    discoveryClient,
 	}
+	for _, opt := range opts {
+		opt(&i)
+	}
+	return i
 }
 
 func (h *Installer) enableAddon(addon *InstallPackage) error {
 	var err error
 	h.addon = addon
-	err = checkAddonVersionMeetRequired(h.ctx, addon.SystemRequirements, h.cli, h.dc)
-	if err != nil {
-		return VersionUnMatchError{addonName: addon.Name, err: err}
+	if !h.skipVersionValidate {
+		err = checkAddonVersionMeetRequired(h.ctx, addon.SystemRequirements, h.cli, h.dc)
+		if err != nil {
+			return VersionUnMatchError{addonName: addon.Name, err: err}
+		}
 	}
 
 	if err = h.installDependency(addon); err != nil {
```
`fetchVelaCoreImageTag` now discovers the controller Deployment by label, falling back to the fixed name for older installs:

```diff
@@ -1445,10 +1453,23 @@ func checkSemVer(actual string, require string) (bool, error) {
 }
 
 func fetchVelaCoreImageTag(ctx context.Context, k8sClient client.Client) (string, error) {
-	deploy := &appsv1.Deployment{}
-	if err := k8sClient.Get(ctx, types2.NamespacedName{Namespace: types.DefaultKubeVelaNS, Name: types.KubeVelaControllerDeployment}, deploy); err != nil {
+	deployList := &appsv1.DeploymentList{}
+	if err := k8sClient.List(ctx, deployList, client.MatchingLabels{oam.LabelControllerName: oam.ApplicationControllerName}); err != nil {
 		return "", err
 	}
+	deploy := appsv1.Deployment{}
+	if len(deployList.Items) == 0 {
+		// backward compatible logic old version which vela-core controller has no this label
+		if err := k8sClient.Get(ctx, types2.NamespacedName{Namespace: types.DefaultKubeVelaNS, Name: types.KubeVelaControllerDeployment}, &deploy); err != nil {
+			if apierrors.IsNotFound(err) {
+				return "", errors.New("can't find a running KubeVela instance, please install it first")
+			}
+			return "", err
+		}
+	} else {
+		deploy = deployList.Items[0]
+	}
 
 	var tag string
 	for _, c := range deploy.Spec.Template.Spec.Containers {
 		if c.Name == types.DefaultKubeVelaReleaseName {
```
Addon suite tests updated for the new error shape and labeled deployment fixture:

```diff
@@ -196,7 +196,7 @@ var _ = Describe("Addon func test", func() {
 	It("fetchVelaCoreImageTag func test", func() {
 		deploy = appsv1.Deployment{}
 		tag, err := fetchVelaCoreImageTag(ctx, k8sClient)
-		Expect(err).Should(util.NotFoundMatcher{})
+		Expect(err).ShouldNot(BeNil())
 		Expect(tag).Should(BeEquivalentTo(""))
 
 		Expect(yaml.Unmarshal([]byte(deployYaml), &deploy)).Should(BeNil())
@@ -217,7 +217,7 @@ var _ = Describe("Addon func test", func() {
 
 	It("checkAddonVersionMeetRequired func test", func() {
 		deploy = appsv1.Deployment{}
-		Expect(checkAddonVersionMeetRequired(ctx, &SystemRequirements{VelaVersion: ">=v1.2.1"}, k8sClient, dc)).Should(util.NotFoundMatcher{})
+		Expect(checkAddonVersionMeetRequired(ctx, &SystemRequirements{VelaVersion: ">=v1.2.1"}, k8sClient, dc)).ShouldNot(BeNil())
 		Expect(yaml.Unmarshal([]byte(deployYaml), &deploy)).Should(BeNil())
 		deploy.SetNamespace(types.DefaultKubeVelaNS)
 		Expect(k8sClient.Create(ctx, &deploy)).Should(BeNil())
@@ -408,6 +408,8 @@ kind: Deployment
 metadata:
   name: kubevela-vela-core
   namespace: vela-system
+  labels:
+    controller.oam.dev/name: vela-core
 spec:
   progressDeadlineSeconds: 600
   replicas: 1
```
Unit test gains a `MockList` that serves a labeled deployment with a versioned image:

```diff
@@ -33,6 +33,7 @@ import (
 	"github.com/crossplane/crossplane-runtime/pkg/test"
 	"github.com/google/go-github/v32/github"
 	"github.com/stretchr/testify/assert"
 	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
 	kerrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -47,6 +48,7 @@ import (
 	"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
 	"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
 	"github.com/oam-dev/kubevela/apis/types"
+	"github.com/oam-dev/kubevela/pkg/oam"
 	version2 "github.com/oam-dev/kubevela/version"
 )
@@ -791,6 +793,33 @@ func TestCheckAddonVersionMeetRequired(t *testing.T) {
 		MockGet: test.NewMockGetFn(nil, func(obj client.Object) error {
 			return nil
 		}),
+		MockList: test.NewMockListFn(nil, func(obj client.ObjectList) error {
+			robj := obj.(*appsv1.DeploymentList)
+			list := &appsv1.DeploymentList{
+				Items: []appsv1.Deployment{
+					{
+						ObjectMeta: metav1.ObjectMeta{
+							Labels: map[string]string{
+								oam.LabelControllerName: oam.ApplicationControllerName,
+							},
+						},
+						Spec: appsv1.DeploymentSpec{
+							Template: corev1.PodTemplateSpec{
+								Spec: corev1.PodSpec{
+									Containers: []corev1.Container{
+										{
+											Image: "vela-core:v1.2.5",
+										},
+									},
+								},
+							},
+						},
+					},
+				},
+			}
+			list.DeepCopyInto(robj)
+			return nil
+		}),
 	}
 	ctx := context.Background()
 	assert.NoError(t, checkAddonVersionMeetRequired(ctx, &SystemRequirements{VelaVersion: ">=1.2.4"}, k8sClient, nil))
```
pkg/addon/example-1.0.1.tgz (BIN, new file): binary file not shown.
Public addon APIs thread the new options through, and the `InstallOption` type is introduced:

```diff
@@ -54,8 +54,8 @@ const (
 )
 
 // EnableAddon will enable addon with dependency check, source is where addon from.
-func EnableAddon(ctx context.Context, name string, version string, cli client.Client, discoveryClient *discovery.DiscoveryClient, apply apply.Applicator, config *rest.Config, r Registry, args map[string]interface{}, cache *Cache) error {
-	h := NewAddonInstaller(ctx, cli, discoveryClient, apply, config, &r, args, cache)
+func EnableAddon(ctx context.Context, name string, version string, cli client.Client, discoveryClient *discovery.DiscoveryClient, apply apply.Applicator, config *rest.Config, r Registry, args map[string]interface{}, cache *Cache, opts ...InstallOption) error {
+	h := NewAddonInstaller(ctx, cli, discoveryClient, apply, config, &r, args, cache, opts...)
 	pkg, err := h.loadInstallPackage(name, version)
 	if err != nil {
 		return err
@@ -93,7 +93,7 @@ func DisableAddon(ctx context.Context, cli client.Client, name string, config *r
 }
 
 // EnableAddonByLocalDir enable an addon from local dir
-func EnableAddonByLocalDir(ctx context.Context, name string, dir string, cli client.Client, dc *discovery.DiscoveryClient, applicator apply.Applicator, config *rest.Config, args map[string]interface{}) error {
+func EnableAddonByLocalDir(ctx context.Context, name string, dir string, cli client.Client, dc *discovery.DiscoveryClient, applicator apply.Applicator, config *rest.Config, args map[string]interface{}, opts ...InstallOption) error {
 	absDir, err := filepath.Abs(dir)
 	if err != nil {
 		return err
@@ -112,7 +112,7 @@ func EnableAddonByLocalDir(ctx context.Context, name string, dir string, cli cli
 	if err != nil {
 		return err
 	}
-	h := NewAddonInstaller(ctx, cli, dc, applicator, config, &Registry{Name: LocalAddonRegistryName}, args, nil)
+	h := NewAddonInstaller(ctx, cli, dc, applicator, config, &Registry{Name: LocalAddonRegistryName}, args, nil, opts...)
 	needEnableAddonNames, err := h.checkDependency(pkg)
 	if err != nil {
 		return err
@@ -228,3 +228,11 @@ func usingAppsInfo(apps []v1beta1.Application) string {
 func IsVersionRegistry(r Registry) bool {
 	return r.Helm != nil
 }
+
+// InstallOption define additional option for installation
+type InstallOption func(installer *Installer)
+
+// SkipValidateVersion means skip validating system version
+func SkipValidateVersion(installer *Installer) {
+	installer.skipVersionValidate = true
+}
```
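Since `SkipValidateVersion` already has the `InstallOption` shape (`func(*Installer)`), callers can pass it directly as a trailing argument. A hypothetical caller sketch follows; the surrounding function and the import paths are assumptions, not part of the diff:

```go
package main

import (
	"context"

	"k8s.io/client-go/discovery"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/oam-dev/kubevela/pkg/addon"       // assumed import path
	"github.com/oam-dev/kubevela/pkg/utils/apply" // assumed import path
)

// enableWithoutVersionCheck shows the new variadic tail in action:
// SkipValidateVersion flips Installer.skipVersionValidate, so enableAddon
// bypasses checkAddonVersionMeetRequired.
func enableWithoutVersionCheck(ctx context.Context, cli client.Client,
	dc *discovery.DiscoveryClient, applicator apply.Applicator,
	cfg *rest.Config, r addon.Registry) error {
	return addon.EnableAddon(ctx, "example", "1.0.1", cli, dc, applicator,
		cfg, r, nil /* args */, nil /* cache */, addon.SkipValidateVersion)
}
```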
VelaQL service decodes base64 logs for `collect-logs` queries:

```diff
@@ -18,6 +18,8 @@ package service
 
 import (
 	"context"
+	"encoding/base64"
+	"strings"
 
 	"k8s.io/client-go/rest"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -79,5 +81,9 @@ func (v *velaQLServiceImpl) QueryView(ctx context.Context, velaQL string) (*apis
 		log.Logger.Errorf("decode the velaQL response to json failure %s", err.Error())
 		return nil, bcode.ErrParseQuery2Json
 	}
+	if strings.Contains(velaQL, "collect-logs") {
+		enc, _ := base64.StdEncoding.DecodeString(resp["logs"].(string))
+		resp["logs"] = string(enc)
+	}
 	return &resp, err
 }
```
jFrog webhook handler: nested artifact paths are handled and the payload type is corrected:

```diff
@@ -20,6 +20,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"strings"
 	"time"
 
 	"github.com/emicklei/go-restful/v3"
@@ -426,6 +427,11 @@ func (j *jfrogHandlerImpl) handle(ctx context.Context, webhookTrigger *model.App
 		return nil, err
 	}
 	image := fmt.Sprintf("%s/%s:%s", jfrogReq.Data.RepoKey, jfrogReq.Data.ImageName, jfrogReq.Data.Tag)
+	pathArray := strings.Split(jfrogReq.Data.Path, "/")
+	if len(pathArray) > 2 {
+		image = fmt.Sprintf("%s/%s:%s", jfrogReq.Data.RepoKey, strings.Join(pathArray[:len(pathArray)-2], "/"), jfrogReq.Data.Tag)
+	}
+
 	if jfrogReq.Data.URL != "" {
 		image = fmt.Sprintf("%s/%s", jfrogReq.Data.URL, image)
 	}
@@ -441,7 +447,7 @@ func (j *jfrogHandlerImpl) handle(ctx context.Context, webhookTrigger *model.App
 			TriggerType: apisv1.TriggerTypeWebhook,
 			Force:       true,
 			ImageInfo: &model.ImageInfo{
-				Type: model.PayloadTypeHarbor,
+				Type: model.PayloadTypeJFrog,
 				Resource: &model.ImageResource{
 					Digest: jfrogReq.Data.Digest,
 					Tag:    jfrogReq.Data.Tag,
```
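The new path handling matters for jFrog repositories with nested image names, where `Data.Path` carries extra segments beyond the tag and manifest. A standalone sketch of the computation; all payload values here are hypothetical:

```go
package main

import (
	"fmt"
	"strings"
)

// buildImage mirrors the handler logic above: for a nested artifact path such
// as "team/app/1.0.0/manifest.json", everything except the last two segments
// is the image name; the registry host is prepended when the payload has one.
func buildImage(repoKey, imageName, path, tag, host string) string {
	image := fmt.Sprintf("%s/%s:%s", repoKey, imageName, tag)
	pathArray := strings.Split(path, "/")
	if len(pathArray) > 2 {
		image = fmt.Sprintf("%s/%s:%s", repoKey, strings.Join(pathArray[:len(pathArray)-2], "/"), tag)
	}
	if host != "" {
		image = fmt.Sprintf("%s/%s", host, image)
	}
	return image
}

func main() {
	fmt.Println(buildImage("docker-local", "app", "team/app/1.0.0/manifest.json", "1.0.0", "demo.jfrog.io"))
	// demo.jfrog.io/docker-local/team/app:1.0.0
}
```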
Telemetry bucketing for cluster counts gains finer buckets (and fixes the `>=50` fallthrough):

```diff
@@ -322,7 +322,17 @@ func genClusterCountInfo(num int) string {
 		return "<10"
 	case num < 50:
 		return "<50"
+	case num < 100:
+		return "<100"
+	case num < 150:
+		return "<150"
+	case num < 200:
+		return "<200"
+	case num < 300:
+		return "<300"
+	case num < 500:
+		return "<500"
 	default:
-		return ">=50"
+		return ">=500"
 	}
 }
```
Matching test cases:

```diff
@@ -211,8 +211,28 @@ func TestGenClusterCountInfo(t *testing.T) {
 			res:   "<50",
 		},
 		{
-			count: 100,
-			res:   ">=50",
+			count: 90,
+			res:   "<100",
+		},
+		{
+			count: 137,
+			res:   "<150",
+		},
+		{
+			count: 170,
+			res:   "<200",
+		},
+		{
+			count: 270,
+			res:   "<300",
+		},
+		{
+			count: 400,
+			res:   "<500",
+		},
+		{
+			count: 520,
+			res:   ">=500",
 		},
 	}
 	for _, testcase := range testcases {
```
Appfile workload/trait evaluation switches from a raw namespace string to a `util.NamespaceAccessor`:

```diff
@@ -97,21 +97,21 @@ func (wl *Workload) EvalContext(ctx process.Context) error {
 }
 
 // EvalStatus eval workload status
-func (wl *Workload) EvalStatus(ctx process.Context, cli client.Client, ns string) (string, error) {
+func (wl *Workload) EvalStatus(ctx process.Context, cli client.Client, accessor util.NamespaceAccessor) (string, error) {
 	// if the standard workload is managed by trait always return empty message
 	if wl.SkipApplyWorkload {
 		return "", nil
 	}
-	return wl.engine.Status(ctx, cli, ns, wl.FullTemplate.CustomStatus, wl.Params)
+	return wl.engine.Status(ctx, cli, accessor, wl.FullTemplate.CustomStatus, wl.Params)
 }
 
 // EvalHealth eval workload health check
-func (wl *Workload) EvalHealth(ctx process.Context, client client.Client, namespace string) (bool, error) {
+func (wl *Workload) EvalHealth(ctx process.Context, client client.Client, accessor util.NamespaceAccessor) (bool, error) {
 	// if health of template is not set or standard workload is managed by trait always return true
 	if wl.FullTemplate.Health == "" || wl.SkipApplyWorkload {
 		return true, nil
 	}
-	return wl.engine.HealthCheck(ctx, client, namespace, wl.FullTemplate.Health)
+	return wl.engine.HealthCheck(ctx, client, accessor, wl.FullTemplate.Health)
 }
 
 // Scope defines the scope of workload
@@ -145,16 +145,16 @@ func (trait *Trait) EvalContext(ctx process.Context) error {
 }
 
 // EvalStatus eval trait status
-func (trait *Trait) EvalStatus(ctx process.Context, cli client.Client, ns string) (string, error) {
-	return trait.engine.Status(ctx, cli, ns, trait.CustomStatusFormat, trait.Params)
+func (trait *Trait) EvalStatus(ctx process.Context, cli client.Client, accessor util.NamespaceAccessor) (string, error) {
+	return trait.engine.Status(ctx, cli, accessor, trait.CustomStatusFormat, trait.Params)
 }
 
 // EvalHealth eval trait health check
-func (trait *Trait) EvalHealth(ctx process.Context, client client.Client, namespace string) (bool, error) {
+func (trait *Trait) EvalHealth(ctx process.Context, client client.Client, accessor util.NamespaceAccessor) (bool, error) {
 	if trait.FullTemplate.Health == "" {
 		return true, nil
 	}
-	return trait.engine.HealthCheck(ctx, client, namespace, trait.HealthCheckPolicy)
+	return trait.engine.HealthCheck(ctx, client, accessor, trait.HealthCheckPolicy)
 }
 
 // Appfile describes application
```
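The diff replaces the plain `ns string` parameter with `util.NamespaceAccessor` but never shows the interface itself. Inferred from its call sites (`accessor.Namespace()`, `accessor.For(obj)`, `util.NewApplicationResourceNamespaceAccessor(appNs, override)`), it plausibly looks like the sketch below; treat every detail here as an assumption rather than the committed definition.

```go
// Package util: an inferred sketch of NamespaceAccessor, NOT the committed code.
package util

import "sigs.k8s.io/controller-runtime/pkg/client"

// NamespaceAccessor resolves which namespace a rendered resource lives in,
// folding the override-namespace rule into one place instead of threading a
// mutable ns string through EvalHealth/EvalStatus.
type NamespaceAccessor interface {
	// Namespace is the effective namespace for the component's resources.
	Namespace() string
	// For picks the namespace to read a particular object from.
	For(obj client.Object) string
}

type applicationResourceNamespaceAccessor struct {
	appNamespace, overrideNamespace string
}

func (a *applicationResourceNamespaceAccessor) Namespace() string {
	if a.overrideNamespace != "" {
		return a.overrideNamespace
	}
	return a.appNamespace
}

func (a *applicationResourceNamespaceAccessor) For(obj client.Object) string {
	if ns := obj.GetNamespace(); ns != "" {
		return ns
	}
	return a.Namespace()
}

// NewApplicationResourceNamespaceAccessor matches the constructor used in the diff.
func NewApplicationResourceNamespaceAccessor(appNs, override string) NamespaceAccessor {
	return &applicationResourceNamespaceAccessor{appNamespace: appNs, overrideNamespace: override}
}
```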
New helper to clear user identity from a context:

```diff
@@ -47,6 +47,11 @@ func ContextWithUserInfo(ctx context.Context, app *v1beta1.Application) context.
 	return request.WithUser(ctx, GetUserInfoInAnnotation(&app.ObjectMeta))
 }
 
+// ContextClearUserInfo clear user info in context
+func ContextClearUserInfo(ctx context.Context) context.Context {
+	return request.WithUser(ctx, nil)
+}
+
 // SetUserInfoInAnnotation set username and group from userInfo into annotations
 // it will clear the existing service account annotation in avoid of permission leak
 func SetUserInfoInAnnotation(obj *metav1.ObjectMeta, userInfo authv1.UserInfo) {
```
HTTP command reuses the shared default transport instead of allocating a bare one:

```diff
@@ -51,7 +51,7 @@ func (c *HTTPCmd) Run(meta *registry.Meta) (res interface{}, err error) {
 	var (
 		r      io.Reader
 		client = &http.Client{
-			Transport: &http.Transport{},
+			Transport: http.DefaultTransport,
 			Timeout:   time.Second * 3,
 		}
 	)
```
Application handler: health collection adopts the accessor, and traits get a per-trait override instead of mutating the shared namespace variable:

```diff
@@ -38,6 +38,7 @@ import (
 	"github.com/oam-dev/kubevela/pkg/monitor/metrics"
 	"github.com/oam-dev/kubevela/pkg/multicluster"
 	"github.com/oam-dev/kubevela/pkg/oam"
+	"github.com/oam-dev/kubevela/pkg/oam/util"
 	"github.com/oam-dev/kubevela/pkg/resourcekeeper"
 )
 
@@ -215,17 +216,14 @@ func (h *AppHandler) ProduceArtifacts(ctx context.Context, comps []*types.Compon
 
 // nolint
 func (h *AppHandler) collectHealthStatus(ctx context.Context, wl *appfile.Workload, appRev *v1beta1.ApplicationRevision, overrideNamespace string) (*common.ApplicationComponentStatus, bool, error) {
-	namespace := h.app.Namespace
-	if overrideNamespace != "" {
-		namespace = overrideNamespace
-	}
+	accessor := util.NewApplicationResourceNamespaceAccessor(h.app.Namespace, overrideNamespace)
 
 	var (
 		status = common.ApplicationComponentStatus{
 			Name:               wl.Name,
 			WorkloadDefinition: wl.FullTemplate.Reference.Definition,
 			Healthy:            true,
-			Namespace:          namespace,
+			Namespace:          accessor.Namespace(),
 			Cluster:            multicluster.ClusterNameInContext(ctx),
 		}
 		appName = appRev.Spec.Application.Name
@@ -235,10 +233,10 @@ func (h *AppHandler) collectHealthStatus(ctx context.Context, wl *appfile.Worklo
 
 	if wl.CapabilityCategory == types.TerraformCategory {
 		var configuration terraforv1beta2.Configuration
-		if err := h.r.Client.Get(ctx, client.ObjectKey{Name: wl.Name, Namespace: namespace}, &configuration); err != nil {
+		if err := h.r.Client.Get(ctx, client.ObjectKey{Name: wl.Name, Namespace: accessor.Namespace()}, &configuration); err != nil {
 			if kerrors.IsNotFound(err) {
 				var legacyConfiguration terraforv1beta1.Configuration
-				if err := h.r.Client.Get(ctx, client.ObjectKey{Name: wl.Name, Namespace: namespace}, &legacyConfiguration); err != nil {
+				if err := h.r.Client.Get(ctx, client.ObjectKey{Name: wl.Name, Namespace: accessor.Namespace()}, &legacyConfiguration); err != nil {
 					return nil, false, errors.WithMessagef(err, "app=%s, comp=%s, check health error", appName, wl.Name)
 				}
 				isHealth = setStatus(&status, legacyConfiguration.Status.ObservedGeneration, legacyConfiguration.Generation,
@@ -251,12 +249,12 @@ func (h *AppHandler) collectHealthStatus(ctx context.Context, wl *appfile.Worklo
 				appRev.Name, configuration.Status.Apply.State, configuration.Status.Apply.Message)
 		}
 	} else {
-		if ok, err := wl.EvalHealth(wl.Ctx, h.r.Client, namespace); !ok || err != nil {
+		if ok, err := wl.EvalHealth(wl.Ctx, h.r.Client, accessor); !ok || err != nil {
 			isHealth = false
 			status.Healthy = false
 		}
 
-		status.Message, err = wl.EvalStatus(wl.Ctx, h.r.Client, namespace)
+		status.Message, err = wl.EvalStatus(wl.Ctx, h.r.Client, accessor)
 		if err != nil {
 			return nil, false, errors.WithMessagef(err, "app=%s, comp=%s, evaluate workload status message error", appName, wl.Name)
 		}
@@ -264,24 +262,25 @@ func (h *AppHandler) collectHealthStatus(ctx context.Context, wl *appfile.Worklo
 
 	var traitStatusList []common.ApplicationTraitStatus
 	for _, tr := range wl.Traits {
+		traitOverrideNamespace := overrideNamespace
 		if tr.FullTemplate.TraitDefinition.Spec.ControlPlaneOnly {
-			namespace = appRev.GetNamespace()
+			traitOverrideNamespace = appRev.GetNamespace()
 			wl.Ctx.SetCtx(context.WithValue(wl.Ctx.GetCtx(), multicluster.ClusterContextKey, multicluster.ClusterLocalName))
 		}
+		_accessor := util.NewApplicationResourceNamespaceAccessor(h.app.Namespace, traitOverrideNamespace)
 		var traitStatus = common.ApplicationTraitStatus{
 			Type:    tr.Name,
 			Healthy: true,
 		}
-		if ok, err := tr.EvalHealth(wl.Ctx, h.r.Client, namespace); !ok || err != nil {
+		if ok, err := tr.EvalHealth(wl.Ctx, h.r.Client, _accessor); !ok || err != nil {
 			isHealth = false
 			traitStatus.Healthy = false
 		}
-		traitStatus.Message, err = tr.EvalStatus(wl.Ctx, h.r.Client, namespace)
+		traitStatus.Message, err = tr.EvalStatus(wl.Ctx, h.r.Client, _accessor)
 		if err != nil {
 			return nil, false, errors.WithMessagef(err, "app=%s, comp=%s, trait=%s, evaluate status message error", appName, wl.Name, tr.Name)
 		}
 		traitStatusList = append(traitStatusList, traitStatus)
-		namespace = appRev.GetNamespace()
 		wl.Ctx.SetCtx(context.WithValue(wl.Ctx.GetCtx(), multicluster.ClusterContextKey, status.Cluster))
 	}
@@ -305,12 +304,12 @@ func setStatus(status *common.ApplicationComponentStatus, observedGeneration, ge
 		}
 		return true
 	}
+	status.Message = message
 	if !isLatest() || state != terraformtypes.Available {
 		status.Healthy = false
 		return false
 	}
 	status.Healthy = true
-	status.Message = message
 	return true
 }
```
Comment grammar fix in the workflow converter:

```diff
@@ -188,7 +188,7 @@ func convertStepProperties(step *v1beta1.WorkflowStep, app *v1beta1.Application)
 }
 
 func checkDependsOnValidComponent(dependsOnComponentNames, allComponentNames []string) (string, bool) {
-	// does not depends on other components
+	// does not depend on other components
 	if dependsOnComponentNames == nil {
 		return "", true
 	}
```
Application revision status updates can now be globally disabled:

```diff
@@ -967,7 +967,7 @@ func (h historiesByComponentRevision) Less(i, j int) bool {
 
 // UpdateApplicationRevisionStatus update application revision status
 func (h *AppHandler) UpdateApplicationRevisionStatus(ctx context.Context, appRev *v1beta1.ApplicationRevision, succeed bool, wfStatus *common.WorkflowStatus) {
-	if appRev == nil {
+	if appRev == nil || DisableAllApplicationRevision {
 		return
 	}
 	appRev.Status.Succeeded = succeed
```
CUE-based health checks construct accessors at the call sites:

```diff
@@ -44,6 +44,7 @@ import (
 	af "github.com/oam-dev/kubevela/pkg/appfile"
 	"github.com/oam-dev/kubevela/pkg/cue/process"
 	"github.com/oam-dev/kubevela/pkg/oam"
+	"github.com/oam-dev/kubevela/pkg/oam/util"
 )
 
 const (
@@ -478,7 +479,8 @@ func CUEBasedHealthCheck(ctx context.Context, c client.Client, wlRef WorkloadRef
 		okToCheckTrait = true
 		return
 	}
-	isHealthy, err := wl.EvalHealth(pCtx, c, ns)
+	accessor := util.NewApplicationResourceNamespaceAccessor(ns, "")
+	isHealthy, err := wl.EvalHealth(pCtx, c, accessor)
 	if err != nil {
 		wlHealth.HealthStatus = StatusUnhealthy
 		wlHealth.Diagnosis = errors.Wrap(err, errHealthCheck).Error()
@@ -490,7 +492,7 @@ func CUEBasedHealthCheck(ctx context.Context, c client.Client, wlRef WorkloadRef
 		// TODO(wonderflow): we should add a custom way to let the template say why it's unhealthy, only a bool flag is not enough
 		wlHealth.HealthStatus = StatusUnhealthy
 	}
-	wlHealth.CustomStatusMsg, err = wl.EvalStatus(pCtx, c, ns)
+	wlHealth.CustomStatusMsg, err = wl.EvalStatus(pCtx, c, accessor)
 	if err != nil {
 		wlHealth.Diagnosis = errors.Wrap(err, errHealthCheck).Error()
 	}
@@ -522,7 +524,8 @@ func CUEBasedHealthCheck(ctx context.Context, c client.Client, wlRef WorkloadRef
 		traits[i] = tHealth
 		continue
 	}
-	isHealthy, err := tr.EvalHealth(pCtx, c, ns)
+	accessor := util.NewApplicationResourceNamespaceAccessor("", ns)
+	isHealthy, err := tr.EvalHealth(pCtx, c, accessor)
 	if err != nil {
 		tHealth.HealthStatus = StatusUnhealthy
 		tHealth.Diagnosis = errors.Wrap(err, errHealthCheck).Error()
@@ -535,7 +538,7 @@ func CUEBasedHealthCheck(ctx context.Context, c client.Client, wlRef WorkloadRef
 		// TODO(wonderflow): we should add a custom way to let the template say why it's unhealthy, only a bool flag is not enough
 		tHealth.HealthStatus = StatusUnhealthy
 	}
-	tHealth.CustomStatusMsg, err = tr.EvalStatus(pCtx, c, ns)
+	tHealth.CustomStatusMsg, err = tr.EvalStatus(pCtx, c, accessor)
 	if err != nil {
 		tHealth.Diagnosis = errors.Wrap(err, errHealthCheck).Error()
 	}
```
`GetTerraformConfigurationFromRemote` switches from a throwaway `./tmp/terraform` clone to a cache under the user's home directory:

```diff
@@ -30,7 +30,7 @@ import (
 	"cuelang.org/go/cue/build"
 	"github.com/getkin/kin-openapi/openapi3"
 	"github.com/pkg/errors"
-	git "gopkg.in/src-d/go-git.v4"
+	"gopkg.in/src-d/go-git.v4"
 	v1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -215,25 +215,26 @@ func GetOpenAPISchemaFromTerraformComponentDefinition(configuration string) ([]b
 
 // GetTerraformConfigurationFromRemote gets Terraform Configuration(HCL)
 func GetTerraformConfigurationFromRemote(name, remoteURL, remotePath string) (string, error) {
-	tmpPath := filepath.Join("./tmp/terraform", name)
-	// Check if the directory exists. If yes, remove it.
-	if _, err := os.Stat(tmpPath); err == nil {
-		err := os.RemoveAll(tmpPath)
-		if err != nil {
-			return "", errors.Wrap(err, "failed to remove the directory")
-		}
-	}
-	_, err := git.PlainClone(tmpPath, false, &git.CloneOptions{
-		URL:      remoteURL,
-		Progress: nil,
-	})
+	userHome, err := os.UserHomeDir()
 	if err != nil {
 		return "", err
 	}
+	cachePath := filepath.Join(userHome, ".vela", "terraform", name)
+	// Check if the directory exists. If yes, remove it.
+	entities, err := os.ReadDir(cachePath)
+	if err != nil || len(entities) == 0 {
+		fmt.Printf("loading terraform module %s into %s from %s\n", name, cachePath, remoteURL)
+		if _, err = git.PlainClone(cachePath, false, &git.CloneOptions{
+			URL:      remoteURL,
+			Progress: os.Stdout,
+		}); err != nil {
+			return "", err
+		}
+	}
 
-	tfPath := filepath.Join(tmpPath, remotePath, "variables.tf")
+	tfPath := filepath.Join(cachePath, remotePath, "variables.tf")
 	if _, err := os.Stat(tfPath); err != nil {
-		tfPath = filepath.Join(tmpPath, remotePath, "main.tf")
+		tfPath = filepath.Join(cachePath, remotePath, "main.tf")
 		if _, err := os.Stat(tfPath); err != nil {
 			return "", errors.Wrap(err, "failed to find main.tf or variables.tf in Terraform configurations of the remote repository")
 		}
@@ -242,10 +243,6 @@ func GetTerraformConfigurationFromRemote(name, remoteURL, remotePath string) (st
 	if err != nil {
 		return "", errors.Wrap(err, "failed to read Terraform configuration")
 	}
-	if err := os.RemoveAll(tmpPath); err != nil {
-		return "", err
-	}
 
 	return string(conf), nil
 }
```
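The rewrite trades a clone-per-call temp directory for a per-module cache under `~/.vela/terraform/<name>`: the repository is cloned only when the cache directory is missing or empty, and it is no longer deleted afterwards. A usage sketch, with the helper's import path assumed:

```go
package main

import (
	"fmt"
	"log"

	"github.com/oam-dev/kubevela/pkg/utils" // assumed location of the helper
)

func main() {
	// First call clones the module repo into ~/.vela/terraform/aws-subnet;
	// subsequent calls read variables.tf (or main.tf) from the cached checkout.
	hcl, err := utils.GetTerraformConfigurationFromRemote(
		"aws-subnet",
		"https://github.com/kubevela-contrib/terraform-modules.git",
		"unittest/aws/subnet",
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(hcl)
}
```

One trade-off worth noting: a stale cache is reused silently, since nothing re-fetches once the directory has contents.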
The corresponding test drops the "working path exists" case and points the gomonkey patch at the new cache location (the exact -/+ assignment in the last hunk is partially inferred, since the mirror lost the diff markers):

```diff
@@ -17,24 +17,16 @@
 package utils
 
 import (
 	"context"
 	"io/ioutil"
 	"os"
 	"path/filepath"
 	"strings"
 	"testing"
 
 	. "github.com/agiledragon/gomonkey/v2"
-	"github.com/pkg/errors"
 	"gopkg.in/src-d/go-git.v4"
 	"gotest.tools/assert"
 )
 
 func TestGetTerraformConfigurationFromRemote(t *testing.T) {
-	// If you hit a panic on macOS as below, please fix it by referencing https://github.com/eisenxp/macos-golink-wrapper.
-	// panic: permission denied [recovered]
-	// panic: permission denied
 	type want struct {
 		config string
 		errMsg string
@@ -46,8 +38,6 @@ func TestGetTerraformConfigurationFromRemote(t *testing.T) {
 		path         string
 		data         []byte
 		variableFile string
-		// mockWorkingPath will create `/tmp/terraform`
-		mockWorkingPath bool
 	}
 	cases := map[string]struct {
 		args args
@@ -57,7 +47,7 @@ func TestGetTerraformConfigurationFromRemote(t *testing.T) {
 		args: args{
 			name: "valid",
 			url:  "https://github.com/kubevela-contrib/terraform-modules.git",
-			path: "",
+			path: "unittest/",
 			data: []byte(`
 variable "aaa" {
 	type = list(object({
@@ -85,7 +75,7 @@ variable "aaa" {
 		args: args{
 			name: "aws-subnet",
 			url:  "https://github.com/kubevela-contrib/terraform-modules.git",
-			path: "aws/subnet",
+			path: "unittest/aws/subnet",
 			data: []byte(`
 variable "aaa" {
 	type = list(object({
@@ -109,47 +99,20 @@ variable "aaa" {
 			}`,
 		},
 	},
-	"working path exists": {
-		args: args{
-			variableFile:    "main.tf",
-			mockWorkingPath: true,
-		},
-		want: want{
-			errMsg: "failed to remove the directory",
-		},
-	},
 }
 
 for name, tc := range cases {
 	t.Run(name, func(t *testing.T) {
-		if tc.args.mockWorkingPath {
-			err := os.MkdirAll("./tmp/terraform", 0755)
-			assert.NilError(t, err)
-			defer os.RemoveAll("./tmp/terraform")
-			patch1 := ApplyFunc(os.Remove, func(_ string) error {
-				return errors.New("failed")
-			})
-			defer patch1.Reset()
-			patch2 := ApplyFunc(os.Open, func(_ string) (*os.File, error) {
-				return nil, errors.New("failed")
-			})
-			defer patch2.Reset()
-		}
-
 		patch := ApplyFunc(git.PlainCloneContext, func(ctx context.Context, path string, isBare bool, o *git.CloneOptions) (*git.Repository, error) {
-			var tmpPath string
-			if tc.args.path != "" {
-				tmpPath = filepath.Join("./tmp/terraform", tc.args.name, tc.args.path)
-			} else {
-				tmpPath = filepath.Join("./tmp/terraform", tc.args.name)
-			}
+			home, _ := os.UserHomeDir()
+			path := filepath.Join(home, ".vela", "terraform")
+			tmpPath := filepath.Join(path, tc.args.name, tc.args.path)
 			if len(tc.args.data) > 0 {
 				err := os.MkdirAll(tmpPath, os.ModePerm)
 				assert.NilError(t, err)
 				err = ioutil.WriteFile(filepath.Clean(filepath.Join(tmpPath, tc.args.variableFile)), tc.args.data, 0644)
 				assert.NilError(t, err)
+				return nil, nil
 			}
-			defer os.RemoveAll(tmpPath)
 			return nil, nil
 		})
 		defer patch.Reset()
 
 		conf, err := GetTerraformConfigurationFromRemote(tc.args.name, tc.args.url, tc.args.path)
 		if tc.want.errMsg != "" {
```
@@ -62,8 +62,8 @@ const (
// AbstractEngine defines Definition's Render interface
type AbstractEngine interface {
Complete(ctx process.Context, abstractTemplate string, params interface{}) error
- HealthCheck(ctx process.Context, cli client.Client, ns string, healthPolicyTemplate string) (bool, error)
- Status(ctx process.Context, cli client.Client, ns string, customStatusTemplate string, parameter interface{}) (string, error)
+ HealthCheck(ctx process.Context, cli client.Client, accessor util.NamespaceAccessor, healthPolicyTemplate string) (bool, error)
+ Status(ctx process.Context, cli client.Client, accessor util.NamespaceAccessor, customStatusTemplate string, parameter interface{}) (string, error)
}

type def struct {
@@ -151,7 +151,7 @@ func (wd *workloadDef) Complete(ctx process.Context, abstractTemplate string, pa
return nil
}

- func (wd *workloadDef) getTemplateContext(ctx process.Context, cli client.Reader, ns string) (map[string]interface{}, error) {
+ func (wd *workloadDef) getTemplateContext(ctx process.Context, cli client.Reader, accessor util.NamespaceAccessor) (map[string]interface{}, error) {

var root = initRoot(ctx.BaseContextLabels())
var commonLabels = GetCommonLabels(ctx.BaseContextLabels())
@@ -162,7 +162,7 @@ func (wd *workloadDef) getTemplateContext(ctx process.Context, cli client.Reader
return nil, err
}
// workload main resource will have a unique label("app.oam.dev/resourceType"="WORKLOAD") in per component/app level
- object, err := getResourceFromObj(ctx.GetCtx(), componentWorkload, cli, ns, util.MergeMapOverrideWithDst(map[string]string{
+ object, err := getResourceFromObj(ctx.GetCtx(), componentWorkload, cli, accessor.For(componentWorkload), util.MergeMapOverrideWithDst(map[string]string{
oam.LabelOAMResourceType: oam.ResourceTypeWorkload,
}, commonLabels), "")
if err != nil {
@@ -182,7 +182,7 @@ func (wd *workloadDef) getTemplateContext(ctx process.Context, cli client.Reader
return nil, err
}
// AuxiliaryWorkload will have a unique label("trait.oam.dev/resource"="name of outputs") in per component/app level
- object, err := getResourceFromObj(ctx.GetCtx(), traitRef, cli, ns, util.MergeMapOverrideWithDst(map[string]string{
+ object, err := getResourceFromObj(ctx.GetCtx(), traitRef, cli, accessor.For(componentWorkload), util.MergeMapOverrideWithDst(map[string]string{
oam.TraitTypeLabel: AuxiliaryWorkload,
}, commonLabels), assist.Name)
if err != nil {
@@ -197,11 +197,11 @@ func (wd *workloadDef) getTemplateContext(ctx process.Context, cli client.Reader
}

// HealthCheck address health check for workload
- func (wd *workloadDef) HealthCheck(ctx process.Context, cli client.Client, ns string, healthPolicyTemplate string) (bool, error) {
+ func (wd *workloadDef) HealthCheck(ctx process.Context, cli client.Client, accessor util.NamespaceAccessor, healthPolicyTemplate string) (bool, error) {
if healthPolicyTemplate == "" {
return true, nil
}
- templateContext, err := wd.getTemplateContext(ctx, cli, ns)
+ templateContext, err := wd.getTemplateContext(ctx, cli, accessor)
if err != nil {
return false, errors.WithMessage(err, "get template context")
}
@@ -228,11 +228,11 @@ func checkHealth(templateContext map[string]interface{}, healthPolicyTemplate st
}

// Status get workload status by customStatusTemplate
- func (wd *workloadDef) Status(ctx process.Context, cli client.Client, ns string, customStatusTemplate string, parameter interface{}) (string, error) {
+ func (wd *workloadDef) Status(ctx process.Context, cli client.Client, accessor util.NamespaceAccessor, customStatusTemplate string, parameter interface{}) (string, error) {
if customStatusTemplate == "" {
return "", nil
}
- templateContext, err := wd.getTemplateContext(ctx, cli, ns)
+ templateContext, err := wd.getTemplateContext(ctx, cli, accessor)
if err != nil {
return "", errors.WithMessage(err, "get template context")
}
@@ -417,7 +417,7 @@ func initRoot(contextLabels map[string]string) map[string]interface{} {
return root
}

- func (td *traitDef) getTemplateContext(ctx process.Context, cli client.Reader, ns string) (map[string]interface{}, error) {
+ func (td *traitDef) getTemplateContext(ctx process.Context, cli client.Reader, accessor util.NamespaceAccessor) (map[string]interface{}, error) {
var root = initRoot(ctx.BaseContextLabels())
var commonLabels = GetCommonLabels(ctx.BaseContextLabels())

@@ -431,7 +431,7 @@ func (td *traitDef) getTemplateContext(ctx process.Context, cli client.Reader, n
if err != nil {
return nil, err
}
- object, err := getResourceFromObj(ctx.GetCtx(), traitRef, cli, ns, util.MergeMapOverrideWithDst(map[string]string{
+ object, err := getResourceFromObj(ctx.GetCtx(), traitRef, cli, accessor.For(traitRef), util.MergeMapOverrideWithDst(map[string]string{
oam.TraitTypeLabel: assist.Type,
}, commonLabels), assist.Name)
if err != nil {
@@ -446,11 +446,11 @@ func (td *traitDef) getTemplateContext(ctx process.Context, cli client.Reader, n
}

// Status get trait status by customStatusTemplate
- func (td *traitDef) Status(ctx process.Context, cli client.Client, ns string, customStatusTemplate string, parameter interface{}) (string, error) {
+ func (td *traitDef) Status(ctx process.Context, cli client.Client, accessor util.NamespaceAccessor, customStatusTemplate string, parameter interface{}) (string, error) {
if customStatusTemplate == "" {
return "", nil
}
- templateContext, err := td.getTemplateContext(ctx, cli, ns)
+ templateContext, err := td.getTemplateContext(ctx, cli, accessor)
if err != nil {
return "", errors.WithMessage(err, "get template context")
}
@@ -458,11 +458,11 @@ func (td *traitDef) Status(ctx process.Context, cli client.Client, ns string, cu
}

// HealthCheck address health check for trait
- func (td *traitDef) HealthCheck(ctx process.Context, cli client.Client, ns string, healthPolicyTemplate string) (bool, error) {
+ func (td *traitDef) HealthCheck(ctx process.Context, cli client.Client, accessor util.NamespaceAccessor, healthPolicyTemplate string) (bool, error) {
if healthPolicyTemplate == "" {
return true, nil
}
- templateContext, err := td.getTemplateContext(ctx, cli, ns)
+ templateContext, err := td.getTemplateContext(ctx, cli, accessor)
if err != nil {
return false, errors.WithMessage(err, "get template context")
}

@@ -51,7 +51,7 @@ const (
ContextComponents = "components"
// ContextComponentType is the component type of current trait binding with
ContextComponentType = "componentType"
- // ComponentRevisionPlaceHolder is the component revision name placeHolder, this field will be replace with real value
+ // ComponentRevisionPlaceHolder is the component revision name placeHolder, this field will be replaced with real value
// after component be created
ComponentRevisionPlaceHolder = "KUBEVELA_COMPONENT_REVISION_PLACEHOLDER"
// ContextDataArtifacts is used to store unstructured resources of components

@@ -358,7 +358,11 @@ func jsonMergePatch(base cue.Value, patch cue.Value) (string, error) {
if err != nil {
return "", errors.Wrapf(err, "failed to merge base value and patch value by JsonMergePatch")
}
- return string(merged), nil
+ output, err := OpenBaiscLit(string(merged))
+ if err != nil {
+ return "", errors.Wrapf(err, "failed to parse open basic lit for merged result")
+ }
+ return output, nil
}

func jsonPatch(base cue.Value, patch cue.Value) (string, error) {
@@ -379,5 +383,9 @@ func jsonPatch(base cue.Value, patch cue.Value) (string, error) {
if err != nil {
return "", errors.Wrapf(err, "failed to apply json patch")
}
- return string(merged), nil
+ output, err := OpenBaiscLit(string(merged))
+ if err != nil {
+ return "", errors.Wrapf(err, "failed to parse open basic lit for merged result")
+ }
+ return output, nil
}

@@ -718,10 +718,10 @@ func TestImports(t *testing.T) {
context: stepSessionID: "3w9qkdgn5w"`
v, err := NewValue(`
import (
- "vela/op"
+ "vela/custom"
)

- id: op.context.stepSessionID
+ id: custom.context.stepSessionID

`+cont, nil, cont)
assert.NilError(t, err)

@@ -98,6 +98,9 @@ const (
LabelProject = "core.oam.dev/project"

+ LabelResourceRules = "rules.oam.dev/resources"

// LabelControllerName indicates the controller name
LabelControllerName = "controller.oam.dev/name"
)

const (

@@ -953,3 +953,38 @@ func AsController(r *corev1.ObjectReference) metav1.OwnerReference {
ref.Controller = &c
return ref
}

// NamespaceAccessor namespace accessor for resource
type NamespaceAccessor interface {
For(obj client.Object) string
Namespace() string
}

type applicationResourceNamespaceAccessor struct {
applicationNamespace string
overrideNamespace string
}

// For access namespace for resource
func (accessor *applicationResourceNamespaceAccessor) For(obj client.Object) string {
if accessor.overrideNamespace != "" {
return accessor.overrideNamespace
}
if originalNamespace := obj.GetNamespace(); originalNamespace != "" {
return originalNamespace
}
return accessor.applicationNamespace
}

// Namespace the namespace by default
func (accessor *applicationResourceNamespaceAccessor) Namespace() string {
if accessor.overrideNamespace != "" {
return accessor.overrideNamespace
}
return accessor.applicationNamespace
}

// NewApplicationResourceNamespaceAccessor create namespace accessor for resource in application
func NewApplicationResourceNamespaceAccessor(appNs, overrideNs string) NamespaceAccessor {
return &applicationResourceNamespaceAccessor{applicationNamespace: appNs, overrideNamespace: overrideNs}
}

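A minimal usage sketch of the accessor introduced above (caller code and namespace values are invented for illustration): a non-empty override namespace wins, then the object's own namespace, then the application namespace as the fallback.

obj := &unstructured.Unstructured{}
obj.SetNamespace("") // the object carries no namespace of its own
accessor := util.NewApplicationResourceNamespaceAccessor("app-ns", "")
ns := accessor.For(obj) // "app-ns": falls back to the application namespace
override := util.NewApplicationResourceNamespaceAccessor("app-ns", "infra")
ns = override.For(obj) // "infra": a non-empty override always wins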
@@ -16,7 +16,10 @@ limitations under the License.

package oam

- // SystemDefinitonNamespace golbal value for controller and webhook systemlevel namespace
+ var (
+ // SystemDefinitonNamespace is the global value for the controller and webhook system-level namespace
SystemDefinitonNamespace string = "vela-system"

+ // ApplicationControllerName means the controller is application
+ ApplicationControllerName string = "vela-core"
+ )

@@ -105,6 +105,7 @@ func (h *resourceKeeper) record(ctx context.Context, manifests []*unstructured.U
}

cfg := newDispatchConfig(options...)
+ ctx = auth.ContextClearUserInfo(ctx)
if len(rootManifests) != 0 {
rt, err := h.getRootRT(ctx)
if err != nil {

@@ -85,10 +85,7 @@ func (options *ResourceTreePrintOptions) loadResourceRows(currentRT *v1beta1.Res
if mr.Deleted {
continue
}
- rows = append(rows, &resourceRow{
- mr: mr.DeepCopy(),
- status: resourceRowStatusUpdated,
- })
+ rows = append(rows, buildResourceRow(mr, resourceRowStatusUpdated))
}
}
for _, rt := range historyRT {
@@ -100,10 +97,7 @@ func (options *ResourceTreePrintOptions) loadResourceRows(currentRT *v1beta1.Res
}
}
if matchedRow == nil {
- rows = append(rows, &resourceRow{
- mr: mr.DeepCopy(),
- status: resourceRowStatusOutdated,
- })
+ rows = append(rows, buildResourceRow(mr, resourceRowStatusOutdated))
}
}
}
@@ -410,3 +404,14 @@ func RetrieveKubeCtlGetMessageGenerator(cfg *rest.Config) (ResourceDetailRetriev
return nil
}, nil
}

func buildResourceRow(mr v1beta1.ManagedResource, resourceStatus string) *resourceRow {
rr := &resourceRow{
mr: mr.DeepCopy(),
status: resourceStatus,
}
if rr.mr.Cluster == "" {
rr.mr.Cluster = multicluster.ClusterLocalName
}
return rr
}

@@ -22,6 +22,10 @@ import (

"github.com/stretchr/testify/require"
"k8s.io/utils/pointer"

"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/multicluster"
)

func TestResourceTreePrintOption_getWidthForDetails(t *testing.T) {
@@ -46,3 +50,39 @@ func TestResourceTreePrintOptions_wrapDetails(t *testing.T) {
},
options._wrapDetails(detail, 40))
}

func TestBuildResourceRow(t *testing.T) {
r := require.New(t)

cases := map[string]struct {
Cluster string
ResourceRowStatus string
ExpectedCluster string
ExpectedResourceRowStatus string
}{
"localCluster": {
Cluster: "",
ResourceRowStatus: resourceRowStatusUpdated,
ExpectedCluster: multicluster.ClusterLocalName,
ExpectedResourceRowStatus: resourceRowStatusUpdated,
},
"remoteCluster": {
Cluster: "remoteCluster",
ResourceRowStatus: resourceRowStatusUpdated,
ExpectedCluster: "remoteCluster",
ExpectedResourceRowStatus: resourceRowStatusUpdated,
},
}

for name, c := range cases {
mr := v1beta1.ManagedResource{
ClusterObjectReference: common.ClusterObjectReference{
Cluster: c.Cluster,
},
}
rr := buildResourceRow(mr, c.ResourceRowStatus)
r.Equal(c.ExpectedCluster, rr.mr.Cluster, name)
r.Equal(c.ExpectedResourceRowStatus, rr.status, name)
}

}

@@ -19,20 +19,32 @@ package stdlib

import (
"embed"
"fmt"
"os"
"path/filepath"
"strings"

"cuelang.org/go/cue/build"
"k8s.io/klog/v2"
)

+ func init() {
+ var err error
+ BuiltinImports, err = initBuiltinImports()
+ if err != nil {
+ klog.ErrorS(err, "Unable to init builtin imports")
+ os.Exit(1)
+ }
+ }

var (
//go:embed pkgs op.cue ql.cue
fs embed.FS
+ // BuiltinImports is the builtin imports for cue
+ BuiltinImports []*build.Instance
)

// GetPackages Get Stdlib packages
- func GetPackages(tagTempl string) (map[string]string, error) {
+ func GetPackages() (map[string]string, error) {
files, err := fs.ReadDir("pkgs")
if err != nil {
return nil, err
@@ -63,16 +75,32 @@ func GetPackages(tagTempl string) (map[string]string, error) {
}

return map[string]string{
- "vela/op": opContent + "\n" + tagTempl,
- "vela/ql": qlContent + "\n" + tagTempl,
+ "vela/op": opContent,
+ "vela/ql": qlContent,
}, nil
}

// AddImportsFor install imports for build.Instance.
func AddImportsFor(inst *build.Instance, tagTempl string) error {
- pkgs, err := GetPackages(tagTempl)
+ inst.Imports = append(inst.Imports, BuiltinImports...)
+ if tagTempl != "" {
+ p := &build.Instance{
+ PkgName: filepath.Base("vela/custom"),
+ ImportPath: "vela/custom",
+ }
+ if err := p.AddFile("-", tagTempl); err != nil {
+ return err
+ }
+ inst.Imports = append(inst.Imports, p)
+ }
+ return nil
+ }

+ func initBuiltinImports() ([]*build.Instance, error) {
+ imports := make([]*build.Instance, 0)
+ pkgs, err := GetPackages()
if err != nil {
- return err
+ return nil, err
}
for path, content := range pkgs {
p := &build.Instance{
@@ -80,9 +108,9 @@ func AddImportsFor(inst *build.Instance, tagTempl string) error {
ImportPath: path,
}
if err := p.AddFile("-", content); err != nil {
- return err
+ return nil, err
}
- inst.Imports = append(inst.Imports, p)
+ imports = append(imports, p)
}
- return nil
+ return imports, nil
}
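A short sketch of the new flow (the caller and tag value are hypothetical): the prebuilt vela/op and vela/ql instances come from BuiltinImports, while a non-empty tagTempl is compiled into a separate vela/custom package, matching the import rewrite in the test below.

inst := &build.Instance{}
_ = inst.AddFile("-", `import "vela/custom"
id: custom.context.stepSessionID`)
// Attaches BuiltinImports plus a vela/custom package built from the tag template.
if err := AddImportsFor(inst, `context: stepSessionID: "abc"`); err != nil {
	// handle the error
}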
@@ -26,7 +26,7 @@ import (
)

func TestGetPackages(t *testing.T) {
- pkgs, err := GetPackages("context: _")
+ pkgs, err := GetPackages()
assert.NilError(t, err)
var r cue.Runtime
for path, content := range pkgs {
@@ -36,8 +36,8 @@ func TestGetPackages(t *testing.T) {

builder := &build.Instance{}
builder.AddFile("-", `
- import "vela/op"
- out: op.context`)
+ import "vela/custom"
+ out: custom.context`)
err = AddImportsFor(builder, "context: id: \"xxx\"")
assert.NilError(t, err)

@@ -86,9 +86,9 @@
limitBytes: *null | int
}
outputs?: {
- logs: string
- err?: string
- info: {
+ logs?: string
+ err?: string
+ info?: {
fromDate: string
toDate: string
}

@@ -35,7 +35,6 @@ import (
)

const (
- errAuthenticateProvider = "failed to authenticate Terraform cloud provider %s for %s"
errProviderExists = "terraform provider %s for %s already exists"
errDeleteProvider = "failed to delete Terraform Provider %s err: %w"
errCouldNotDeleteProvider = "the Terraform Provider %s could not be disabled because it was created by enabling a Terraform provider or was manually created"
@@ -54,7 +53,7 @@ func CreateApplication(ctx context.Context, k8sClient client.Client, name, compo
if strings.HasPrefix(componentType, types.TerraformComponentPrefix) {
existed, err := IsTerraformProviderExisted(ctx, k8sClient, name)
if err != nil {
- return errors.Wrapf(err, errAuthenticateProvider, name, componentType)
+ return errors.Wrapf(err, errCheckProviderExistence, name)
}
if existed {
return fmt.Errorf(errProviderExists, name, componentType)

@@ -22,6 +22,9 @@ import (
"strings"

+ "github.com/pkg/errors"

"github.com/oam-dev/kubevela/pkg/cue/model/value"
+ "github.com/oam-dev/kubevela/references/common"
)

// QueryView contains query data
@@ -40,6 +43,8 @@ const (
KeyWordView = "view"
// KeyWordParameter represent parameter keyword
KeyWordParameter = "parameter"
+ // KeyWordTemplate represents template keyword
+ KeyWordTemplate = "template"
// KeyWordExport represent export keyword
KeyWordExport = "export"
// DefaultExportValue is the default Export value
@@ -91,6 +96,36 @@ func ParseVelaQL(ql string) (QueryView, error) {
return qv, nil
}

// ParseVelaQLFromPath will parse a velaQL file path to QueryView
func ParseVelaQLFromPath(velaQLViewPath string) (*QueryView, error) {
body, err := common.ReadRemoteOrLocalPath(velaQLViewPath)
if err != nil {
return nil, errors.Errorf("read view file from %s: %v", velaQLViewPath, err)
}

val, err := value.NewValue(string(body), nil, "")
if err != nil {
return nil, errors.Errorf("new value for view: %v", err)
}

var expStr string
exp, err := val.LookupValue(KeyWordExport)
if err == nil {
expStr, err = exp.String()
if err != nil {
expStr = DefaultExportValue
}
} else {
expStr = DefaultExportValue
}

return &QueryView{
View: string(body),
Parameter: nil,
Export: strings.Trim(strings.TrimSpace(expStr), `"`),
}, nil
}
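A hedged usage sketch of the new entry point (the path is illustrative): the helper accepts a local path or a remote URL, keeps the raw CUE as the view, and falls back to DefaultExportValue when the file declares no export field.

qv, err := ParseVelaQLFromPath("./ql.cue")
if err != nil {
	// handle the error
}
_ = qv.View   // the file content, used verbatim as the view
_ = qv.Export // the trimmed export key, or the default when absent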

// ParseParameter parse parameter to map[string]interface{}
func ParseParameter(parameter string) (map[string]interface{}, error) {
parameter = strings.TrimLeft(parameter, "{")

@@ -18,12 +18,12 @@ package query

import (
"bufio"
"bytes"
"context"
"encoding/base64"
"fmt"
"io"
- "regexp"
"strconv"
"strings"
"time"

"github.com/pkg/errors"
@@ -397,14 +397,6 @@ func (h *provider) GeneratorServiceEndpoints(wfctx wfContext.Context, v *value.V
return fillQueryResult(v, serviceEndpoints, "list")
}

- var (
- terminatedContainerNotFoundRegex = regexp.MustCompile("previous terminated container .+ in pod .+ not found")
- )
-
- func isTerminatedContainerNotFound(err error) bool {
- return err != nil && terminatedContainerNotFoundRegex.MatchString(err.Error())
- }

func (h *provider) CollectLogsInPod(ctx wfContext.Context, v *value.Value, act types.Action) error {
cluster, err := v.GetString("cluster")
if err != nil {
@@ -429,27 +421,29 @@ func (h *provider) CollectLogsInPod(ctx wfContext.Context, v *value.Value, act t
cliCtx := multicluster.ContextWithClusterName(context.Background(), cluster)
clientSet, err := kubernetes.NewForConfig(h.cfg)
if err != nil {
- return errors.Wrapf(err, "failed to create kubernetes clientset")
+ return errors.Wrapf(err, "failed to create kubernetes client")
}
+ var defaultOutputs = make(map[string]interface{})
+ var errMsg string
podInst, err := clientSet.CoreV1().Pods(namespace).Get(cliCtx, pod, v1.GetOptions{})
if err != nil {
- return errors.Wrapf(err, "failed to get pod")
+ errMsg = fmt.Sprintf("failed to get pod:%s", err.Error())
}
req := clientSet.CoreV1().Pods(namespace).GetLogs(pod, opts)
readCloser, err := req.Stream(cliCtx)
- if err != nil && !isTerminatedContainerNotFound(err) {
- return errors.Wrapf(err, "failed to get stream logs")
+ if err != nil {
+ errMsg = fmt.Sprintf("failed to get stream logs %s", err.Error())
}
- r := bufio.NewReader(readCloser)
- var b strings.Builder
- var readErr error
- if err == nil {
+ if readCloser != nil && podInst != nil {
+ r := bufio.NewReader(readCloser)
+ buffer := bytes.NewBuffer(nil)
+ var readErr error
defer func() {
_ = readCloser.Close()
}()
for {
s, err := r.ReadString('\n')
- b.WriteString(s)
+ buffer.WriteString(s)
if err != nil {
if !errors.Is(err, io.EOF) {
readErr = err
@@ -457,30 +451,34 @@ func (h *provider) CollectLogsInPod(ctx wfContext.Context, v *value.Value, act t
break
}
}
- } else {
- readErr = err
+ toDate := v1.Now()
+ var fromDate v1.Time
+ // nolint
+ if opts.SinceTime != nil {
+ fromDate = *opts.SinceTime
+ } else if opts.SinceSeconds != nil {
+ fromDate = v1.NewTime(toDate.Add(time.Duration(-(*opts.SinceSeconds) * int64(time.Second))))
+ } else {
+ fromDate = podInst.CreationTimestamp
+ }
+ // the cue string cannot support the special characters
+ logs := base64.StdEncoding.EncodeToString(buffer.Bytes())
+ defaultOutputs = map[string]interface{}{
+ "logs": logs,
+ "info": map[string]interface{}{
+ "fromDate": fromDate,
+ "toDate": toDate,
+ },
+ }
+ if readErr != nil {
+ errMsg = readErr.Error()
+ }
}
- toDate := v1.Now()
- var fromDate v1.Time
- // nolint
- if opts.SinceTime != nil {
- fromDate = *opts.SinceTime
- } else if opts.SinceSeconds != nil {
- fromDate = v1.NewTime(toDate.Add(time.Duration(-(*opts.SinceSeconds) * int64(time.Second))))
- } else {
- fromDate = podInst.CreationTimestamp
+ if errMsg != "" {
+ klog.Warningf(errMsg)
+ defaultOutputs["err"] = errMsg
}
- o := map[string]interface{}{
- "logs": b.String(),
- "info": map[string]interface{}{
- "fromDate": fromDate,
- "toDate": toDate,
- },
- }
- if readErr != nil {
- o["err"] = readErr.Error()
- }
- return v.FillObject(o, "outputs")
+ return v.FillObject(defaultOutputs, "outputs")
}
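Because the rewrite base64-encodes the collected log text before filling it into outputs (plain CUE strings cannot carry arbitrary bytes), a consumer of this step output needs one decode; a minimal sketch with an invented variable name:

decoded, err := base64.StdEncoding.DecodeString(encodedLogs)
if err != nil {
	// handle the error
}
fmt.Println(string(decoded)) // the original pod log text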
// Install register handlers to provider discover.

@@ -20,6 +20,7 @@ import (
"context"
"encoding/json"
"fmt"
+ "strings"

"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -37,6 +38,7 @@ import (
"github.com/oam-dev/kubevela/pkg/utils"
"github.com/oam-dev/kubevela/pkg/utils/apply"
"github.com/oam-dev/kubevela/pkg/workflow/tasks"
+ "github.com/oam-dev/kubevela/pkg/workflow/tasks/template"
wfTypes "github.com/oam-dev/kubevela/pkg/workflow/types"
)

@@ -73,7 +75,7 @@ func (handler *ViewHandler) QueryView(ctx context.Context, qv QueryView) (*value
outputsTemplate := fmt.Sprintf(OutputsTemplate, qv.Export, qv.Export)
queryKey := QueryParameterKey{}
if err := json.Unmarshal([]byte(outputsTemplate), &queryKey); err != nil {
- return nil, err
+ return nil, errors.Errorf("unmarshal query template: %v", err)
}

handler.viewTask = v1beta1.WorkflowStep{
@@ -84,7 +86,12 @@ func (handler *ViewHandler) QueryView(ctx context.Context, qv QueryView) (*value
}

pCtx := process.NewContext(process.ContextData{})
- taskDiscover := tasks.NewViewTaskDiscover(handler.pd, handler.cli, handler.cfg, handler.dispatch, handler.delete, handler.namespace, 3, pCtx)
+ loader := template.NewViewTemplateLoader(handler.cli, handler.namespace)
+ if len(strings.Split(qv.View, "\n")) > 2 {
+ loader = &template.EchoLoader{}
+ }

+ taskDiscover := tasks.NewViewTaskDiscover(handler.pd, handler.cli, handler.cfg, handler.dispatch, handler.delete, handler.namespace, 3, pCtx, loader)
genTask, err := taskDiscover.GetTaskGenerator(ctx, handler.viewTask.Type)
if err != nil {
return nil, err
@@ -97,14 +104,14 @@ func (handler *ViewHandler) QueryView(ctx context.Context, qv QueryView) (*value

viewCtx, err := NewViewContext()
if err != nil {
- return nil, err
+ return nil, errors.Errorf("new view context: %v", err)
}
status, _, err := runner.Run(viewCtx, &wfTypes.TaskRunOptions{})
if err != nil {
- return nil, err
+ return nil, errors.Errorf("run query view: %v", err)
}
if string(status.Phase) != ViewTaskPhaseSucceeded {
- return nil, errors.Errorf("failed to query the view %s", status.Message)
+ return nil, errors.Errorf("failed to query the view: %s", status.Message)
}
return viewCtx.GetVar(qv.Export)
}

@@ -265,7 +265,7 @@ func (t *TaskLoader) makeValue(ctx wfContext.Context, templ string, id string, p
}
contextTempl += "\n" + pCtx.ExtendedContextFile()

- return value.NewValue(templ+contextTempl, t.pd, contextTempl, value.ProcessScript, value.TagFieldOrder)
+ return value.NewValue(templ+contextTempl, t.pd, "", value.ProcessScript, value.TagFieldOrder)
}

type executor struct {

@@ -272,7 +272,7 @@ func (tr *stepGroupTaskRunner) Run(ctx wfContext.Context, options *types.TaskRun
}

// NewViewTaskDiscover will create a client for load task generator.
- func NewViewTaskDiscover(pd *packages.PackageDiscover, cli client.Client, cfg *rest.Config, apply kube.Dispatcher, delete kube.Deleter, viewNs string, logLevel int, pCtx process.Context) types.TaskDiscover {
+ func NewViewTaskDiscover(pd *packages.PackageDiscover, cli client.Client, cfg *rest.Config, apply kube.Dispatcher, delete kube.Deleter, viewNs string, logLevel int, pCtx process.Context, loader template.Loader) types.TaskDiscover {
handlerProviders := providers.NewProviders()

// install builtin provider
@@ -282,10 +282,9 @@ func NewViewTaskDiscover(pd *packages.PackageDiscover, cli client.Client, cfg *r
http.Install(handlerProviders, cli, viewNs)
email.Install(handlerProviders)

- templateLoader := template.NewViewTemplateLoader(cli, viewNs)
return &taskDiscover{
- remoteTaskDiscover: custom.NewTaskLoader(templateLoader.LoadTaskTemplate, pd, handlerProviders, logLevel, pCtx),
- templateLoader: templateLoader,
+ remoteTaskDiscover: custom.NewTaskLoader(loader.LoadTaskTemplate, pd, handlerProviders, logLevel, pCtx),
+ templateLoader: loader,
}
}

@@ -119,3 +119,12 @@ func NewViewTemplateLoader(client client.Client, namespace string) Loader {
namespace: namespace,
}
}

// EchoLoader will load data from input as it is.
type EchoLoader struct {
}

// LoadTaskTemplate returns the content exactly as it is.
func (ll *EchoLoader) LoadTaskTemplate(_ context.Context, content string) (string, error) {
return content, nil
}
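The EchoLoader is what lets QueryView above treat a multi-line view string as inline CUE instead of the name of a stored view; a sketch of the contrast (values are illustrative):

echo := &EchoLoader{}
tmpl, _ := echo.LoadTaskTemplate(context.Background(), `status: "ok"`)
// tmpl == `status: "ok"`: the content passes through unchanged,
// whereas NewViewTemplateLoader would look a view up by name in the cluster.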
@@ -76,6 +76,8 @@ var addonClusters string

var verboseSatatus bool

+ var skipValidate bool

// NewAddonCommand create `addon` command
func NewAddonCommand(c common.Args, order string, ioStreams cmdutil.IOStreams) *cobra.Command {
cmd := &cobra.Command{
@@ -189,6 +191,7 @@ Enable addon for specific clusters, (local means control plane):

cmd.Flags().StringVarP(&addonVersion, "version", "v", "", "specify the addon version to enable")
cmd.Flags().StringVarP(&addonClusters, types.ClustersArg, "c", "", "specify the runtime-clusters to enable")
+ cmd.Flags().BoolVarP(&skipValidate, "skip-version-validating", "s", false, "skip validating system version requirement")
return cmd
}

@@ -285,6 +288,7 @@ Upgrade addon for specific clusters, (local means control plane):
},
}
cmd.Flags().StringVarP(&addonVersion, "version", "v", "", "specify the addon version to upgrade")
+ cmd.Flags().BoolVarP(&skipValidate, "skip-version-validating", "s", false, "skip validating system version requirement")
return cmd
}

@@ -362,7 +366,11 @@ func enableAddon(ctx context.Context, k8sClient client.Client, dc *discovery.Dis
}

for _, registry := range registries {
- err = pkgaddon.EnableAddon(ctx, name, version, k8sClient, dc, apply.NewAPIApplicator(k8sClient), config, registry, args, nil)
+ var opts []pkgaddon.InstallOption
+ if skipValidate {
+ opts = append(opts, pkgaddon.SkipValidateVersion)
+ }
+ err = pkgaddon.EnableAddon(ctx, name, version, k8sClient, dc, apply.NewAPIApplicator(k8sClient), config, registry, args, nil, opts...)
if errors.Is(err, pkgaddon.ErrNotExist) {
continue
}
@@ -382,7 +390,11 @@ func enableAddon(ctx context.Context, k8sClient client.Client, dc *discovery.Dis

// enableAddonByLocal enable addon in local dir and return the addon name
func enableAddonByLocal(ctx context.Context, name string, dir string, k8sClient client.Client, dc *discovery.DiscoveryClient, config *rest.Config, args map[string]interface{}) error {
- if err := pkgaddon.EnableAddonByLocalDir(ctx, name, dir, k8sClient, dc, apply.NewAPIApplicator(k8sClient), config, args); err != nil {
+ var opts []pkgaddon.InstallOption
+ if skipValidate {
+ opts = append(opts, pkgaddon.SkipValidateVersion)
+ }
+ if err := pkgaddon.EnableAddonByLocalDir(ctx, name, dir, k8sClient, dc, apply.NewAPIApplicator(k8sClient), config, args, opts...); err != nil {
return err
}
if err := waitApplicationRunning(k8sClient, name); err != nil {

@@ -166,9 +166,12 @@ func prepareProviderAddSubCommand(c common.Args, ioStreams cmdutil.IOStreams) ([
return nil, err
}
for _, p := range parameters {
+ // TODO(wonderflow): make the provider default name unique but keep compatibility, as some Applications didn't specify the name.
+ // Now it is "default" for everyone; the name will conflict if we have more than one cloud provider.
cmd.Flags().String(p.Name, fmt.Sprint(p.Default), p.Usage)
}
cmd.RunE = func(cmd *cobra.Command, args []string) error {
+ newContext := context.Background()
name, err := cmd.Flags().GetString(providerNameParam)
if err != nil || name == "" {
return fmt.Errorf("must specify a name for the Terraform Cloud Provider %s", providerType)
@@ -188,8 +191,7 @@ func prepareProviderAddSubCommand(c common.Args, ioStreams cmdutil.IOStreams) ([
if err != nil {
return fmt.Errorf(errAuthenticateProvider, providerType, err)
}

- if err := config.CreateApplication(ctx, k8sClient, name, providerType, string(data), config.UIParam{}); err != nil {
+ if err := config.CreateApplication(newContext, k8sClient, name, providerType, string(data), config.UIParam{}); err != nil {
return fmt.Errorf(errAuthenticateProvider, providerType, err)
}
ioStreams.Infof("Successfully authenticate provider %s for %s\n", name, providerType)

@@ -100,12 +100,6 @@ func startReferenceDocsSite(ctx context.Context, ns string, c common.Args, ioStr
}
referenceHome := filepath.Join(home, "reference")

- definitionPath := filepath.Join(referenceHome, "capabilities")
- if _, err := os.Stat(definitionPath); err != nil && os.IsNotExist(err) {
- if err := os.MkdirAll(definitionPath, 0750); err != nil {
- return err
- }
- }
docsPath := filepath.Join(referenceHome, "docs")
if _, err := os.Stat(docsPath); err != nil && os.IsNotExist(err) {
if err := os.MkdirAll(docsPath, 0750); err != nil {
@@ -171,22 +165,12 @@ func startReferenceDocsSite(ctx context.Context, ns string, c common.Args, ioStr
return err
}

- var capabilityPath string
- switch capabilityType {
- case types.TypeWorkload:
- capabilityPath = plugins.WorkloadTypePath
- case types.TypeTrait:
- capabilityPath = plugins.TraitPath
- case types.TypeScope:
- case types.TypeComponentDefinition:
- capabilityPath = plugins.ComponentDefinitionTypePath
- case types.TypeWorkflowStep:
- capabilityPath = plugins.WorkflowStepPath
- default:
+ if capabilityType != types.TypeWorkload && capabilityType != types.TypeTrait && capabilityType != types.TypeScope &&
+ capabilityType != types.TypeComponentDefinition && capabilityType != types.TypeWorkflowStep {
return fmt.Errorf("unsupported type: %v", capabilityType)
}

- url := fmt.Sprintf("http://127.0.0.1%s/#/%s/%s", Port, capabilityPath, capabilityName)
+ url := fmt.Sprintf("http://127.0.0.1%s/#/%s/%s", Port, capabilityType, capabilityName)
server := &http.Server{
Addr: Port,
Handler: http.FileServer(http.Dir(docsPath)),
@@ -227,7 +211,7 @@ func launch(server *http.Server, errChan chan<- error) {

func generateSideBar(capabilities []types.Capability, docsPath string) error {
sideBar := filepath.Join(docsPath, SideBar)
- components, traits, workflowsteps := getDefinitions(capabilities)
+ components, traits, workflowSteps, policies := getDefinitions(capabilities)
f, err := os.Create(sideBar)
if err != nil {
return err
@@ -235,8 +219,9 @@ func generateSideBar(capabilities []types.Capability, docsPath string) error {
if _, err := f.WriteString("- Components Types\n"); err != nil {
return err
}

for _, c := range components {
- if _, err := f.WriteString(fmt.Sprintf(" - [%s](%s/%s.md)\n", c, plugins.ComponentDefinitionTypePath, c)); err != nil {
+ if _, err := f.WriteString(fmt.Sprintf(" - [%s](%s/%s.md)\n", c, types.TypeComponentDefinition, c)); err != nil {
return err
}
}
@@ -244,15 +229,24 @@ func generateSideBar(capabilities []types.Capability, docsPath string) error {
return err
}
for _, t := range traits {
- if _, err := f.WriteString(fmt.Sprintf(" - [%s](%s/%s.md)\n", t, plugins.TraitPath, t)); err != nil {
+ if _, err := f.WriteString(fmt.Sprintf(" - [%s](%s/%s.md)\n", t, types.TypeTrait, t)); err != nil {
return err
}
}
if _, err := f.WriteString("- Workflow Steps\n"); err != nil {
return err
}
- for _, t := range workflowsteps {
- if _, err := f.WriteString(fmt.Sprintf(" - [%s](%s/%s.md)\n", t, plugins.WorkflowStepPath, t)); err != nil {
+ for _, t := range workflowSteps {
+ if _, err := f.WriteString(fmt.Sprintf(" - [%s](%s/%s.md)\n", t, types.TypeWorkflowStep, t)); err != nil {
return err
}
}

+ if _, err := f.WriteString("- Policies\n"); err != nil {
+ return err
+ }
+ for _, t := range policies {
+ if _, err := f.WriteString(fmt.Sprintf(" - [%s](%s/%s.md)\n", t, types.TypePolicy, t)); err != nil {
+ return err
+ }
+ }
@@ -327,14 +321,14 @@ func generateREADME(capabilities []types.Capability, docsPath string) error {
return err
}

- workloads, traits, workflowsteps := getDefinitions(capabilities)
+ workloads, traits, workflowSteps, policies := getDefinitions(capabilities)

if _, err := f.WriteString("## Component Types\n"); err != nil {
return err
}

for _, w := range workloads {
- if _, err := f.WriteString(fmt.Sprintf(" - [%s](%s/%s.md)\n", w, plugins.ComponentDefinitionTypePath, w)); err != nil {
+ if _, err := f.WriteString(fmt.Sprintf(" - [%s](%s/%s.md)\n", w, types.TypeComponentDefinition, w)); err != nil {
return err
}
}
@@ -343,7 +337,7 @@ func generateREADME(capabilities []types.Capability, docsPath string) error {
}

for _, t := range traits {
- if _, err := f.WriteString(fmt.Sprintf(" - [%s](%s/%s.md)\n", t, plugins.TraitPath, t)); err != nil {
+ if _, err := f.WriteString(fmt.Sprintf(" - [%s](%s/%s.md)\n", t, types.TypeTrait, t)); err != nil {
return err
}
}
@@ -351,16 +345,26 @@ func generateREADME(capabilities []types.Capability, docsPath string) error {
if _, err := f.WriteString("## Workflow Steps\n"); err != nil {
return err
}
- for _, t := range workflowsteps {
- if _, err := f.WriteString(fmt.Sprintf(" - [%s](%s/%s.md)\n", t, plugins.WorkflowStepPath, t)); err != nil {
+ for _, t := range workflowSteps {
+ if _, err := f.WriteString(fmt.Sprintf(" - [%s](%s/%s.md)\n", t, types.TypeWorkflowStep, t)); err != nil {
return err
}
}

+ if _, err := f.WriteString("## Policies\n"); err != nil {
+ return err
+ }
+ for _, t := range policies {
+ if _, err := f.WriteString(fmt.Sprintf(" - [%s](%s/%s.md)\n", t, types.TypePolicy, t)); err != nil {
+ return err
+ }
+ }

return nil
}

- func getDefinitions(capabilities []types.Capability) ([]string, []string, []string) {
- var components, traits, workflowSteps []string
+ func getDefinitions(capabilities []types.Capability) ([]string, []string, []string, []string) {
+ var components, traits, workflowSteps, policies []string
for _, c := range capabilities {
switch c.Type {
case types.TypeComponentDefinition:
@@ -369,12 +373,14 @@ func getDefinitions(capabilities []types.Capability) ([]string, []string, []stri
traits = append(traits, c.Name)
case types.TypeWorkflowStep:
workflowSteps = append(workflowSteps, c.Name)
+ case types.TypePolicy:
+ policies = append(policies, c.Name)
case types.TypeScope:
case types.TypeWorkload:
default:
}
}
- return components, traits, workflowSteps
+ return components, traits, workflowSteps, policies
}

// ShowReferenceConsole will show capability reference in console

@@ -27,7 +27,6 @@ import (
"github.com/stretchr/testify/assert"

"github.com/oam-dev/kubevela/apis/types"
- "github.com/oam-dev/kubevela/references/plugins"
)

const BaseDir = "testdata"
@@ -134,9 +133,9 @@ func TestGenerateREADME(t *testing.T) {
for _, c := range tc.capabilities {
switch c.Type {
case types.TypeComponentDefinition:
- assert.Contains(t, string(data), fmt.Sprintf(" - [%s](%s/%s.md)\n", c.Name, plugins.ComponentDefinitionTypePath, c.Name))
+ assert.Contains(t, string(data), fmt.Sprintf(" - [%s](%s/%s.md)\n", c.Name, types.TypeComponentDefinition, c.Name))
case types.TypeTrait:
- assert.Contains(t, string(data), fmt.Sprintf(" - [%s](%s/%s.md)\n", c.Name, plugins.TraitPath, c.Name))
+ assert.Contains(t, string(data), fmt.Sprintf(" - [%s](%s/%s.md)\n", c.Name, types.TypeTrait, c.Name))
}
}
})
@@ -147,10 +146,15 @@ func TestGetWorkloadAndTraits(t *testing.T) {
type want struct {
workloads []string
traits []string
+ policies []string
}
- workloadName := "component1"
- traitName := "trait1"
- scopeName := "scope1"

+ var (
+ workloadName = "component1"
+ traitName = "trait1"
+ scopeName = "scope1"
+ policyName = "policy1"
+ )

cases := map[string]struct {
reason string
@@ -187,11 +191,22 @@ func TestGetWorkloadAndTraits(t *testing.T) {
traits: nil,
},
},
+ "PolicyTypeCapability": {
+ capabilities: []types.Capability{
+ {
+ Name: policyName,
+ Type: types.TypePolicy,
+ },
+ },
+ want: want{
+ policies: []string{policyName},
+ },
+ },
}
for name, tc := range cases {
t.Run(name, func(t *testing.T) {
- gotWorkloads, gotTraits, _ := getDefinitions(tc.capabilities)
- assert.Equal(t, tc.want, want{workloads: gotWorkloads, traits: gotTraits})
+ gotWorkloads, gotTraits, _, gotPolicies := getDefinitions(tc.capabilities)
+ assert.Equal(t, tc.want, want{workloads: gotWorkloads, traits: gotTraits, policies: gotPolicies})
})
}
}

@@ -159,7 +159,6 @@ func printAppStatus(_ context.Context, c client.Client, ioStreams cmdutil.IOStre
if err := printWorkflowStatus(c, ioStreams, appName, namespace); err != nil {
return err
}
- cmd.Printf("Services:\n\n")
return loopCheckStatus(c, ioStreams, appName, namespace)
}

@@ -237,6 +236,9 @@ func loopCheckStatus(c client.Client, ioStreams cmdutil.IOStreams, appName strin
if err != nil {
return err
}
+ if len(remoteApp.Status.Services) > 0 {
+ ioStreams.Infof("Services:\n\n")
+ }
for _, comp := range remoteApp.Status.Services {
compName := comp.Name
envStat := ""

@@ -21,6 +21,7 @@ import (
"context"
"encoding/json"
"fmt"
+ "strings"

"github.com/spf13/cobra"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -42,60 +43,116 @@ type Filter struct {

// NewQlCommand creates `ql` command for executing velaQL
func NewQlCommand(c common.Args, order string, ioStreams util.IOStreams) *cobra.Command {
+ var cueFile, querySts string
ctx := context.Background()
cmd := &cobra.Command{
- Use: "ql",
- Short: "Show result of executing velaQL.",
- Long: "Show result of executing velaQL.",
- Example: `vela ql "view{parameter=value1,parameter=value2}"`,
+ Use: "ql",
+ Short: "Show result of executing velaQL.",
+ Long: `Show result of executing velaQL, use it like:
+ vela ql --query "<inner-view-name>{<param1>=<value1>,<param2>=<value2>}"
+ vela ql --file ./ql.cue
+ `,
+ Example: `Users can query with a query statement:
+ vela ql --query "<inner-view-name>{<param1>=<value1>,<param2>=<value2>}"
+ They can also query by a ql file:
+ vela ql --file ./ql.cue

+ Example content of ql.cue:
+ ---
+ import (
+ "vela/ql"
+ )
+ configmap: ql.#Read & {
+ value: {
+ kind: "ConfigMap"
+ apiVersion: "v1"
+ metadata: {
+ name: "mycm"
+ }
+ }
+ }
+ status: configmap.value.data.key

+ export: "status"
+ ---
+ `,
RunE: func(cmd *cobra.Command, args []string) error {
- argsLength := len(args)
- if argsLength == 0 {
- return fmt.Errorf("please specify an VelaQL statement")
+ if cueFile == "" && querySts == "" && len(args) == 0 {
+ return fmt.Errorf("please specify at least one VelaQL statement or VelaQL file path")
}
- velaQL := args[0]
newClient, err := c.GetClient()
if err != nil {
return err
}
- return printVelaQLResult(ctx, newClient, c, velaQL, cmd)

+ if cueFile != "" {
+ return queryFromView(ctx, newClient, c, cueFile, cmd)
+ }
+ if querySts == "" {
+ // for compatibility
+ querySts = args[0]
+ }
+ return queryFromStatement(ctx, newClient, c, querySts, cmd)
},
Annotations: map[string]string{
types.TagCommandOrder: order,
types.TagCommandType: types.TypeApp,
},
}
+ cmd.Flags().StringVarP(&cueFile, "file", "f", "", "The CUE file path for VelaQL, it could be a remote url.")
+ cmd.Flags().StringVarP(&querySts, "query", "q", "", "The query statement for VelaQL.")
cmd.SetOut(ioStreams.Out)
return cmd
}

- // printVelaQLResult show velaQL result
- func printVelaQLResult(ctx context.Context, client client.Client, velaC common.Args, velaQL string, cmd *cobra.Command) error {
- queryValue, err := QueryValue(ctx, client, velaC, velaQL)
+ // queryFromStatement print velaQL result from query statement with inner query view
+ func queryFromStatement(ctx context.Context, client client.Client, velaC common.Args, velaQLStatement string, cmd *cobra.Command) error {
+ queryView, err := velaql.ParseVelaQL(velaQLStatement)
if err != nil {
return err
}
+ queryValue, err := QueryValue(ctx, client, velaC, &queryView)
+ if err != nil {
+ return err
+ }
return print(queryValue, cmd)
}

+ // queryFromView print velaQL result from query view
+ func queryFromView(ctx context.Context, client client.Client, velaC common.Args, velaQLViewPath string, cmd *cobra.Command) error {
+ queryView, err := velaql.ParseVelaQLFromPath(velaQLViewPath)
+ if err != nil {
+ return err
+ }
+ queryValue, err := QueryValue(ctx, client, velaC, queryView)
+ if err != nil {
+ return err
+ }
+ return print(queryValue, cmd)
+ }

func print(queryValue *value.Value, cmd *cobra.Command) error {
response, err := queryValue.CueValue().MarshalJSON()
if err != nil {
return err
}
var out bytes.Buffer
- err = json.Indent(&out, response, "", " ")
+ err = json.Indent(&out, response, "", " ")
if err != nil {
return err
}
- cmd.Printf("%s\n", out.String())
+ cmd.Println(strings.Trim(strings.TrimSpace(out.String()), "\""))
return nil
}

// MakeVelaQL build velaQL
func MakeVelaQL(view string, params map[string]string, action string) string {
var paramString string
- for key, value := range params {
+ for k, v := range params {
if paramString != "" {
- paramString = fmt.Sprintf("%s, %s=%s", paramString, key, value)
+ paramString = fmt.Sprintf("%s, %s=%s", paramString, k, v)
} else {
- paramString = fmt.Sprintf("%s=%s", key, value)
+ paramString = fmt.Sprintf("%s=%s", k, v)
}
}
return fmt.Sprintf("%s{%s}.%s", view, paramString, action)
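For a concrete rendering of MakeVelaQL (parameter values are illustrative), a single-parameter call yields the statement form that velaql.ParseVelaQL consumes:

ql := MakeVelaQL("service-endpoints-view", map[string]string{"appName": "demo"}, "status")
// ql == "service-endpoints-view{appName=demo}.status"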
@@ -116,7 +173,11 @@ func GetServiceEndpoints(ctx context.Context, client client.Client, appName stri
}

velaQL := MakeVelaQL("service-endpoints-view", params, "status")
- queryValue, err := QueryValue(ctx, client, velaC, velaQL)
+ queryView, err := velaql.ParseVelaQL(velaQL)
+ if err != nil {
+ return nil, err
+ }
+ queryValue, err := QueryValue(ctx, client, velaC, &queryView)
if err != nil {
return nil, err
}
@@ -134,7 +195,7 @@ func GetServiceEndpoints(ctx context.Context, client client.Client, appName stri
}

// QueryValue get queryValue from velaQL
- func QueryValue(ctx context.Context, client client.Client, velaC common.Args, velaQL string) (*value.Value, error) {
+ func QueryValue(ctx context.Context, client client.Client, velaC common.Args, queryView *velaql.QueryView) (*value.Value, error) {
dm, err := velaC.GetDiscoveryMapper()
if err != nil {
return nil, err
@@ -143,15 +204,11 @@ func QueryValue(ctx context.Context, client client.Client, velaC common.Args, ve
if err != nil {
return nil, err
}
- queryView, err := velaql.ParseVelaQL(velaQL)
- if err != nil {
- return nil, err
- }
config, err := velaC.GetConfig()
if err != nil {
return nil, err
}
- queryValue, err := velaql.NewViewHandler(client, config, dm, pd).QueryView(ctx, queryView)
+ queryValue, err := velaql.NewViewHandler(client, config, dm, pd).QueryView(ctx, *queryView)
if err != nil {
return nil, err
}

@@ -17,10 +17,14 @@ limitations under the License.
package cli

import (
"bytes"
"context"
"fmt"
"io/ioutil"
"os"
"strconv"
"strings"
"time"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -39,6 +43,43 @@ import (
common2 "github.com/oam-dev/kubevela/pkg/utils/common"
)

var _ = Describe("Test velaQL from file", func() {
It("Test Query pod data", func() {
cm := &corev1.ConfigMap{Data: map[string]string{"key": "my-value"}}
cm.Name = "mycm"
cm.Namespace = "default"
Expect(k8sClient.Create(context.TODO(), cm)).Should(BeNil())
view := `import (
"vela/ql"
)
configmap: ql.#Read & {
value: {
kind: "ConfigMap"
apiVersion: "v1"
metadata: {
name: "mycm"
}
}
}
status: configmap.value.data.key

export: "status"
`
name := "vela-test-" + strconv.FormatInt(time.Now().UnixNano(), 10) + ".cue"
Expect(os.WriteFile(name, []byte(view), 0644)).Should(BeNil())
defer os.Remove(name)

arg := common2.Args{}
arg.SetConfig(cfg)
arg.SetClient(k8sClient)
cmd := NewCommand()
var buff = bytes.NewBufferString("")
cmd.SetOut(buff)
Expect(queryFromView(context.TODO(), k8sClient, arg, name, cmd)).Should(BeNil())
Expect(strings.TrimSpace(buff.String())).Should(BeEquivalentTo("my-value"))
})
})

var _ = Describe("Test velaQL", func() {
var appName = "test-velaql"
var namespace = "default"

@@ -73,20 +73,38 @@ func GetNamespacedCapabilitiesFromCluster(ctx context.Context, namespace string,
|
||||
capabilities = append(capabilities, traits...)
|
||||
}
|
||||
|
||||
// get components from default namespace
|
||||
if workloads, _, err := GetComponentsFromClusterWithValidateOption(ctx, types.DefaultKubeVelaNS, c, selector, false); err == nil {
|
||||
capabilities = append(capabilities, workloads...)
|
||||
if workflowSteps, _, err := GetWorkflowSteps(ctx, namespace, c); err == nil {
|
||||
capabilities = append(capabilities, workflowSteps...)
|
||||
}
|
||||
|
||||
// get traits from default namespace
|
||||
if traits, _, err := GetTraitsFromClusterWithValidateOption(ctx, types.DefaultKubeVelaNS, c, selector, false); err == nil {
|
||||
capabilities = append(capabilities, traits...)
|
||||
if policies, _, err := GetPolicies(ctx, namespace, c); err == nil {
|
||||
capabilities = append(capabilities, policies...)
|
||||
}
|
||||
|
||||
if namespace != types.DefaultKubeVelaNS {
|
||||
// get components from default namespace
|
||||
if workloads, _, err := GetComponentsFromClusterWithValidateOption(ctx, types.DefaultKubeVelaNS, c, selector, false); err == nil {
|
||||
capabilities = append(capabilities, workloads...)
|
||||
}
|
||||
|
||||
// get traits from default namespace
|
||||
if traits, _, err := GetTraitsFromClusterWithValidateOption(ctx, types.DefaultKubeVelaNS, c, selector, false); err == nil {
|
||||
capabilities = append(capabilities, traits...)
|
||||
}
|
||||
|
||||
if workflowSteps, _, err := GetWorkflowSteps(ctx, types.DefaultKubeVelaNS, c); err == nil {
|
||||
capabilities = append(capabilities, workflowSteps...)
|
||||
}
|
||||
|
||||
if policies, _, err := GetPolicies(ctx, types.DefaultKubeVelaNS, c); err == nil {
|
||||
capabilities = append(capabilities, policies...)
|
||||
}
|
||||
}
|
||||
|
||||
if len(capabilities) > 0 {
|
||||
return capabilities, nil
|
||||
}
|
||||
return nil, fmt.Errorf("could not find any components or traits from namespace %s and %s", namespace, types.DefaultKubeVelaNS)
|
||||
return nil, fmt.Errorf("could not find any components, traits or workflowSteps from namespace %s and %s", namespace, types.DefaultKubeVelaNS)
|
||||
}
|
||||
|
||||
// GetComponentsFromCluster will get capability from K8s cluster
|
||||
@@ -184,6 +202,58 @@ func GetTraitsFromClusterWithValidateOption(ctx context.Context, namespace strin
|
||||
return templates, templateErrors, nil
|
||||
}
|
||||
|
||||
+
+// GetWorkflowSteps will get WorkflowStepDefinition list
+func GetWorkflowSteps(ctx context.Context, namespace string, c common.Args) ([]types.Capability, []error, error) {
+	newClient, err := c.GetClient()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var templates []types.Capability
+	var workflowStepDefs v1beta1.WorkflowStepDefinitionList
+	err = newClient.List(ctx, &workflowStepDefs, &client.ListOptions{Namespace: namespace})
+	if err != nil {
+		return nil, nil, fmt.Errorf("list WorkflowStepDefinition err: %w", err)
+	}
+
+	var templateErrors []error
+	for _, def := range workflowStepDefs.Items {
+		tmp, err := GetCapabilityByWorkflowStepDefinitionObject(def, nil)
+		if err != nil {
+			templateErrors = append(templateErrors, err)
+			continue
+		}
+		templates = append(templates, *tmp)
+	}
+	return templates, templateErrors, nil
+}
+
+// GetPolicies will get Policy from K8s cluster
+func GetPolicies(ctx context.Context, namespace string, c common.Args) ([]types.Capability, []error, error) {
+	newClient, err := c.GetClient()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var templates []types.Capability
+	var defs v1beta1.PolicyDefinitionList
+	err = newClient.List(ctx, &defs, &client.ListOptions{Namespace: namespace})
+	if err != nil {
+		return nil, nil, fmt.Errorf("list PolicyDefinition err: %w", err)
+	}
+
+	var templateErrors []error
+	for _, def := range defs.Items {
+		tmp, err := GetCapabilityByPolicyDefinitionObject(def, nil)
+		if err != nil {
+			templateErrors = append(templateErrors, err)
+			continue
+		}
+		templates = append(templates, *tmp)
+	}
+	return templates, templateErrors, nil
+}

// validateCapabilities validates whether helm charts are successfully installed, GVK are successfully retrieved.
func validateCapabilities(tmp *types.Capability, dm discoverymapper.DiscoveryMapper, definitionName string, reference commontypes.DefinitionReference) error {
	var err error
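Note the two error channels in the new getters: the third return value is fatal (the List call itself failed), while the second collects per-definition conversion failures so one malformed definition does not hide the rest. A hedged sketch of direct use, with common.Args wiring as in the earlier sketch:

	// Sketch, not part of this commit: consuming both error channels.
	func printWorkflowSteps(ctx context.Context, c common.Args) error {
		steps, stepErrs, err := GetWorkflowSteps(ctx, types.DefaultKubeVelaNS, c)
		if err != nil {
			return err // listing WorkflowStepDefinitions failed outright
		}
		for _, convErr := range stepErrs {
			fmt.Println("skipping one WorkflowStepDefinition:", convErr)
		}
		for _, step := range steps {
			fmt.Println("workflow step:", step.Name)
		}
		return nil
	}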
@@ -411,11 +481,7 @@ func GetCapabilityByTraitDefinitionObject(traitDef v1beta1.TraitDefinition) (*ty

// GetCapabilityByWorkflowStepDefinitionObject gets capability by WorkflowStepDefinition object
func GetCapabilityByWorkflowStepDefinitionObject(wfStepDef v1beta1.WorkflowStepDefinition, pd *packages.PackageDiscover) (*types.Capability, error) {
-	var (
-		capability types.Capability
-		err        error
-	)
-	capability, err = HandleDefinition(wfStepDef.Name, wfStepDef.Spec.Reference.Name, wfStepDef.Annotations, wfStepDef.Labels,
+	capability, err := HandleDefinition(wfStepDef.Name, wfStepDef.Spec.Reference.Name, wfStepDef.Annotations, wfStepDef.Labels,
		nil, types.TypeWorkflowStep, nil, wfStepDef.Spec.Schematic, pd)
	if err != nil {
		return nil, errors.Wrap(err, "failed to handle WorkflowStepDefinition")
@@ -423,3 +489,14 @@ func GetCapabilityByWorkflowStepDefinitionObject(wfStepDef v1beta1.WorkflowStepD
	capability.Namespace = wfStepDef.Namespace
	return &capability, nil
}
+
+// GetCapabilityByPolicyDefinitionObject gets capability by PolicyDefinition object
+func GetCapabilityByPolicyDefinitionObject(def v1beta1.PolicyDefinition, pd *packages.PackageDiscover) (*types.Capability, error) {
+	capability, err := HandleDefinition(def.Name, def.Spec.Reference.Name, def.Annotations, def.Labels,
+		nil, types.TypePolicy, nil, def.Spec.Schematic, pd)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to handle PolicyDefinition")
+	}
+	capability.Namespace = def.Namespace
+	return &capability, nil
+}
@@ -430,7 +430,8 @@ variable "acl" {
		"configuration is in git remote": {
			args: args{
				cap: types.Capability{
-					TerraformConfiguration: "https://github.com/zzxwill/terraform-alibaba-eip.git",
+					Name:                   "ecs",
+					TerraformConfiguration: "https://github.com/wonderflow/terraform-alicloud-ecs-instance.git",
					ConfigurationType:      "remote",
				},
			},
@@ -55,14 +55,6 @@ const (
	KubeVelaIOTerraformPathZh = "../kubevela.io/i18n/zh/docusaurus-plugin-content-docs/current/end-user/components/cloud-services/terraform"
	// ReferenceSourcePath is the location for source reference
	ReferenceSourcePath = "hack/references"
-	// ComponentDefinitionTypePath is the URL path for component typed capability
-	ComponentDefinitionTypePath = "components"
-	// WorkloadTypePath is the URL path for workload typed capability
-	WorkloadTypePath = "workload-types"
-	// TraitPath is the URL path for trait typed capability
-	TraitPath = "traits"
-	// WorkflowStepPath is the URL path for workflow step typed capability
-	WorkflowStepPath = "workflowsteps"
)

// Int64Type is int64 type
@@ -656,18 +648,20 @@ func (ref *MarkdownReference) CreateMarkdown(ctx context.Context, caps []types.C
		sample        string
		specification string
	)
-	if c.Type != types.TypeWorkload && c.Type != types.TypeComponentDefinition && c.Type != types.TypeTrait {
+	if c.Type != types.TypeWorkload && c.Type != types.TypeComponentDefinition && c.Type != types.TypeTrait &&
+		c.Type != types.TypeWorkflowStep && c.Type != types.TypePolicy {
		return fmt.Errorf("the type of the capability is not right")
	}

-	fileName := fmt.Sprintf("%s.md", c.Name)
-	if _, err := os.Stat(baseRefPath); err != nil && os.IsNotExist(err) {
-		if err := os.MkdirAll(baseRefPath, 0750); err != nil {
+	refPath := filepath.Join(baseRefPath, string(c.Type))
+	if _, err := os.Stat(refPath); err != nil && os.IsNotExist(err) {
+		if err := os.MkdirAll(refPath, 0750); err != nil {
			return err
		}
	}

-	markdownFile := filepath.Join(baseRefPath, fileName)
+	fileName := fmt.Sprintf("%s.md", c.Name)
+	markdownFile := filepath.Join(refPath, fileName)
	f, err := os.OpenFile(filepath.Clean(markdownFile), os.O_WRONLY|os.O_CREATE, 0600)
	if err != nil {
		return fmt.Errorf("failed to open file %s: %w", markdownFile, err)
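The net effect of this hunk: generated reference docs move from one flat directory into a per-type subdirectory, so a trait and a component sharing a name no longer overwrite each other's markdown. A sketch of the resulting layout (the directory names assume the string values of types.CapType, e.g. "trait", which this diff does not show):

	// Sketch, not part of this commit.
	// before: <baseRefPath>/webservice.md            (all types in one directory)
	// after:  <baseRefPath>/component/webservice.md
	//         <baseRefPath>/trait/sidecar.md
	//         <baseRefPath>/workflowstep/suspend.md
	refPath := filepath.Join(baseRefPath, string(c.Type))
	markdownFile := filepath.Join(refPath, fmt.Sprintf("%s.md", c.Name))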
@@ -869,11 +863,13 @@ func (ref *ParseReference) parseParameters(paraValue cue.Value, paramKey string,
	}
	if arguments.Len() == 0 {
		var param ReferenceParameter
-		param.Name = "-"
+		param.Name = "\\-"
		param.Required = true
		tl := paraValue.Template()
		if tl != nil { // is map type
			param.PrintableType = fmt.Sprintf("map[string]%s", tl("").IncompleteKind().String())
+		} else {
+			param.PrintableType = "{}"
		}
		params = append(params, param)
	}
@@ -198,14 +198,14 @@ template: {
			// +usage=Specifies a source the value of this var should come from
			valueFrom?: {
				// +usage=Selects a key of a secret in the pod's namespace
-				secretKeyRef: {
+				secretKeyRef?: {
					// +usage=The name of the secret in the pod's namespace to select from
					name: string
					// +usage=The key of the secret to select from. Must be a valid secret key
					key: string
				}
				// +usage=Selects a key of a config map in the pod's namespace
-				configMapKeyRef: {
+				configMapKeyRef?: {
					// +usage=The name of the config map in the pod's namespace to select from
					name: string
					// +usage=The key of the config map to select from. Must be a valid secret key
@@ -55,12 +55,12 @@
	}
template: {
-	#K8sObject: {
-		apiVersion: string
-		kind:       string
-		metadata: {
-			name: string
-			...
-		}
+	resource?: string
+	group?: string
+	name?: string
+	namespace?: string
+	cluster?: string
+	labelSelector?: [string]: string
	...
}
@@ -180,14 +180,14 @@ template: {
			// +usage=Specifies a source the value of this var should come from
			valueFrom?: {
				// +usage=Selects a key of a secret in the pod's namespace
-				secretKeyRef: {
+				secretKeyRef?: {
					// +usage=The name of the secret in the pod's namespace to select from
					name: string
					// +usage=The key of the secret to select from. Must be a valid secret key
					key: string
				}
				// +usage=Selects a key of a config map in the pod's namespace
-				configMapKeyRef: {
+				configMapKeyRef?: {
					// +usage=The name of the config map in the pod's namespace to select from
					name: string
					// +usage=The key of the config map to select from. Must be a valid secret key
@@ -4,7 +4,7 @@
	labels: {
		"ui-hidden": "true"
	}
-	description: "affinity specify affinity and tolerationon K8s pod for your workload which follows the pod spec in path 'spec.template'."
+	description: "Affinity specifies affinity and toleration K8s pod for your workload which follows the pod spec in path 'spec.template'."
	attributes: {
		appliesToWorkloads: ["*"]
		podDisruptive: true
@@ -38,7 +38,7 @@ template: {
			volumeMounts: [{
				name:      parameter.mountName
				mountPath: parameter.initMountPath
-			}]
+			}] + parameter.extraVolumeMounts
		}]
		// +patchKey=name
		volumes: [{
@@ -92,5 +92,13 @@ template: {

		// +usage=Specify the mount path of init container
		initMountPath: string
+
+		// +usage=Specify the extra volume mounts for the init container
+		extraVolumeMounts: [...{
+			// +usage=The name of the volume to be mounted
+			name: string
+			// +usage=The mountPath for mount in the init container
+			mountPath: string
+		}]
	}
}
@@ -9,10 +9,115 @@
	}
}
template: {
	#Privileges: {
		// +usage=Specify the verbs to be allowed for the resource
		verbs: [...string]
		// +usage=Specify the apiGroups of the resource
		apiGroups?: [...string]
		// +usage=Specify the resources to be allowed
		resources?: [...string]
		// +usage=Specify the resourceNames to be allowed
		resourceNames?: [...string]
		// +usage=Specify the resource url to be allowed
		nonResourceURLs?: [...string]
		// +usage=Specify the scope of the privileges, default to be namespace scope
		scope: *"namespace" | "cluster"
	}
	parameter: {
		// +usage=Specify the name of ServiceAccount
		name: string
		// +usage=Specify whether to create new ServiceAccount or not
		create: *false | bool
		// +usage=Specify the privileges of the ServiceAccount, if not empty, RoleBindings(ClusterRoleBindings) will be created
		privileges?: [...#Privileges]
	}
	// +patchStrategy=retainKeys
	patch: spec: template: spec: serviceAccountName: parameter.name

	_clusterPrivileges: [ for p in parameter.privileges if p.scope == "cluster" {p}]
	_namespacePrivileges: [ for p in parameter.privileges if p.scope == "namespace" {p}]
	outputs: {
		if parameter.create {
			"service-account": {
				apiVersion: "v1"
				kind:       "ServiceAccount"
				metadata: name: parameter.name
			}
		}
		if parameter.privileges != _|_ {
			if len(_clusterPrivileges) > 0 {
				"cluster-role": {
					apiVersion: "rbac.authorization.k8s.io/v1"
					kind:       "ClusterRole"
					metadata: name: "\(context.namespace):\(parameter.name)"
					rules: [ for p in _clusterPrivileges {
						verbs: p.verbs
						if p.apiGroups != _|_ {
							apiGroups: p.apiGroups
						}
						if p.resources != _|_ {
							resources: p.resources
						}
						if p.resourceNames != _|_ {
							resourceNames: p.resourceNames
						}
						if p.nonResourceURLs != _|_ {
							nonResourceURLs: p.nonResourceURLs
						}
					}]
				}
				"cluster-role-binding": {
					apiVersion: "rbac.authorization.k8s.io/v1"
					kind:       "ClusterRoleBinding"
					metadata: name: "\(context.namespace):\(parameter.name)"
					roleRef: {
						apiGroup: "rbac.authorization.k8s.io"
						kind:     "ClusterRole"
						name:     "\(context.namespace):\(parameter.name)"
					}
					subjects: [{
						kind:      "ServiceAccount"
						name:      parameter.name
						namespace: "\(context.namespace)"
					}]
				}
			}
			if len(_namespacePrivileges) > 0 {
				"role": {
					apiVersion: "rbac.authorization.k8s.io/v1"
					kind:       "Role"
					metadata: name: parameter.name
					rules: [ for p in _namespacePrivileges {
						verbs: p.verbs
						if p.apiGroups != _|_ {
							apiGroups: p.apiGroups
						}
						if p.resources != _|_ {
							resources: p.resources
						}
						if p.resourceNames != _|_ {
							resourceNames: p.resourceNames
						}
						if p.nonResourceURLs != _|_ {
							nonResourceURLs: p.nonResourceURLs
						}
					}]
				}
				"role-binding": {
					apiVersion: "rbac.authorization.k8s.io/v1"
					kind:       "RoleBinding"
					metadata: name: parameter.name
					roleRef: {
						apiGroup: "rbac.authorization.k8s.io"
						kind:     "Role"
						name:     parameter.name
					}
					subjects: [{
						kind: "ServiceAccount"
						name: parameter.name
					}]
				}
			}
		}
	}
}
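Read as a whole, this template patches the workload's serviceAccountName and can optionally create the ServiceAccount and its RBAC objects: cluster-scoped privileges become a ClusterRole/ClusterRoleBinding named "<namespace>:<name>", namespace-scoped ones a Role/RoleBinding. A hedged usage sketch as a Go string literal (the trait type name service-account is inferred from the output keys above; component name, image, and verbs are illustrative):

	// Sketch, not part of this commit: an Application exercising the trait.
	const exampleApp = `
	apiVersion: core.oam.dev/v1beta1
	kind: Application
	metadata:
	  name: sa-demo
	spec:
	  components:
	    - name: web
	      type: webservice
	      properties:
	        image: nginx
	      traits:
	        - type: service-account
	          properties:
	            name: pipeline-sa
	            create: true
	            privileges:
	              - verbs: ["get", "list"]
	                apiGroups: [""]
	                resources: ["configmaps"]
	                scope: namespace
	`

With create: true and a single namespace-scoped privilege, the template would emit a ServiceAccount plus a Role and RoleBinding named pipeline-sa and point the pod spec at them.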
@@ -77,6 +77,11 @@ template: {
					// +usage=The key of the config map to select from. Must be a valid secret key
					key: string
				}
+				// +usage=Specify the field reference for env
+				fieldRef?: {
+					// +usage=Specify the field path for env
+					fieldPath: string
+				}
			}
		}]