mirror of
https://github.com/kubevela/kubevela.git
synced 2026-02-28 00:33:56 +00:00
Compare commits
24 Commits
v1.7.2
...
v1.4.0-bet
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
cd171d27db | ||
|
|
6d8be8b061 | ||
|
|
e93912acff | ||
|
|
e48e39987f | ||
|
|
6264a66021 | ||
|
|
9191127e01 | ||
|
|
1b047c10ba | ||
|
|
02a1d390c4 | ||
|
|
62866e19d8 | ||
|
|
3dc645ed52 | ||
|
|
e20ef02a6a | ||
|
|
371affb389 | ||
|
|
b35145be82 | ||
|
|
d92c8844ba | ||
|
|
82aaf5098b | ||
|
|
7399666275 | ||
|
|
0b394e766b | ||
|
|
eb386ce9f7 | ||
|
|
e4fa5a5cf1 | ||
|
|
165e011bd0 | ||
|
|
9489b8d511 | ||
|
|
d95942c992 | ||
|
|
c6aa8ddbbc | ||
|
|
c370ef04f3 |
24
.github/workflows/registry.yml
vendored
24
.github/workflows/registry.yml
vendored
@@ -99,9 +99,31 @@ jobs:
|
||||
docker.io/oamdev/vela-apiserver:${{ steps.get_version.outputs.VERSION }}
|
||||
ghcr.io/${{ github.repository }}/vela-apiserver:${{ steps.get_version.outputs.VERSION }}
|
||||
|
||||
- name: Build & Pushing vela CLI for ACR
|
||||
run: |
|
||||
docker build --build-arg GOPROXY=https://proxy.golang.org --build-arg VERSION=${{ steps.get_version.outputs.VERSION }} --build-arg GITVERSION=git-${{ steps.vars.outputs.git_revision }} -t kubevela-registry.cn-hangzhou.cr.aliyuncs.com/oamdev/vela-cli:${{ steps.get_version.outputs.VERSION }} -f Dockerfile.cli .
|
||||
docker push kubevela-registry.cn-hangzhou.cr.aliyuncs.com/oamdev/vela-cli:${{ steps.get_version.outputs.VERSION }}
|
||||
- uses: docker/build-push-action@v2
|
||||
name: Build & Pushing CLI for Dockerhub and GHCR
|
||||
with:
|
||||
context: .
|
||||
file: Dockerfile.cli
|
||||
labels: |-
|
||||
org.opencontainers.image.source=https://github.com/${{ github.repository }}
|
||||
org.opencontainers.image.revision=${{ github.sha }}
|
||||
platforms: linux/amd64,linux/arm64
|
||||
push: ${{ github.event_name != 'pull_request' }}
|
||||
build-args: |
|
||||
GITVERSION=git-${{ steps.vars.outputs.git_revision }}
|
||||
VERSION=${{ steps.get_version.outputs.VERSION }}
|
||||
GOPROXY=https://proxy.golang.org
|
||||
tags: |-
|
||||
docker.io/oamdev/vela-cli:${{ steps.get_version.outputs.VERSION }}
|
||||
ghcr.io/${{ github.repository }}/vela-cli:${{ steps.get_version.outputs.VERSION }}
|
||||
|
||||
- name: Build & Pushing vela runtime rollout for ACR
|
||||
run: |
|
||||
docker build --build-arg GOPROXY=https://proxy.golang.org --build-arg VERSION=${{ steps.get_version.outputs.VERSION }} --build-arg GITVERSION=git-${{ steps.vars.outputs.git_revision }} -t kubevela-registry.cn-hangzhou.cr.aliyuncs.com/oamdev/vela-rollout:${{ steps.get_version.outputs.VERSION }} .
|
||||
docker build --build-arg GOPROXY=https://proxy.golang.org --build-arg VERSION=${{ steps.get_version.outputs.VERSION }} --build-arg GITVERSION=git-${{ steps.vars.outputs.git_revision }} -t kubevela-registry.cn-hangzhou.cr.aliyuncs.com/oamdev/vela-rollout:${{ steps.get_version.outputs.VERSION }} -f runtime/rollout/Dockerfile .
|
||||
docker push kubevela-registry.cn-hangzhou.cr.aliyuncs.com/oamdev/vela-rollout:${{ steps.get_version.outputs.VERSION }}
|
||||
- uses: docker/build-push-action@v2
|
||||
name: Build & Pushing runtime rollout for Dockerhub and GHCR
|
||||
|
||||
1
.github/workflows/release.yml
vendored
1
.github/workflows/release.yml
vendored
@@ -121,6 +121,7 @@ jobs:
|
||||
run: ./ossutil --config-file .ossutilconfig sync ./_bin/vela oss://$BUCKET/binary/vela/${{ env.VELA_VERSION }}
|
||||
|
||||
- name: sync the latest version file
|
||||
if: ${{ !contains(env.VELA_VERSION,'alpha') && !contains(env.VELA_VERSION,'beta') }}
|
||||
run: |
|
||||
echo ${{ env.VELA_VERSION }} > ./latest_version
|
||||
./ossutil --config-file .ossutilconfig cp -u ./latest_version oss://$BUCKET/binary/vela/latest_version
|
||||
|
||||
@@ -36,7 +36,7 @@ RUN GO111MODULE=on CGO_ENABLED=0 GOOS=linux GOARCH=${TARGETARCH} \
|
||||
# Refer to https://github.com/GoogleContainerTools/distroless for more details
|
||||
# Overwrite `BASE_IMAGE` by passing `--build-arg=BASE_IMAGE=gcr.io/distroless/static:nonroot`
|
||||
FROM ${BASE_IMAGE:-alpine:3.15}
|
||||
# This is required by daemon connnecting with cri
|
||||
# This is required by daemon connecting with cri
|
||||
RUN apk add --no-cache ca-certificates bash expat
|
||||
|
||||
WORKDIR /
|
||||
|
||||
@@ -34,7 +34,7 @@ RUN GO111MODULE=on CGO_ENABLED=0 GOOS=linux GOARCH=${TARGETARCH} \
|
||||
# Overwrite `BASE_IMAGE` by passing `--build-arg=BASE_IMAGE=gcr.io/distroless/static:nonroot`
|
||||
|
||||
FROM ${BASE_IMAGE:-alpine:3.15}
|
||||
# This is required by daemon connnecting with cri
|
||||
# This is required by daemon connecting with cri
|
||||
RUN apk add --no-cache ca-certificates bash expat
|
||||
|
||||
WORKDIR /
|
||||
|
||||
43
Dockerfile.cli
Normal file
43
Dockerfile.cli
Normal file
@@ -0,0 +1,43 @@
|
||||
ARG BASE_IMAGE
|
||||
# Build the cli binary
|
||||
FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.17-alpine as builder
|
||||
ARG GOPROXY
|
||||
ENV GOPROXY=${GOPROXY:-https://goproxy.cn}
|
||||
WORKDIR /workspace
|
||||
# Copy the Go Modules manifests
|
||||
COPY go.mod go.mod
|
||||
COPY go.sum go.sum
|
||||
# cache deps before building and copying source so that we don't need to re-download as much
|
||||
# and so that source changes don't invalidate our downloaded layer
|
||||
RUN go mod download
|
||||
|
||||
# Copy the go source
|
||||
COPY apis/ apis/
|
||||
COPY pkg/ pkg/
|
||||
COPY version/ version/
|
||||
COPY references/ references/
|
||||
|
||||
# Build
|
||||
ARG TARGETARCH
|
||||
ARG VERSION
|
||||
ARG GITVERSION
|
||||
|
||||
RUN GO111MODULE=on CGO_ENABLED=0 GOOS=linux GOARCH=${TARGETARCH:-amd64} \
|
||||
go build -a -ldflags "-s -w -X github.com/oam-dev/kubevela/version.VelaVersion=${VERSION:-undefined} -X github.com/oam-dev/kubevela/version.GitRevision=${GITVERSION:-undefined}" \
|
||||
-o vela-${TARGETARCH} ./references/cmd/cli/main.go
|
||||
|
||||
|
||||
# Use alpine as base image due to the discussion in issue #1448
|
||||
# You can replace distroless as minimal base image to package the manager binary
|
||||
# Refer to https://github.com/GoogleContainerTools/distroless for more details
|
||||
# Overwrite `BASE_IMAGE` by passing `--build-arg=BASE_IMAGE=gcr.io/distroless/static:nonroot`
|
||||
|
||||
FROM ${BASE_IMAGE:-alpine:3.15}
|
||||
# This is required by daemon connecting with cri
|
||||
RUN apk add --no-cache ca-certificates bash expat
|
||||
|
||||
WORKDIR /
|
||||
|
||||
ARG TARGETARCH
|
||||
COPY --from=builder /workspace/vela-${TARGETARCH} /vela
|
||||
ENTRYPOINT ["/vela"]
|
||||
@@ -39,7 +39,7 @@ RUN GO111MODULE=on CGO_ENABLED=0 GOOS=linux GOARCH=${TARGETARCH} \
|
||||
# Overwrite `BASE_IMAGE` by passing `--build-arg=BASE_IMAGE=gcr.io/distroless/static:nonroot`
|
||||
|
||||
FROM ${BASE_IMAGE:-alpine:3.15}
|
||||
# This is required by daemon connnecting with cri
|
||||
# This is required by daemon connecting with cri
|
||||
RUN apk add --no-cache ca-certificates bash expat
|
||||
|
||||
WORKDIR /
|
||||
|
||||
@@ -347,6 +347,8 @@ type WorkflowStep struct {
|
||||
|
||||
SubSteps []WorkflowSubStep `json:"subSteps,omitempty"`
|
||||
|
||||
If string `json:"if,omitempty"`
|
||||
|
||||
DependsOn []string `json:"dependsOn,omitempty"`
|
||||
|
||||
Inputs StepInputs `json:"inputs,omitempty"`
|
||||
@@ -364,6 +366,8 @@ type WorkflowSubStep struct {
|
||||
// +kubebuilder:pruning:PreserveUnknownFields
|
||||
Properties *runtime.RawExtension `json:"properties,omitempty"`
|
||||
|
||||
If string `json:"if,omitempty"`
|
||||
|
||||
DependsOn []string `json:"dependsOn,omitempty"`
|
||||
|
||||
Inputs StepInputs `json:"inputs,omitempty"`
|
||||
@@ -397,6 +401,8 @@ const (
|
||||
WorkflowStepPhaseSucceeded WorkflowStepPhase = "succeeded"
|
||||
// WorkflowStepPhaseFailed will report error in `message`.
|
||||
WorkflowStepPhaseFailed WorkflowStepPhase = "failed"
|
||||
// WorkflowStepPhaseSkipped will make the controller skip the step.
|
||||
WorkflowStepPhaseSkipped WorkflowStepPhase = "skipped"
|
||||
// WorkflowStepPhaseStopped will make the controller stop the workflow.
|
||||
WorkflowStepPhaseStopped WorkflowStepPhase = "stopped"
|
||||
// WorkflowStepPhaseRunning will make the controller continue the workflow.
|
||||
|
||||
@@ -117,6 +117,9 @@ type PlacementDecision struct {
|
||||
|
||||
// String encode placement decision
|
||||
func (in PlacementDecision) String() string {
|
||||
if in.Namespace == "" {
|
||||
return in.Cluster
|
||||
}
|
||||
return in.Cluster + "/" + in.Namespace
|
||||
}
|
||||
|
||||
|
||||
@@ -169,8 +169,3 @@ const (
|
||||
// VelaCoreConfig is to mark application, config and its secret or Terraform provider lelong to a KubeVela config
|
||||
VelaCoreConfig = "velacore-config"
|
||||
)
|
||||
|
||||
const (
|
||||
// ClusterGatewayAccessorGroup the group to impersonate which allows the access to the cluster-gateway
|
||||
ClusterGatewayAccessorGroup = "cluster-gateway-accessor"
|
||||
)
|
||||
|
||||
@@ -53,11 +53,12 @@ helm install --create-namespace -n vela-system kubevela kubevela/vela-core --wai
|
||||
|
||||
### KubeVela workflow parameters
|
||||
|
||||
| Name | Description | Value |
|
||||
| -------------------------------------- | ------------------------------------------------------ | ----- |
|
||||
| `workflow.backoff.maxTime.waitState` | The max backoff time of workflow in a wait condition | `60` |
|
||||
| `workflow.backoff.maxTime.failedState` | The max backoff time of workflow in a failed condition | `300` |
|
||||
| `workflow.step.errorRetryTimes` | The max retry times of a failed workflow step | `10` |
|
||||
| Name | Description | Value |
|
||||
| -------------------------------------- | ------------------------------------------------------ | ------- |
|
||||
| `workflow.enableSuspendOnFailure` | Enable suspend on workflow failure | `false` |
|
||||
| `workflow.backoff.maxTime.waitState` | The max backoff time of workflow in a wait condition | `60` |
|
||||
| `workflow.backoff.maxTime.failedState` | The max backoff time of workflow in a failed condition | `300` |
|
||||
| `workflow.step.errorRetryTimes` | The max retry times of a failed workflow step | `10` |
|
||||
|
||||
|
||||
### KubeVela controller parameters
|
||||
@@ -103,7 +104,7 @@ helm install --create-namespace -n vela-system kubevela kubevela/vela-core --wai
|
||||
| `multicluster.clusterGateway.replicaCount` | ClusterGateway replica count | `1` |
|
||||
| `multicluster.clusterGateway.port` | ClusterGateway port | `9443` |
|
||||
| `multicluster.clusterGateway.image.repository` | ClusterGateway image repository | `oamdev/cluster-gateway` |
|
||||
| `multicluster.clusterGateway.image.tag` | ClusterGateway image tag | `v1.3.2` |
|
||||
| `multicluster.clusterGateway.image.tag` | ClusterGateway image tag | `v1.4.0` |
|
||||
| `multicluster.clusterGateway.image.pullPolicy` | ClusterGateway image pull policy | `IfNotPresent` |
|
||||
| `multicluster.clusterGateway.resources.limits.cpu` | ClusterGateway cpu limit | `100m` |
|
||||
| `multicluster.clusterGateway.resources.limits.memory` | ClusterGateway memory limit | `200Mi` |
|
||||
|
||||
@@ -2209,6 +2209,8 @@ spec:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
if:
|
||||
type: string
|
||||
inputs:
|
||||
description: StepInputs defines variable input of
|
||||
WorkflowStep
|
||||
@@ -2253,6 +2255,8 @@ spec:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
if:
|
||||
type: string
|
||||
inputs:
|
||||
description: StepInputs defines variable input
|
||||
of WorkflowStep
|
||||
@@ -3954,6 +3958,8 @@ spec:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
if:
|
||||
type: string
|
||||
inputs:
|
||||
description: StepInputs defines variable input of WorkflowStep
|
||||
items:
|
||||
@@ -3995,6 +4001,8 @@ spec:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
if:
|
||||
type: string
|
||||
inputs:
|
||||
description: StepInputs defines variable input of
|
||||
WorkflowStep
|
||||
|
||||
@@ -1020,6 +1020,8 @@ spec:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
if:
|
||||
type: string
|
||||
inputs:
|
||||
description: StepInputs defines variable input of WorkflowStep
|
||||
items:
|
||||
@@ -1061,6 +1063,8 @@ spec:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
if:
|
||||
type: string
|
||||
inputs:
|
||||
description: StepInputs defines variable input of
|
||||
WorkflowStep
|
||||
|
||||
@@ -42,6 +42,8 @@ spec:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
if:
|
||||
type: string
|
||||
inputs:
|
||||
description: StepInputs defines variable input of WorkflowStep
|
||||
items:
|
||||
@@ -83,6 +85,8 @@ spec:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
if:
|
||||
type: string
|
||||
inputs:
|
||||
description: StepInputs defines variable input of WorkflowStep
|
||||
items:
|
||||
@@ -147,6 +151,8 @@ spec:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
if:
|
||||
type: string
|
||||
inputs:
|
||||
description: StepInputs defines variable input of WorkflowStep
|
||||
items:
|
||||
@@ -188,6 +194,8 @@ spec:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
if:
|
||||
type: string
|
||||
inputs:
|
||||
description: StepInputs defines variable input of WorkflowStep
|
||||
items:
|
||||
|
||||
@@ -31,7 +31,7 @@ spec:
|
||||
- "apiserver"
|
||||
- "--secure-port={{ .Values.multicluster.clusterGateway.port }}"
|
||||
- "--secret-namespace={{ .Release.Namespace }}"
|
||||
- "--feature-gates=APIPriorityAndFairness=false"
|
||||
- "--feature-gates=APIPriorityAndFairness=false,ClientIdentityPenetration={{ .Values.authentication.enabled }}"
|
||||
{{- if .Values.multicluster.clusterGateway.secureTLS.enabled }}
|
||||
- "--tls-cert-file={{ .Values.multicluster.clusterGateway.secureTLS.certPath }}/tls.crt"
|
||||
- "--tls-private-key-file={{ .Values.multicluster.clusterGateway.secureTLS.certPath }}/tls.key"
|
||||
@@ -129,7 +129,7 @@ spec:
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: {{ include "kubevela.fullname" . }}:cluster-gateway-access-role
|
||||
name: {{ include "kubevela.fullname" . }}:cluster-gateway:proxy
|
||||
rules:
|
||||
- apiGroups: [ "cluster.core.oam.dev" ]
|
||||
resources: [ "clustergateways/proxy" ]
|
||||
@@ -138,15 +138,12 @@ rules:
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: {{ include "kubevela.fullname" . }}:cluster-gateway-access-rolebinding
|
||||
name: {{ include "kubevela.fullname" . }}:cluster-gateway:proxy
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: {{ include "kubevela.fullname" . }}:cluster-gateway-access-role
|
||||
name: {{ include "kubevela.fullname" . }}:cluster-gateway:proxy
|
||||
subjects:
|
||||
- kind: Group
|
||||
name: cluster-gateway-accessor
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
- kind: Group
|
||||
name: kubevela:client
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
|
||||
186
charts/vela-core/templates/defwithtemplate/affinity.yaml
Normal file
186
charts/vela-core/templates/defwithtemplate/affinity.yaml
Normal file
@@ -0,0 +1,186 @@
|
||||
# Code generated by KubeVela templates. DO NOT EDIT. Please edit the original cue file.
|
||||
# Definition source cue file: vela-templates/definitions/internal/affinity.cue
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: TraitDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
definition.oam.dev/description: affinity specify affinity and tolerationon K8s pod for your workload which follows the pod spec in path 'spec.template'.
|
||||
labels:
|
||||
custom.definition.oam.dev/ui-hidden: "true"
|
||||
name: affinity
|
||||
namespace: {{ include "systemDefinitionNamespace" . }}
|
||||
spec:
|
||||
appliesToWorkloads:
|
||||
- '*'
|
||||
podDisruptive: true
|
||||
schematic:
|
||||
cue:
|
||||
template: |
|
||||
patch: spec: template: spec: {
|
||||
if parameter.podAffinity != _|_ {
|
||||
affinity: podAffinity: {
|
||||
if parameter.podAffinity.required != _|_ {
|
||||
requiredDuringSchedulingIgnoredDuringExecution: [
|
||||
for k in parameter.podAffinity.required {
|
||||
if k.labelSelector != _|_ {
|
||||
labelSelector: k.labelSelector
|
||||
}
|
||||
if k.namespace != _|_ {
|
||||
namespace: k.namespace
|
||||
}
|
||||
topologyKey: k.topologyKey
|
||||
if k.namespaceSelector != _|_ {
|
||||
namespaceSelector: k.namespaceSelector
|
||||
}
|
||||
}]
|
||||
}
|
||||
if parameter.podAffinity.preferred != _|_ {
|
||||
preferredDuringSchedulingIgnoredDuringExecution: [
|
||||
for k in parameter.podAffinity.preferred {
|
||||
weight: k.weight
|
||||
podAffinityTerm: k.podAffinityTerm
|
||||
}]
|
||||
}
|
||||
}
|
||||
}
|
||||
if parameter.podAntiAffinity != _|_ {
|
||||
affinity: podAntiAffinity: {
|
||||
if parameter.podAntiAffinity.required != _|_ {
|
||||
requiredDuringSchedulingIgnoredDuringExecution: [
|
||||
for k in parameter.podAntiAffinity.required {
|
||||
if k.labelSelector != _|_ {
|
||||
labelSelector: k.labelSelector
|
||||
}
|
||||
if k.namespace != _|_ {
|
||||
namespace: k.namespace
|
||||
}
|
||||
topologyKey: k.topologyKey
|
||||
if k.namespaceSelector != _|_ {
|
||||
namespaceSelector: k.namespaceSelector
|
||||
}
|
||||
}]
|
||||
}
|
||||
if parameter.podAntiAffinity.preferred != _|_ {
|
||||
preferredDuringSchedulingIgnoredDuringExecution: [
|
||||
for k in parameter.podAntiAffinity.preferred {
|
||||
weight: k.weight
|
||||
podAffinityTerm: k.podAffinityTerm
|
||||
}]
|
||||
}
|
||||
}
|
||||
}
|
||||
if parameter.nodeAffinity != _|_ {
|
||||
affinity: nodeAffinity: {
|
||||
if parameter.nodeAffinity.required != _|_ {
|
||||
requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: [
|
||||
for k in parameter.nodeAffinity.required.nodeSelectorTerms {
|
||||
if k.matchExpressions != _|_ {
|
||||
matchExpressions: k.matchExpressions
|
||||
}
|
||||
if k.matchFields != _|_ {
|
||||
matchFields: k.matchFields
|
||||
}
|
||||
}]
|
||||
}
|
||||
if parameter.nodeAffinity.preferred != _|_ {
|
||||
preferredDuringSchedulingIgnoredDuringExecution: [
|
||||
for k in parameter.nodeAffinity.preferred {
|
||||
weight: k.weight
|
||||
preference: k.preference
|
||||
}]
|
||||
}
|
||||
}
|
||||
}
|
||||
if parameter.tolerations != _|_ {
|
||||
tolerations: [
|
||||
for k in parameter.tolerations {
|
||||
if k.key != _|_ {
|
||||
key: k.key
|
||||
}
|
||||
if k.effect != _|_ {
|
||||
effect: k.effect
|
||||
}
|
||||
if k.value != _|_ {
|
||||
value: k.value
|
||||
}
|
||||
operator: k.operator
|
||||
if k.tolerationSeconds != _|_ {
|
||||
tolerationSeconds: k.tolerationSeconds
|
||||
}
|
||||
}]
|
||||
}
|
||||
}
|
||||
#labelSelector: {
|
||||
matchLabels?: [string]: string
|
||||
matchExpressions?: [...{
|
||||
key: string
|
||||
operator: *"In" | "NotIn" | "Exists" | "DoesNotExist"
|
||||
values?: [...string]
|
||||
}]
|
||||
}
|
||||
#podAffinityTerm: {
|
||||
labelSelector?: #labelSelector
|
||||
namespaces?: [...string]
|
||||
topologyKey: string
|
||||
namespaceSelector?: #labelSelector
|
||||
}
|
||||
#nodeSelecor: {
|
||||
key: string
|
||||
operator: *"In" | "NotIn" | "Exists" | "DoesNotExist" | "Gt" | "Lt"
|
||||
values?: [...string]
|
||||
}
|
||||
#nodeSelectorTerm: {
|
||||
matchExpressions?: [...#nodeSelecor]
|
||||
matchFields?: [...#nodeSelecor]
|
||||
}
|
||||
parameter: {
|
||||
// +usage=Specify the pod affinity scheduling rules
|
||||
podAffinity?: {
|
||||
// +usage=Specify the required during scheduling ignored during execution
|
||||
required?: [...#podAffinityTerm]
|
||||
// +usage=Specify the preferred during scheduling ignored during execution
|
||||
preferred?: [...{
|
||||
// +usage=Specify weight associated with matching the corresponding podAffinityTerm
|
||||
weight: int & >=1 & <=100
|
||||
// +usage=Specify a set of pods
|
||||
podAffinityTerm: #podAffinityTerm
|
||||
}]
|
||||
}
|
||||
// +usage=Specify the pod anti-affinity scheduling rules
|
||||
podAntiAffinity?: {
|
||||
// +usage=Specify the required during scheduling ignored during execution
|
||||
required?: [...#podAffinityTerm]
|
||||
// +usage=Specify the preferred during scheduling ignored during execution
|
||||
preferred?: [...{
|
||||
// +usage=Specify weight associated with matching the corresponding podAffinityTerm
|
||||
weight: int & >=1 & <=100
|
||||
// +usage=Specify a set of pods
|
||||
podAffinityTerm: #podAffinityTerm
|
||||
}]
|
||||
}
|
||||
// +usage=Specify the node affinity scheduling rules for the pod
|
||||
nodeAffinity?: {
|
||||
// +usage=Specify the required during scheduling ignored during execution
|
||||
required?: {
|
||||
// +usage=Specify a list of node selector
|
||||
nodeSelectorTerms: [...#nodeSelectorTerm]
|
||||
}
|
||||
// +usage=Specify the preferred during scheduling ignored during execution
|
||||
preferred?: [...{
|
||||
// +usage=Specify weight associated with matching the corresponding nodeSelector
|
||||
weight: int & >=1 & <=100
|
||||
// +usage=Specify a node selector
|
||||
preference: #nodeSelectorTerm
|
||||
}]
|
||||
}
|
||||
// +usage=Specify tolerant taint
|
||||
tolerations?: [...{
|
||||
key?: string
|
||||
operator: *"Equal" | "Exists"
|
||||
value?: string
|
||||
effect?: "NoSchedule" | "PreferNoSchedule" | "NoExecute"
|
||||
// +usage=Specify the period of time the toleration
|
||||
tolerationSeconds?: int
|
||||
}]
|
||||
}
|
||||
|
||||
@@ -62,7 +62,8 @@ spec:
|
||||
}
|
||||
}
|
||||
}] + [ for k, v in _params.env if _delKeys[k] == _|_ && (_params.replace || _baseEnvMap[k] == _|_) {
|
||||
v
|
||||
name: k
|
||||
value: v
|
||||
}]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,11 +1,12 @@
|
||||
# Code generated by KubeVela templates. DO NOT EDIT. Please edit the original cue file.
|
||||
# Definition source cue file: vela-templates/definitions/internal/node-affinity.cue
|
||||
# Definition source cue file: vela-templates/definitions/deprecated/node-affinity.cue
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: TraitDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
definition.oam.dev/description: affinity specify node affinity and toleration on K8s pod for your workload which follows the pod spec in path 'spec.template'.
|
||||
labels:
|
||||
custom.definition.oam.dev/deprecated: "true"
|
||||
custom.definition.oam.dev/ui-hidden: "true"
|
||||
name: node-affinity
|
||||
namespace: {{ include "systemDefinitionNamespace" . }}
|
||||
|
||||
@@ -25,9 +25,6 @@ subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ include "kubevela.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
- kind: Group
|
||||
name: core.oam.dev
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
|
||||
---
|
||||
# permissions to do leader election.
|
||||
@@ -175,6 +172,7 @@ spec:
|
||||
- "--max-workflow-wait-backoff-time={{ .Values.workflow.backoff.maxTime.waitState }}"
|
||||
- "--max-workflow-failed-backoff-time={{ .Values.workflow.backoff.maxTime.failedState }}"
|
||||
- "--max-workflow-step-error-retry-times={{ .Values.workflow.step.errorRetryTimes }}"
|
||||
- "--feature-gates=EnableSuspendOnFailure={{- .Values.workflow.enableSuspendOnFailure | toString -}}"
|
||||
- "--feature-gates=AuthenticateApplication={{- .Values.authentication.enabled | toString -}}"
|
||||
{{ if .Values.authentication.enabled }}
|
||||
{{ if .Values.authentication.withUser }}
|
||||
|
||||
@@ -35,10 +35,12 @@ dependCheckWait: 30s
|
||||
|
||||
## @section KubeVela workflow parameters
|
||||
|
||||
## @param workflow.enableSuspendOnFailure Enable suspend on workflow failure
|
||||
## @param workflow.backoff.maxTime.waitState The max backoff time of workflow in a wait condition
|
||||
## @param workflow.backoff.maxTime.failedState The max backoff time of workflow in a failed condition
|
||||
## @param workflow.step.errorRetryTimes The max retry times of a failed workflow step
|
||||
workflow:
|
||||
enableSuspendOnFailure: false
|
||||
backoff:
|
||||
maxTime:
|
||||
waitState: 60
|
||||
@@ -130,7 +132,7 @@ multicluster:
|
||||
port: 9443
|
||||
image:
|
||||
repository: oamdev/cluster-gateway
|
||||
tag: v1.3.2
|
||||
tag: v1.4.0
|
||||
pullPolicy: IfNotPresent
|
||||
resources:
|
||||
limits:
|
||||
|
||||
@@ -72,11 +72,12 @@ helm install --create-namespace -n vela-system kubevela kubevela/vela-minimal --
|
||||
|
||||
### KubeVela workflow parameters
|
||||
|
||||
| Name | Description | Value |
|
||||
| -------------------------------------- | ------------------------------------------------------ | ----- |
|
||||
| `workflow.backoff.maxTime.waitState` | The max backoff time of workflow in a wait condition | `60` |
|
||||
| `workflow.backoff.maxTime.failedState` | The max backoff time of workflow in a failed condition | `300` |
|
||||
| `workflow.step.errorRetryTimes` | The max retry times of a failed workflow step | `10` |
|
||||
| Name | Description | Value |
|
||||
| -------------------------------------- | ------------------------------------------------------ | ------- |
|
||||
| `workflow.enableSuspendOnFailure` | Enable suspend on workflow failure | `false` |
|
||||
| `workflow.backoff.maxTime.waitState` | The max backoff time of workflow in a wait condition | `60` |
|
||||
| `workflow.backoff.maxTime.failedState` | The max backoff time of workflow in a failed condition | `300` |
|
||||
| `workflow.step.errorRetryTimes` | The max retry times of a failed workflow step | `10` |
|
||||
|
||||
|
||||
### KubeVela controller parameters
|
||||
@@ -105,7 +106,7 @@ helm install --create-namespace -n vela-system kubevela kubevela/vela-minimal --
|
||||
| `multicluster.clusterGateway.replicaCount` | ClusterGateway replica count | `1` |
|
||||
| `multicluster.clusterGateway.port` | ClusterGateway port | `9443` |
|
||||
| `multicluster.clusterGateway.image.repository` | ClusterGateway image repository | `oamdev/cluster-gateway` |
|
||||
| `multicluster.clusterGateway.image.tag` | ClusterGateway image tag | `v1.3.2` |
|
||||
| `multicluster.clusterGateway.image.tag` | ClusterGateway image tag | `v1.4.0` |
|
||||
| `multicluster.clusterGateway.image.pullPolicy` | ClusterGateway image pull policy | `IfNotPresent` |
|
||||
| `multicluster.clusterGateway.resources.limits.cpu` | ClusterGateway cpu limit | `100m` |
|
||||
| `multicluster.clusterGateway.resources.limits.memory` | ClusterGateway memory limit | `200Mi` |
|
||||
|
||||
@@ -2209,6 +2209,8 @@ spec:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
if:
|
||||
type: string
|
||||
inputs:
|
||||
description: StepInputs defines variable input of
|
||||
WorkflowStep
|
||||
@@ -2253,6 +2255,8 @@ spec:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
if:
|
||||
type: string
|
||||
inputs:
|
||||
description: StepInputs defines variable input
|
||||
of WorkflowStep
|
||||
@@ -3954,6 +3958,8 @@ spec:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
if:
|
||||
type: string
|
||||
inputs:
|
||||
description: StepInputs defines variable input of WorkflowStep
|
||||
items:
|
||||
@@ -3995,6 +4001,8 @@ spec:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
if:
|
||||
type: string
|
||||
inputs:
|
||||
description: StepInputs defines variable input of
|
||||
WorkflowStep
|
||||
|
||||
@@ -1020,6 +1020,8 @@ spec:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
if:
|
||||
type: string
|
||||
inputs:
|
||||
description: StepInputs defines variable input of WorkflowStep
|
||||
items:
|
||||
@@ -1061,6 +1063,8 @@ spec:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
if:
|
||||
type: string
|
||||
inputs:
|
||||
description: StepInputs defines variable input of
|
||||
WorkflowStep
|
||||
|
||||
@@ -31,7 +31,7 @@ spec:
|
||||
- "apiserver"
|
||||
- "--secure-port={{ .Values.multicluster.clusterGateway.port }}"
|
||||
- "--secret-namespace={{ .Release.Namespace }}"
|
||||
- "--feature-gates=APIPriorityAndFairness=false"
|
||||
- "--feature-gates=APIPriorityAndFairness=false,ClientIdentityPenetration={{ .Values.authentication.enabled }}"
|
||||
{{ if .Values.multicluster.clusterGateway.secureTLS.enabled }}
|
||||
- "--cert-dir={{ .Values.multicluster.clusterGateway.secureTLS.certPath }}"
|
||||
{{ end }}
|
||||
@@ -194,24 +194,22 @@ spec:
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: {{ include "kubevela.fullname" . }}:cluster-gateway-access-role
|
||||
name: {{ include "kubevela.fullname" . }}:cluster-gateway:proxy
|
||||
rules:
|
||||
- apiGroups: [ "cluster.core.oam.dev" ]
|
||||
resources: [ "clustergateways/proxy" ]
|
||||
verbs: [ "get", "list", "watch", "create", "update", "patch", "delete" ]
|
||||
{{ end }}
|
||||
---
|
||||
{{ if and .Values.multicluster.enabled }}
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: {{ include "kubevela.fullname" . }}:cluster-gateway-access-rolebinding
|
||||
name: {{ include "kubevela.fullname" . }}:cluster-gateway:proxy
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: {{ include "kubevela.fullname" . }}:cluster-gateway-access-role
|
||||
name: {{ include "kubevela.fullname" . }}:cluster-gateway:proxy
|
||||
subjects:
|
||||
- kind: Group
|
||||
name: cluster-gateway-accessor
|
||||
name: kubevela:client
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
{{ end }}
|
||||
186
charts/vela-minimal/templates/defwithtemplate/affinity.yaml
Normal file
186
charts/vela-minimal/templates/defwithtemplate/affinity.yaml
Normal file
@@ -0,0 +1,186 @@
|
||||
# Code generated by KubeVela templates. DO NOT EDIT. Please edit the original cue file.
|
||||
# Definition source cue file: vela-templates/definitions/internal/affinity.cue
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: TraitDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
definition.oam.dev/description: affinity specify affinity and tolerationon K8s pod for your workload which follows the pod spec in path 'spec.template'.
|
||||
labels:
|
||||
custom.definition.oam.dev/ui-hidden: "true"
|
||||
name: affinity
|
||||
namespace: {{ include "systemDefinitionNamespace" . }}
|
||||
spec:
|
||||
appliesToWorkloads:
|
||||
- '*'
|
||||
podDisruptive: true
|
||||
schematic:
|
||||
cue:
|
||||
template: |
|
||||
patch: spec: template: spec: {
|
||||
if parameter.podAffinity != _|_ {
|
||||
affinity: podAffinity: {
|
||||
if parameter.podAffinity.required != _|_ {
|
||||
requiredDuringSchedulingIgnoredDuringExecution: [
|
||||
for k in parameter.podAffinity.required {
|
||||
if k.labelSelector != _|_ {
|
||||
labelSelector: k.labelSelector
|
||||
}
|
||||
if k.namespace != _|_ {
|
||||
namespace: k.namespace
|
||||
}
|
||||
topologyKey: k.topologyKey
|
||||
if k.namespaceSelector != _|_ {
|
||||
namespaceSelector: k.namespaceSelector
|
||||
}
|
||||
}]
|
||||
}
|
||||
if parameter.podAffinity.preferred != _|_ {
|
||||
preferredDuringSchedulingIgnoredDuringExecution: [
|
||||
for k in parameter.podAffinity.preferred {
|
||||
weight: k.weight
|
||||
podAffinityTerm: k.podAffinityTerm
|
||||
}]
|
||||
}
|
||||
}
|
||||
}
|
||||
if parameter.podAntiAffinity != _|_ {
|
||||
affinity: podAntiAffinity: {
|
||||
if parameter.podAntiAffinity.required != _|_ {
|
||||
requiredDuringSchedulingIgnoredDuringExecution: [
|
||||
for k in parameter.podAntiAffinity.required {
|
||||
if k.labelSelector != _|_ {
|
||||
labelSelector: k.labelSelector
|
||||
}
|
||||
if k.namespace != _|_ {
|
||||
namespace: k.namespace
|
||||
}
|
||||
topologyKey: k.topologyKey
|
||||
if k.namespaceSelector != _|_ {
|
||||
namespaceSelector: k.namespaceSelector
|
||||
}
|
||||
}]
|
||||
}
|
||||
if parameter.podAntiAffinity.preferred != _|_ {
|
||||
preferredDuringSchedulingIgnoredDuringExecution: [
|
||||
for k in parameter.podAntiAffinity.preferred {
|
||||
weight: k.weight
|
||||
podAffinityTerm: k.podAffinityTerm
|
||||
}]
|
||||
}
|
||||
}
|
||||
}
|
||||
if parameter.nodeAffinity != _|_ {
|
||||
affinity: nodeAffinity: {
|
||||
if parameter.nodeAffinity.required != _|_ {
|
||||
requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: [
|
||||
for k in parameter.nodeAffinity.required.nodeSelectorTerms {
|
||||
if k.matchExpressions != _|_ {
|
||||
matchExpressions: k.matchExpressions
|
||||
}
|
||||
if k.matchFields != _|_ {
|
||||
matchFields: k.matchFields
|
||||
}
|
||||
}]
|
||||
}
|
||||
if parameter.nodeAffinity.preferred != _|_ {
|
||||
preferredDuringSchedulingIgnoredDuringExecution: [
|
||||
for k in parameter.nodeAffinity.preferred {
|
||||
weight: k.weight
|
||||
preference: k.preference
|
||||
}]
|
||||
}
|
||||
}
|
||||
}
|
||||
if parameter.tolerations != _|_ {
|
||||
tolerations: [
|
||||
for k in parameter.tolerations {
|
||||
if k.key != _|_ {
|
||||
key: k.key
|
||||
}
|
||||
if k.effect != _|_ {
|
||||
effect: k.effect
|
||||
}
|
||||
if k.value != _|_ {
|
||||
value: k.value
|
||||
}
|
||||
operator: k.operator
|
||||
if k.tolerationSeconds != _|_ {
|
||||
tolerationSeconds: k.tolerationSeconds
|
||||
}
|
||||
}]
|
||||
}
|
||||
}
|
||||
#labelSelector: {
|
||||
matchLabels?: [string]: string
|
||||
matchExpressions?: [...{
|
||||
key: string
|
||||
operator: *"In" | "NotIn" | "Exists" | "DoesNotExist"
|
||||
values?: [...string]
|
||||
}]
|
||||
}
|
||||
#podAffinityTerm: {
|
||||
labelSelector?: #labelSelector
|
||||
namespaces?: [...string]
|
||||
topologyKey: string
|
||||
namespaceSelector?: #labelSelector
|
||||
}
|
||||
#nodeSelecor: {
|
||||
key: string
|
||||
operator: *"In" | "NotIn" | "Exists" | "DoesNotExist" | "Gt" | "Lt"
|
||||
values?: [...string]
|
||||
}
|
||||
#nodeSelectorTerm: {
|
||||
matchExpressions?: [...#nodeSelecor]
|
||||
matchFields?: [...#nodeSelecor]
|
||||
}
|
||||
parameter: {
|
||||
// +usage=Specify the pod affinity scheduling rules
|
||||
podAffinity?: {
|
||||
// +usage=Specify the required during scheduling ignored during execution
|
||||
required?: [...#podAffinityTerm]
|
||||
// +usage=Specify the preferred during scheduling ignored during execution
|
||||
preferred?: [...{
|
||||
// +usage=Specify weight associated with matching the corresponding podAffinityTerm
|
||||
weight: int & >=1 & <=100
|
||||
// +usage=Specify a set of pods
|
||||
podAffinityTerm: #podAffinityTerm
|
||||
}]
|
||||
}
|
||||
// +usage=Specify the pod anti-affinity scheduling rules
|
||||
podAntiAffinity?: {
|
||||
// +usage=Specify the required during scheduling ignored during execution
|
||||
required?: [...#podAffinityTerm]
|
||||
// +usage=Specify the preferred during scheduling ignored during execution
|
||||
preferred?: [...{
|
||||
// +usage=Specify weight associated with matching the corresponding podAffinityTerm
|
||||
weight: int & >=1 & <=100
|
||||
// +usage=Specify a set of pods
|
||||
podAffinityTerm: #podAffinityTerm
|
||||
}]
|
||||
}
|
||||
// +usage=Specify the node affinity scheduling rules for the pod
|
||||
nodeAffinity?: {
|
||||
// +usage=Specify the required during scheduling ignored during execution
|
||||
required?: {
|
||||
// +usage=Specify a list of node selector
|
||||
nodeSelectorTerms: [...#nodeSelectorTerm]
|
||||
}
|
||||
// +usage=Specify the preferred during scheduling ignored during execution
|
||||
preferred?: [...{
|
||||
// +usage=Specify weight associated with matching the corresponding nodeSelector
|
||||
weight: int & >=1 & <=100
|
||||
// +usage=Specify a node selector
|
||||
preference: #nodeSelectorTerm
|
||||
}]
|
||||
}
|
||||
// +usage=Specify tolerant taint
|
||||
tolerations?: [...{
|
||||
key?: string
|
||||
operator: *"Equal" | "Exists"
|
||||
value?: string
|
||||
effect?: "NoSchedule" | "PreferNoSchedule" | "NoExecute"
|
||||
// +usage=Specify the period of time the toleration
|
||||
tolerationSeconds?: int
|
||||
}]
|
||||
}
|
||||
|
||||
@@ -62,7 +62,8 @@ spec:
|
||||
}
|
||||
}
|
||||
}] + [ for k, v in _params.env if _delKeys[k] == _|_ && (_params.replace || _baseEnvMap[k] == _|_) {
|
||||
v
|
||||
name: k
|
||||
value: v
|
||||
}]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -27,9 +27,6 @@ subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ include "kubevela.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
- kind: Group
|
||||
name: core.oam.dev
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
|
||||
---
|
||||
# permissions to do leader election.
|
||||
@@ -145,6 +142,7 @@ spec:
|
||||
- "--max-workflow-wait-backoff-time={{ .Values.workflow.backoff.maxTime.waitState }}"
|
||||
- "--max-workflow-failed-backoff-time={{ .Values.workflow.backoff.maxTime.failedState }}"
|
||||
- "--max-workflow-step-error-retry-times={{ .Values.workflow.step.errorRetryTimes }}"
|
||||
- "--feature-gates=EnableSuspendOnFailure={{- .Values.workflow.enableSuspendOnFailure | toString -}}"
|
||||
- "--feature-gates=AuthenticateApplication={{- .Values.authentication.enabled | toString -}}"
|
||||
{{ if .Values.authentication.enabled }}
|
||||
{{ if .Values.authentication.withUser }}
|
||||
|
||||
@@ -38,10 +38,12 @@ dependCheckWait: 30s
|
||||
|
||||
## @section KubeVela workflow parameters
|
||||
|
||||
## @param workflow.enableSuspendOnFailure Enable suspend on workflow failure
|
||||
## @param workflow.backoff.maxTime.waitState The max backoff time of workflow in a wait condition
|
||||
## @param workflow.backoff.maxTime.failedState The max backoff time of workflow in a failed condition
|
||||
## @param workflow.step.errorRetryTimes The max retry times of a failed workflow step
|
||||
workflow:
|
||||
enableSuspendOnFailure: false
|
||||
backoff:
|
||||
maxTime:
|
||||
waitState: 60
|
||||
@@ -107,7 +109,7 @@ multicluster:
|
||||
port: 9443
|
||||
image:
|
||||
repository: oamdev/cluster-gateway
|
||||
tag: v1.3.2
|
||||
tag: v1.4.0
|
||||
pullPolicy: IfNotPresent
|
||||
resources:
|
||||
limits:
|
||||
|
||||
@@ -19,7 +19,6 @@ package main
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/signal"
|
||||
@@ -29,10 +28,12 @@ import (
|
||||
restfulspec "github.com/emicklei/go-restful-openapi/v2"
|
||||
"github.com/go-openapi/spec"
|
||||
"github.com/google/uuid"
|
||||
flag "github.com/spf13/pflag"
|
||||
|
||||
"github.com/oam-dev/kubevela/pkg/apiserver"
|
||||
"github.com/oam-dev/kubevela/pkg/apiserver/config"
|
||||
"github.com/oam-dev/kubevela/pkg/apiserver/utils/log"
|
||||
"github.com/oam-dev/kubevela/pkg/features"
|
||||
"github.com/oam-dev/kubevela/version"
|
||||
)
|
||||
|
||||
@@ -50,7 +51,7 @@ func main() {
|
||||
flag.BoolVar(&s.serverConfig.DisableStatisticCronJob, "disable-statistic-cronJob", false, "close the system statistic info calculating cronJob")
|
||||
flag.Float64Var(&s.serverConfig.KubeQPS, "kube-api-qps", 100, "the qps for kube clients. Low qps may lead to low throughput. High qps may give stress to api-server.")
|
||||
flag.IntVar(&s.serverConfig.KubeBurst, "kube-api-burst", 300, "the burst for kube clients. Recommend setting it qps*3.")
|
||||
|
||||
features.APIServerMutableFeatureGate.AddFlag(flag.CommandLine)
|
||||
flag.Parse()
|
||||
|
||||
if len(os.Args) > 2 && os.Args[1] == "build-swagger" {
|
||||
@@ -109,19 +110,13 @@ type Server struct {
|
||||
func (s *Server) run(ctx context.Context, errChan chan error) error {
|
||||
log.Logger.Infof("KubeVela information: version: %v, gitRevision: %v", version.VelaVersion, version.GitRevision)
|
||||
|
||||
server, err := apiserver.New(s.serverConfig)
|
||||
if err != nil {
|
||||
return fmt.Errorf("create apiserver failed : %w ", err)
|
||||
}
|
||||
server := apiserver.New(s.serverConfig)
|
||||
|
||||
return server.Run(ctx, errChan)
|
||||
}
|
||||
|
||||
func (s *Server) buildSwagger() (*spec.Swagger, error) {
|
||||
server, err := apiserver.New(s.serverConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
server := apiserver.New(s.serverConfig)
|
||||
config, err := server.BuildRestfulConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -36,7 +36,6 @@ import (
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/healthz"
|
||||
|
||||
apicommon "github.com/oam-dev/kubevela/apis/core.oam.dev/common"
|
||||
"github.com/oam-dev/kubevela/apis/types"
|
||||
"github.com/oam-dev/kubevela/pkg/auth"
|
||||
ctrlClient "github.com/oam-dev/kubevela/pkg/client"
|
||||
@@ -46,13 +45,11 @@ import (
|
||||
oamv1alpha2 "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2"
|
||||
"github.com/oam-dev/kubevela/pkg/controller/utils"
|
||||
"github.com/oam-dev/kubevela/pkg/cue/packages"
|
||||
"github.com/oam-dev/kubevela/pkg/features"
|
||||
_ "github.com/oam-dev/kubevela/pkg/monitor/metrics"
|
||||
"github.com/oam-dev/kubevela/pkg/multicluster"
|
||||
"github.com/oam-dev/kubevela/pkg/oam"
|
||||
"github.com/oam-dev/kubevela/pkg/oam/discoverymapper"
|
||||
"github.com/oam-dev/kubevela/pkg/resourcekeeper"
|
||||
pkgutils "github.com/oam-dev/kubevela/pkg/utils"
|
||||
"github.com/oam-dev/kubevela/pkg/utils/common"
|
||||
"github.com/oam-dev/kubevela/pkg/utils/system"
|
||||
"github.com/oam-dev/kubevela/pkg/utils/util"
|
||||
@@ -205,18 +202,10 @@ func main() {
|
||||
restConfig.QPS = float32(qps)
|
||||
restConfig.Burst = burst
|
||||
restConfig.Wrap(auth.NewImpersonatingRoundTripper)
|
||||
if utilfeature.DefaultMutableFeatureGate.Enabled(features.ControllerAutoImpersonation) {
|
||||
restConfig.Impersonate.UserName = types.VelaCoreName
|
||||
restConfig.Impersonate.Groups = []string{apicommon.Group}
|
||||
pkgutils.AutoSetSelfImpersonationInConfig(restConfig)
|
||||
}
|
||||
klog.InfoS("Kubernetes Config Loaded",
|
||||
"UserAgent", restConfig.UserAgent,
|
||||
"QPS", restConfig.QPS,
|
||||
"Burst", restConfig.Burst,
|
||||
"Auto-Impersonation", utilfeature.DefaultMutableFeatureGate.Enabled(features.ControllerAutoImpersonation),
|
||||
"Impersonate-User", restConfig.Impersonate.UserName,
|
||||
"Impersonate-Group", strings.Join(restConfig.Impersonate.Groups, ","),
|
||||
)
|
||||
|
||||
// wrapper the round tripper by multi cluster rewriter
|
||||
|
||||
@@ -3910,7 +3910,7 @@
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "identifier of the application ",
|
||||
"description": "identifier of the environment",
|
||||
"name": "envName",
|
||||
"in": "path",
|
||||
"required": true
|
||||
@@ -3950,7 +3950,7 @@
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "identifier of the application ",
|
||||
"description": "identifier of the environment",
|
||||
"name": "envName",
|
||||
"in": "path",
|
||||
"required": true
|
||||
@@ -4013,7 +4013,7 @@
|
||||
"tags": [
|
||||
"rbac"
|
||||
],
|
||||
"summary": "list all project level perm policies",
|
||||
"summary": "list all platform level perm policies",
|
||||
"operationId": "listPlatformPermissions",
|
||||
"responses": {
|
||||
"200": {
|
||||
@@ -4026,6 +4026,73 @@
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"post": {
|
||||
"consumes": [
|
||||
"application/xml",
|
||||
"application/json"
|
||||
],
|
||||
"produces": [
|
||||
"application/json",
|
||||
"application/xml"
|
||||
],
|
||||
"tags": [
|
||||
"rbac"
|
||||
],
|
||||
"summary": "create the platform perm policy",
|
||||
"operationId": "createPlatformPermission",
|
||||
"parameters": [
|
||||
{
|
||||
"name": "body",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1.CreatePermissionRequest"
|
||||
}
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1.PermissionBase"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/permissions/{permissionName}": {
|
||||
"delete": {
|
||||
"consumes": [
|
||||
"application/xml",
|
||||
"application/json"
|
||||
],
|
||||
"produces": [
|
||||
"application/json",
|
||||
"application/xml"
|
||||
],
|
||||
"tags": [
|
||||
"rbac"
|
||||
],
|
||||
"summary": "delete a platform perm policy",
|
||||
"operationId": "deletePlatformPermission",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "identifier of the permission",
|
||||
"name": "permissionName",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1.EmptyResponse"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/projects": {
|
||||
@@ -4276,6 +4343,85 @@
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"post": {
|
||||
"consumes": [
|
||||
"application/xml",
|
||||
"application/json"
|
||||
],
|
||||
"produces": [
|
||||
"application/json",
|
||||
"application/xml"
|
||||
],
|
||||
"tags": [
|
||||
"project"
|
||||
],
|
||||
"summary": "create a project level perm policy",
|
||||
"operationId": "createProjectPermission",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "identifier of the project",
|
||||
"name": "projectName",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1.PermissionBase"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/projects/{projectName}/permissions/{permissionName}": {
|
||||
"delete": {
|
||||
"consumes": [
|
||||
"application/xml",
|
||||
"application/json"
|
||||
],
|
||||
"produces": [
|
||||
"application/json",
|
||||
"application/xml"
|
||||
],
|
||||
"tags": [
|
||||
"project"
|
||||
],
|
||||
"summary": "delete a project level perm policy",
|
||||
"operationId": "deleteProjectPermission",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "identifier of the project",
|
||||
"name": "projectName",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"description": "identifier of the permission",
|
||||
"name": "permissionName",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1.PermissionBase"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/projects/{projectName}/roles": {
|
||||
@@ -4990,6 +5136,13 @@
|
||||
"summary": "update platform level role",
|
||||
"operationId": "updatePlatformRole",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "identifier of the role",
|
||||
"name": "roleName",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"name": "body",
|
||||
"in": "body",
|
||||
@@ -5022,6 +5175,15 @@
|
||||
],
|
||||
"summary": "update platform level role",
|
||||
"operationId": "deletePlatformRole",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "identifier of the role",
|
||||
"name": "roleName",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
@@ -6456,20 +6618,34 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"common.SubStepsStatus": {
|
||||
"common.StepStatus": {
|
||||
"required": [
|
||||
"id"
|
||||
],
|
||||
"properties": {
|
||||
"mode": {
|
||||
"firstExecuteTime": {
|
||||
"type": "string"
|
||||
},
|
||||
"stepIndex": {
|
||||
"type": "integer",
|
||||
"format": "int32"
|
||||
"id": {
|
||||
"type": "string"
|
||||
},
|
||||
"steps": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/common.WorkflowSubStepStatus"
|
||||
}
|
||||
"lastExecuteTime": {
|
||||
"type": "string"
|
||||
},
|
||||
"message": {
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"phase": {
|
||||
"type": "string"
|
||||
},
|
||||
"reason": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -6571,7 +6747,45 @@
|
||||
"type": "string"
|
||||
},
|
||||
"subSteps": {
|
||||
"$ref": "#/definitions/common.SubStepsStatus"
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/common.WorkflowSubStepStatus"
|
||||
}
|
||||
},
|
||||
"type": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"common.WorkflowSubStep": {
|
||||
"required": [
|
||||
"name",
|
||||
"type"
|
||||
],
|
||||
"properties": {
|
||||
"dependsOn": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"inputs": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/common.inputItem"
|
||||
}
|
||||
},
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"outputs": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/common.outputItem"
|
||||
}
|
||||
},
|
||||
"properties": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": {
|
||||
"type": "string"
|
||||
@@ -6583,9 +6797,15 @@
|
||||
"id"
|
||||
],
|
||||
"properties": {
|
||||
"firstExecuteTime": {
|
||||
"type": "string"
|
||||
},
|
||||
"id": {
|
||||
"type": "string"
|
||||
},
|
||||
"lastExecuteTime": {
|
||||
"type": "string"
|
||||
},
|
||||
"message": {
|
||||
"type": "string"
|
||||
},
|
||||
@@ -6897,8 +7117,8 @@
|
||||
},
|
||||
"model.Cluster": {
|
||||
"required": [
|
||||
"createTime",
|
||||
"updateTime",
|
||||
"createTime",
|
||||
"name",
|
||||
"alias",
|
||||
"description",
|
||||
@@ -7449,8 +7669,8 @@
|
||||
},
|
||||
"v1.AddonStatusResponse": {
|
||||
"required": [
|
||||
"name",
|
||||
"phase",
|
||||
"name",
|
||||
"args"
|
||||
],
|
||||
"properties": {
|
||||
@@ -7606,12 +7826,12 @@
|
||||
},
|
||||
"v1.ApplicationDeployResponse": {
|
||||
"required": [
|
||||
"status",
|
||||
"note",
|
||||
"envName",
|
||||
"triggerType",
|
||||
"createTime",
|
||||
"version",
|
||||
"status"
|
||||
"envName"
|
||||
],
|
||||
"properties": {
|
||||
"codeInfo": {
|
||||
@@ -8580,6 +8800,38 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1.CreatePermissionRequest": {
|
||||
"required": [
|
||||
"name",
|
||||
"alias",
|
||||
"resources",
|
||||
"actions",
|
||||
"effect"
|
||||
],
|
||||
"properties": {
|
||||
"actions": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"alias": {
|
||||
"type": "string"
|
||||
},
|
||||
"effect": {
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"resources": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1.CreatePolicyRequest": {
|
||||
"required": [
|
||||
"name",
|
||||
@@ -8776,11 +9028,11 @@
|
||||
},
|
||||
"v1.DetailAddonResponse": {
|
||||
"required": [
|
||||
"name",
|
||||
"version",
|
||||
"description",
|
||||
"icon",
|
||||
"invisible",
|
||||
"name",
|
||||
"description",
|
||||
"version",
|
||||
"icon",
|
||||
"schema",
|
||||
"uiSchema",
|
||||
"definitions",
|
||||
@@ -8860,13 +9112,13 @@
|
||||
},
|
||||
"v1.DetailApplicationResponse": {
|
||||
"required": [
|
||||
"name",
|
||||
"project",
|
||||
"description",
|
||||
"createTime",
|
||||
"icon",
|
||||
"alias",
|
||||
"createTime",
|
||||
"updateTime",
|
||||
"name",
|
||||
"policies",
|
||||
"envBindings",
|
||||
"resourceInfo"
|
||||
@@ -8923,20 +9175,20 @@
|
||||
},
|
||||
"v1.DetailClusterResponse": {
|
||||
"required": [
|
||||
"kubeConfig",
|
||||
"name",
|
||||
"description",
|
||||
"apiServerURL",
|
||||
"createTime",
|
||||
"icon",
|
||||
"status",
|
||||
"reason",
|
||||
"icon",
|
||||
"provider",
|
||||
"kubeConfigSecret",
|
||||
"labels",
|
||||
"dashboardURL",
|
||||
"createTime",
|
||||
"updateTime",
|
||||
"apiServerURL",
|
||||
"alias",
|
||||
"dashboardURL",
|
||||
"updateTime",
|
||||
"labels",
|
||||
"kubeConfig",
|
||||
"kubeConfigSecret",
|
||||
"resourceInfo"
|
||||
],
|
||||
"properties": {
|
||||
@@ -8994,14 +9246,14 @@
|
||||
},
|
||||
"v1.DetailComponentResponse": {
|
||||
"required": [
|
||||
"creator",
|
||||
"appPrimaryKey",
|
||||
"alias",
|
||||
"updateTime",
|
||||
"name",
|
||||
"type",
|
||||
"main",
|
||||
"createTime",
|
||||
"appPrimaryKey",
|
||||
"creator",
|
||||
"updateTime",
|
||||
"name",
|
||||
"definition"
|
||||
],
|
||||
"properties": {
|
||||
@@ -9089,11 +9341,11 @@
|
||||
},
|
||||
"v1.DetailDefinitionResponse": {
|
||||
"required": [
|
||||
"name",
|
||||
"status",
|
||||
"labels",
|
||||
"alias",
|
||||
"description",
|
||||
"status",
|
||||
"labels",
|
||||
"name",
|
||||
"icon",
|
||||
"schema",
|
||||
"uiSchema"
|
||||
@@ -9148,14 +9400,14 @@
|
||||
},
|
||||
"v1.DetailPolicyResponse": {
|
||||
"required": [
|
||||
"creator",
|
||||
"properties",
|
||||
"createTime",
|
||||
"updateTime",
|
||||
"envName",
|
||||
"name",
|
||||
"type",
|
||||
"description"
|
||||
"description",
|
||||
"creator",
|
||||
"properties"
|
||||
],
|
||||
"properties": {
|
||||
"createTime": {
|
||||
@@ -9188,16 +9440,16 @@
|
||||
},
|
||||
"v1.DetailRevisionResponse": {
|
||||
"required": [
|
||||
"updateTime",
|
||||
"note",
|
||||
"triggerType",
|
||||
"reason",
|
||||
"envName",
|
||||
"createTime",
|
||||
"deployUser",
|
||||
"updateTime",
|
||||
"reason",
|
||||
"appPrimaryKey",
|
||||
"version",
|
||||
"status",
|
||||
"envName",
|
||||
"deployUser",
|
||||
"note",
|
||||
"triggerType",
|
||||
"workflowName"
|
||||
],
|
||||
"properties": {
|
||||
@@ -9252,10 +9504,10 @@
|
||||
},
|
||||
"v1.DetailTargetResponse": {
|
||||
"required": [
|
||||
"name",
|
||||
"createTime",
|
||||
"updateTime",
|
||||
"project",
|
||||
"updateTime"
|
||||
"name"
|
||||
],
|
||||
"properties": {
|
||||
"alias": {
|
||||
@@ -9295,11 +9547,11 @@
|
||||
},
|
||||
"v1.DetailUserResponse": {
|
||||
"required": [
|
||||
"disabled",
|
||||
"createTime",
|
||||
"lastLoginTime",
|
||||
"name",
|
||||
"email",
|
||||
"disabled",
|
||||
"projects",
|
||||
"roles"
|
||||
],
|
||||
@@ -9340,12 +9592,12 @@
|
||||
},
|
||||
"v1.DetailWorkflowRecordResponse": {
|
||||
"required": [
|
||||
"namespace",
|
||||
"workflowName",
|
||||
"workflowAlias",
|
||||
"applicationRevision",
|
||||
"status",
|
||||
"name",
|
||||
"namespace",
|
||||
"workflowName",
|
||||
"workflowAlias",
|
||||
"deployTime",
|
||||
"deployUser",
|
||||
"note",
|
||||
@@ -9397,14 +9649,14 @@
|
||||
},
|
||||
"v1.DetailWorkflowResponse": {
|
||||
"required": [
|
||||
"createTime",
|
||||
"alias",
|
||||
"description",
|
||||
"enable",
|
||||
"updateTime",
|
||||
"createTime",
|
||||
"name",
|
||||
"default",
|
||||
"envName"
|
||||
"envName",
|
||||
"updateTime",
|
||||
"description"
|
||||
],
|
||||
"properties": {
|
||||
"alias": {
|
||||
@@ -9599,8 +9851,8 @@
|
||||
},
|
||||
"v1.EnvBindingTarget": {
|
||||
"required": [
|
||||
"name",
|
||||
"alias"
|
||||
"alias",
|
||||
"name"
|
||||
],
|
||||
"properties": {
|
||||
"alias": {
|
||||
@@ -9983,11 +10235,11 @@
|
||||
},
|
||||
"v1.LoginUserInfoResponse": {
|
||||
"required": [
|
||||
"createTime",
|
||||
"lastLoginTime",
|
||||
"name",
|
||||
"email",
|
||||
"disabled",
|
||||
"createTime",
|
||||
"projects",
|
||||
"platformPermissions",
|
||||
"projectPermissions"
|
||||
@@ -10992,6 +11244,12 @@
|
||||
"properties": {
|
||||
"type": "string"
|
||||
},
|
||||
"subSteps": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/common.WorkflowSubStep"
|
||||
}
|
||||
},
|
||||
"type": {
|
||||
"type": "string"
|
||||
}
|
||||
@@ -11006,22 +11264,6 @@
|
||||
"$ref": "#/definitions/common.Schematic"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1beta2.BaseConfigurationSpec": {
|
||||
"properties": {
|
||||
"customRegion": {
|
||||
"type": "string"
|
||||
},
|
||||
"deleteResource": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"providerRef": {
|
||||
"$ref": "#/definitions/types.Reference"
|
||||
},
|
||||
"writeConnectionSecretToRef": {
|
||||
"$ref": "#/definitions/types.SecretReference"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
27
docs/examples/traits/affinity/example.yaml
Normal file
27
docs/examples/traits/affinity/example.yaml
Normal file
@@ -0,0 +1,27 @@
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: busybox
|
||||
spec:
|
||||
components:
|
||||
- name: busybox
|
||||
type: webservice
|
||||
properties:
|
||||
image: busybox
|
||||
cmd: ["sleep", "86400"]
|
||||
labels:
|
||||
label-key: label-value
|
||||
to-delete-label-key: to-delete-label-value
|
||||
traits:
|
||||
- type: affinity
|
||||
properties:
|
||||
podAffinity:
|
||||
preferred:
|
||||
- weight: 1
|
||||
podAffinityTerm:
|
||||
labelSelector:
|
||||
matchExpressions:
|
||||
- key: "secrity"
|
||||
values: ["S1"]
|
||||
namespaces: ["default"]
|
||||
topologyKey: "kubernetes.io/hostname"
|
||||
23
docs/examples/workflow/step-group/README.md
Normal file
23
docs/examples/workflow/step-group/README.md
Normal file
@@ -0,0 +1,23 @@
|
||||
# Step Group
|
||||
|
||||
## How to start
|
||||
|
||||
Edit a yaml file as `example.yaml`, then execute it with `vela up` command.
|
||||
|
||||
## Parameter Introduction
|
||||
|
||||
`step-group` has a `subSteps` parameter which is an array containing any step type whose valid parameters do not include the `step-group` step type itself.
|
||||
|
||||
`step-group` doesn't support `properties` for now.
|
||||
|
||||
## Execute process
|
||||
|
||||
When executing the `step-group` step, the subSteps in the step group are executed in dag mode. The step group will only complete when all subSteps have been executed to completion.
|
||||
SubStep has the same execution behavior as a normal step.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: step-group-example
|
||||
name: example
|
||||
namespace: default
|
||||
spec:
|
||||
components:
|
||||
@@ -10,13 +10,22 @@ spec:
|
||||
properties:
|
||||
image: crccheck/hello-world
|
||||
port: 8000
|
||||
- name: express-server2
|
||||
type: webservice
|
||||
properties:
|
||||
image: crccheck/hello-world
|
||||
port: 8000
|
||||
|
||||
workflow:
|
||||
steps:
|
||||
- name: step
|
||||
type: step-group
|
||||
subSteps:
|
||||
- name: apply-server
|
||||
- name: apply-sub-step1
|
||||
type: apply-component
|
||||
properties:
|
||||
component: express-server
|
||||
- name: apply-sub-step2
|
||||
type: apply-component
|
||||
properties:
|
||||
component: express-server2
|
||||
@@ -19,6 +19,7 @@ package e2e
|
||||
import (
|
||||
context2 "context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Netflix/go-expect"
|
||||
@@ -98,7 +99,7 @@ var ApplicationStatusDeeplyContext = func(context string, applicationName, workl
|
||||
cli := fmt.Sprintf("vela status %s", applicationName)
|
||||
output, err := e2e.LongTimeExec(cli, 120*time.Second)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
gomega.Expect(output).To(gomega.ContainSubstring("healthy"))
|
||||
gomega.Expect(strings.ToLower(output)).To(gomega.ContainSubstring("healthy"))
|
||||
// TODO(zzxwill) need to check workloadType after app status is refined
|
||||
})
|
||||
})
|
||||
|
||||
43
go.mod
43
go.mod
@@ -25,15 +25,19 @@ require (
|
||||
github.com/emicklei/go-restful-openapi/v2 v2.3.0
|
||||
github.com/emicklei/go-restful/v3 v3.0.0-rc2
|
||||
github.com/evanphx/json-patch v4.12.0+incompatible
|
||||
github.com/fatih/camelcase v1.0.0
|
||||
github.com/fatih/color v1.13.0
|
||||
github.com/fluxcd/helm-controller/api v0.21.0
|
||||
github.com/fluxcd/source-controller/api v0.24.4
|
||||
github.com/form3tech-oss/jwt-go v3.2.3+incompatible
|
||||
github.com/gertd/go-pluralize v0.1.7
|
||||
github.com/getkin/kin-openapi v0.94.0
|
||||
github.com/go-logr/logr v1.2.0
|
||||
github.com/go-logr/logr v1.2.2
|
||||
github.com/go-openapi/spec v0.19.8
|
||||
github.com/go-playground/validator/v10 v10.9.0
|
||||
github.com/go-resty/resty/v2 v2.7.0
|
||||
github.com/google/go-cmp v0.5.8
|
||||
github.com/google/go-containerregistry v0.9.0
|
||||
github.com/google/go-github/v32 v32.1.0
|
||||
github.com/google/uuid v1.3.0
|
||||
github.com/gosuri/uilive v0.0.4
|
||||
@@ -42,10 +46,10 @@ require (
|
||||
github.com/hashicorp/hcl/v2 v2.9.1
|
||||
github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174
|
||||
github.com/imdario/mergo v0.3.12
|
||||
github.com/kubevela/prism v0.0.0-20220512081342-9b641aa819f3
|
||||
github.com/kubevela/prism v1.4.0
|
||||
github.com/kyokomi/emoji v2.2.4+incompatible
|
||||
github.com/mitchellh/hashstructure/v2 v2.0.1
|
||||
github.com/oam-dev/cluster-gateway v1.3.3-0.20220509095841-4272c540e1e9
|
||||
github.com/oam-dev/cluster-gateway v1.4.0
|
||||
github.com/oam-dev/cluster-register v1.0.4-0.20220325092210-cee4a3d3fb7d
|
||||
github.com/oam-dev/terraform-config-inspect v0.0.0-20210418082552-fc72d929aa28
|
||||
github.com/oam-dev/terraform-controller v0.7.0
|
||||
@@ -56,6 +60,7 @@ require (
|
||||
github.com/openkruise/kruise-api v1.1.0
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/prometheus/client_golang v1.11.0
|
||||
github.com/robfig/cron/v3 v3.0.1
|
||||
github.com/sirupsen/logrus v1.8.1
|
||||
github.com/spf13/cobra v1.4.0
|
||||
github.com/spf13/pflag v1.0.5
|
||||
@@ -63,6 +68,7 @@ require (
|
||||
github.com/tidwall/gjson v1.9.3
|
||||
github.com/wercker/stern v0.0.0-20190705090245-4fa46dd6987f
|
||||
github.com/wonderflow/cert-manager-api v1.0.3
|
||||
github.com/xanzy/go-gitlab v0.60.0
|
||||
github.com/xlab/treeprint v1.1.0
|
||||
go.mongodb.org/mongo-driver v1.5.1
|
||||
go.uber.org/zap v1.19.1
|
||||
@@ -70,6 +76,7 @@ require (
|
||||
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211
|
||||
golang.org/x/tools v0.1.11-0.20220316014157-77aa08bb151a // indirect
|
||||
gomodules.xyz/jsonpatch/v2 v2.2.0
|
||||
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect
|
||||
gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df
|
||||
gopkg.in/src-d/go-git.v4 v4.13.1
|
||||
@@ -78,7 +85,7 @@ require (
|
||||
helm.sh/helm/v3 v3.7.2
|
||||
istio.io/client-go v0.0.0-20210128182905-ee2edd059e02
|
||||
k8s.io/api v0.23.6
|
||||
k8s.io/apiextensions-apiserver v0.23.5
|
||||
k8s.io/apiextensions-apiserver v0.23.6
|
||||
k8s.io/apimachinery v0.23.6
|
||||
k8s.io/apiserver v0.23.6
|
||||
k8s.io/cli-runtime v0.23.6
|
||||
@@ -98,20 +105,6 @@ require (
|
||||
sigs.k8s.io/yaml v1.3.0
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/fatih/camelcase v1.0.0
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
|
||||
github.com/hashicorp/go-retryablehttp v0.7.0 // indirect
|
||||
github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 // indirect
|
||||
github.com/robfig/cron/v3 v3.0.1
|
||||
github.com/xanzy/go-gitlab v0.60.0
|
||||
github.com/xanzy/ssh-agent v0.3.0 // indirect
|
||||
golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 // indirect
|
||||
gomodules.xyz/jsonpatch/v2 v2.2.0
|
||||
)
|
||||
|
||||
require github.com/google/go-containerregistry v0.9.0
|
||||
|
||||
require (
|
||||
cloud.google.com/go/compute v1.6.1 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
||||
@@ -168,6 +161,9 @@ require (
|
||||
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect
|
||||
github.com/facebookgo/structtag v0.0.0-20150214074306-217e25fb9691 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.1 // indirect
|
||||
github.com/fluxcd/pkg/apis/acl v0.0.3 // indirect
|
||||
github.com/fluxcd/pkg/apis/kustomize v0.3.3 // indirect
|
||||
github.com/fluxcd/pkg/apis/meta v0.13.0 // indirect
|
||||
github.com/fsnotify/fsnotify v1.5.1 // indirect
|
||||
github.com/fvbommel/sortorder v1.0.1 // indirect
|
||||
github.com/ghodss/yaml v1.0.0 // indirect
|
||||
@@ -187,13 +183,15 @@ require (
|
||||
github.com/golang/snappy v0.0.3 // indirect
|
||||
github.com/google/btree v1.0.1 // indirect
|
||||
github.com/google/go-querystring v1.1.0 // indirect
|
||||
github.com/google/gofuzz v1.1.0 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
github.com/googleapis/gnostic v0.5.5 // indirect
|
||||
github.com/gorilla/mux v1.8.0 // indirect
|
||||
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
|
||||
github.com/hashicorp/go-retryablehttp v0.7.0 // indirect
|
||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||
github.com/huandu/xstrings v1.3.2 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.0.0 // indirect
|
||||
@@ -203,6 +201,7 @@ require (
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
|
||||
github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 // indirect
|
||||
github.com/klauspost/compress v1.15.4 // indirect
|
||||
github.com/kr/pretty v0.3.0 // indirect
|
||||
github.com/kr/pty v1.1.8 // indirect
|
||||
@@ -255,6 +254,7 @@ require (
|
||||
github.com/tidwall/match v1.1.1 // indirect
|
||||
github.com/tidwall/pretty v1.2.0 // indirect
|
||||
github.com/tjfoc/gmsm v1.3.2 // indirect
|
||||
github.com/xanzy/ssh-agent v0.3.0 // indirect
|
||||
github.com/xdg-go/pbkdf2 v1.0.0 // indirect
|
||||
github.com/xdg-go/scram v1.0.2 // indirect
|
||||
github.com/xdg-go/stringprep v1.0.2 // indirect
|
||||
@@ -285,6 +285,7 @@ require (
|
||||
golang.org/x/sync v0.0.0-20220513210516-0976fa681c29 // indirect
|
||||
golang.org/x/sys v0.0.0-20220513210249-45d2b4557a2a // indirect
|
||||
golang.org/x/text v0.3.7 // indirect
|
||||
golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3 // indirect
|
||||
@@ -301,10 +302,10 @@ require (
|
||||
istio.io/api v0.0.0-20210128181506-0c4b8e54850f // indirect
|
||||
istio.io/gogo-genproto v0.0.0-20190930162913-45029607206a // indirect
|
||||
oras.land/oras-go v0.4.0 // indirect
|
||||
sigs.k8s.io/apiserver-network-proxy v0.0.24 // indirect
|
||||
sigs.k8s.io/apiserver-network-proxy v0.0.30 // indirect
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30 // indirect
|
||||
sigs.k8s.io/apiserver-runtime v1.1.1 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
|
||||
sigs.k8s.io/kustomize/api v0.10.1 // indirect
|
||||
sigs.k8s.io/kustomize/kyaml v0.13.0 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
|
||||
|
||||
38
go.sum
38
go.sum
@@ -668,6 +668,16 @@ github.com/fatih/structtag v1.1.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4
|
||||
github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94=
|
||||
github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ=
|
||||
github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/fluxcd/helm-controller/api v0.21.0 h1:MWvVzz6u9jR1aE7j1YaSEjBehw0zMndkODnjAE0/1nQ=
|
||||
github.com/fluxcd/helm-controller/api v0.21.0/go.mod h1:cgP5ZR46HIhC8phUfx4Z60He9zNuIHbH3r8YEVl5ip8=
|
||||
github.com/fluxcd/pkg/apis/acl v0.0.3 h1:Lw0ZHdpnO4G7Zy9KjrzwwBmDZQuy4qEjaU/RvA6k1lc=
|
||||
github.com/fluxcd/pkg/apis/acl v0.0.3/go.mod h1:XPts6lRJ9C9fIF9xVWofmQwftvhY25n1ps7W9xw0XLU=
|
||||
github.com/fluxcd/pkg/apis/kustomize v0.3.3 h1:bPN29SdVzWl0yhgivuf/83IAe2R6vUuDVcB3LzyVU8E=
|
||||
github.com/fluxcd/pkg/apis/kustomize v0.3.3/go.mod h1:5HTOFZfQFVMMqR2rvuxpbZhpb+sQpcTT6RCQZOhjFzA=
|
||||
github.com/fluxcd/pkg/apis/meta v0.13.0 h1:0QuNKEExSjk+Rv0I6a85p2H3xOlWhdxZRsh10waEL/c=
|
||||
github.com/fluxcd/pkg/apis/meta v0.13.0/go.mod h1:Z26X5uTU5LxAyWETGueRQY7TvdPaGfKU7Wye9bdUlho=
|
||||
github.com/fluxcd/source-controller/api v0.24.4 h1:m54sS1rJlgJf5j9qDRgKLhbPJAnJ9dY+VrstPKj0aQo=
|
||||
github.com/fluxcd/source-controller/api v0.24.4/go.mod h1:b0MmMPGE8gcpgSyGXe5m7see77tBW26eZrvGkkPstUs=
|
||||
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
|
||||
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
|
||||
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
|
||||
@@ -722,8 +732,9 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG
|
||||
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
|
||||
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
|
||||
github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
|
||||
github.com/go-logr/logr v1.2.0 h1:QK40JKJyMdUDz+h+xvCsru/bJhvG0UxvePV0ufL/AcE=
|
||||
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.2.2 h1:ahHml/yUpnlb96Rp8HCvtYVPY8ZYpxq3g7UYchIYwbs=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk=
|
||||
github.com/go-logr/zapr v0.4.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk=
|
||||
github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk=
|
||||
@@ -1007,8 +1018,9 @@ github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17
|
||||
github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
|
||||
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
|
||||
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
||||
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
@@ -1331,8 +1343,8 @@ github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kubevela/prism v0.0.0-20220512081342-9b641aa819f3 h1:SiQjuAJVLa75M/uVufyEylzlmi+1sZzCnsDN76IFTr4=
|
||||
github.com/kubevela/prism v0.0.0-20220512081342-9b641aa819f3/go.mod h1:Ms3P9eWEeddXk7Q51+9sV1cGQL0cjlpBcSdp1/HoCaI=
|
||||
github.com/kubevela/prism v1.4.0 h1:wYCKXA3p9YpkcSsZjGnSEGBVL+3bPoZNEt4DYs3IxW4=
|
||||
github.com/kubevela/prism v1.4.0/go.mod h1:RP69+bRb57Occer6BeeF5zK3hrD1IhnYf2RNRsIdh9E=
|
||||
github.com/kulti/thelper v0.4.0/go.mod h1:vMu2Cizjy/grP+jmsvOFDx1kYP6+PD1lqg4Yu5exl2U=
|
||||
github.com/kunwardeep/paralleltest v1.0.2/go.mod h1:ZPqNm1fVHPllh5LPVujzbVz1JN2GhLxSfY+oqUsvG30=
|
||||
github.com/kunwardeep/paralleltest v1.0.3/go.mod h1:vLydzomDFpk7yu5UX02RmP0H8QfRPOV/oFhWN85Mjb4=
|
||||
@@ -1551,8 +1563,8 @@ github.com/nishanths/predeclared v0.2.1/go.mod h1:HvkGJcA3naj4lOwnFXFDkFxVtSqQMB
|
||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
|
||||
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
|
||||
github.com/oam-dev/cluster-gateway v1.3.3-0.20220509095841-4272c540e1e9 h1:D3Z+QJi/5+J+wHwVPEo2ygFHXPaMS9t4NHfyIHZatiw=
|
||||
github.com/oam-dev/cluster-gateway v1.3.3-0.20220509095841-4272c540e1e9/go.mod h1:WcxTF3tOZFxRm1wztAJnXPM4cpjYqnEFIuAU9EM6pD0=
|
||||
github.com/oam-dev/cluster-gateway v1.4.0 h1:ZZcNRYsUDRWM5JnNX28/zdSPRKERGstcAY+PaJKA0mE=
|
||||
github.com/oam-dev/cluster-gateway v1.4.0/go.mod h1:qnCczkXtTY7h0SqxjZqAAyKQPwrJjLIFy+IdeoaYKCU=
|
||||
github.com/oam-dev/cluster-register v1.0.4-0.20220325092210-cee4a3d3fb7d h1:ZZsBkksYDzwJEjqx9/XBD+VwlhHz8flkZvMJYzO4ASA=
|
||||
github.com/oam-dev/cluster-register v1.0.4-0.20220325092210-cee4a3d3fb7d/go.mod h1:nKEUMfuEB8pHKsaSah9IA+UQzezrPYebBdRozyNtlZc=
|
||||
github.com/oam-dev/stern v1.13.2 h1:jlGgtJbKmIVhzkH44ft5plkgs8XEfvxbFrQdX60CQR4=
|
||||
@@ -2353,6 +2365,7 @@ golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qx
|
||||
golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211215060638-4ddde0e984e9/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
@@ -2533,6 +2546,7 @@ golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211110154304-99a53858aa08/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
@@ -2905,6 +2919,7 @@ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD
|
||||
google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
|
||||
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
||||
google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
||||
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
||||
google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
||||
google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M=
|
||||
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
|
||||
@@ -3063,8 +3078,9 @@ k8s.io/apiextensions-apiserver v0.21.3/go.mod h1:kl6dap3Gd45+21Jnh6utCx8Z2xxLm8L
|
||||
k8s.io/apiextensions-apiserver v0.22.1/go.mod h1:HeGmorjtRmRLE+Q8dJu6AYRoZccvCMsghwS8XTUYb2c=
|
||||
k8s.io/apiextensions-apiserver v0.22.4/go.mod h1:kH9lxD8dbJ+k0ZizGET55lFgdGjO8t45fgZnCVdZEpw=
|
||||
k8s.io/apiextensions-apiserver v0.23.0/go.mod h1:xIFAEEDlAZgpVBl/1VSjGDmLoXAWRG40+GsWhKhAxY4=
|
||||
k8s.io/apiextensions-apiserver v0.23.5 h1:5SKzdXyvIJKu+zbfPc3kCbWpbxi+O+zdmAJBm26UJqI=
|
||||
k8s.io/apiextensions-apiserver v0.23.5/go.mod h1:ntcPWNXS8ZPKN+zTXuzYMeg731CP0heCTl6gYBxLcuQ=
|
||||
k8s.io/apiextensions-apiserver v0.23.6 h1:v58cQ6Z0/GK1IXYr+oW0fnYl52o9LTY0WgoWvI8uv5Q=
|
||||
k8s.io/apiextensions-apiserver v0.23.6/go.mod h1:YVh17Mphv183THQJA5spNFp9XfoidFyL3WoDgZxQIZU=
|
||||
k8s.io/apimachinery v0.0.0-20190612205821-1799e75a0719/go.mod h1:I4A+glKBHiTgiEjQiCCQfCAIcIMFGt291SmsvcrFzJA=
|
||||
k8s.io/apimachinery v0.0.0-20190809020650-423f5d784010/go.mod h1:Waf/xTS2FGRrgXCkO5FP3XxTOWh0qLf2QhL1qFZZ/R8=
|
||||
k8s.io/apimachinery v0.0.0-20190913080033-27d36303b655/go.mod h1:nL6pwRT8NgfF8TT68DBI8uEePRt89cSvoXUVqbkWHq4=
|
||||
@@ -3260,6 +3276,7 @@ k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/
|
||||
k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
k8s.io/utils v0.0.0-20211208161948-7d6a63dca704/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc=
|
||||
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
|
||||
@@ -3287,8 +3304,8 @@ rsc.io/letsencrypt v0.0.3/go.mod h1:buyQKZ6IXrRnB7TdkHP0RyEybLx18HHyOSoTyoOLqNY=
|
||||
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
|
||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||
sigs.k8s.io/apiserver-network-proxy v0.0.24 h1:yaswrAqidc2XdLK2GRacVEBb55g4dg91f/B7b0SYliY=
|
||||
sigs.k8s.io/apiserver-network-proxy v0.0.24/go.mod h1:z/U9KltvRVSMttVl3cdQo8cPuXEjr+Qn3A5sUJR55XI=
|
||||
sigs.k8s.io/apiserver-network-proxy v0.0.30 h1:Zr5Zqd2GymcYUwijHUDEaQ1I3Dx0giTIWaD80N6j2mE=
|
||||
sigs.k8s.io/apiserver-network-proxy v0.0.30/go.mod h1:0wSWl5ohhp7kYl5XOP0w1IZSWTHhe9TojjDGityZxnc=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.24 h1:bCO6TN9VG1bK3nCG5ghQ5httx1HpsG5MD8XtRDySHDM=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.24/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
|
||||
sigs.k8s.io/apiserver-runtime v1.1.0/go.mod h1:cmahVEn9R791yUnSiFMFdwTqi2dOe5WQRNwcY6jb7l0=
|
||||
@@ -3308,8 +3325,9 @@ sigs.k8s.io/controller-tools v0.2.4/go.mod h1:m/ztfQNocGYBgTTCmFdnK94uVvgxeZeE3L
|
||||
sigs.k8s.io/controller-tools v0.2.8/go.mod h1:9VKHPszmf2DHz/QmHkcfZoewO6BL7pPs9uAiBVsaJSE=
|
||||
sigs.k8s.io/controller-tools v0.6.2 h1:+Y8L0UsAugDipGRw8lrkPoAi6XqlQVZuf1DQHME3PgU=
|
||||
sigs.k8s.io/controller-tools v0.6.2/go.mod h1:oaeGpjXn6+ZSEIQkUe/+3I40PNiDYp9aeawbt3xTgJ8=
|
||||
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s=
|
||||
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
|
||||
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y=
|
||||
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY=
|
||||
sigs.k8s.io/kind v0.9.0 h1:SoDlXq6pEc7dGagHULNRCCBYrLH6xOi7lqXTRXeAlg4=
|
||||
sigs.k8s.io/kind v0.9.0/go.mod h1:cxKQWwmbtRDzQ+RNKnR6gZG6fjbeTtItp5cGf+ww+1Y=
|
||||
sigs.k8s.io/kube-storage-version-migrator v0.0.4/go.mod h1:mXfSLkx9xbJHQsgNDDUZK/iQTs2tMbx/hsJlWe6Fthw=
|
||||
|
||||
@@ -2209,6 +2209,8 @@ spec:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
if:
|
||||
type: string
|
||||
inputs:
|
||||
description: StepInputs defines variable input of
|
||||
WorkflowStep
|
||||
@@ -2253,6 +2255,8 @@ spec:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
if:
|
||||
type: string
|
||||
inputs:
|
||||
description: StepInputs defines variable input
|
||||
of WorkflowStep
|
||||
@@ -3954,6 +3958,8 @@ spec:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
if:
|
||||
type: string
|
||||
inputs:
|
||||
description: StepInputs defines variable input of WorkflowStep
|
||||
items:
|
||||
@@ -3995,6 +4001,8 @@ spec:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
if:
|
||||
type: string
|
||||
inputs:
|
||||
description: StepInputs defines variable input of
|
||||
WorkflowStep
|
||||
|
||||
@@ -1021,6 +1021,8 @@ spec:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
if:
|
||||
type: string
|
||||
inputs:
|
||||
description: StepInputs defines variable input of WorkflowStep
|
||||
items:
|
||||
@@ -1062,6 +1064,8 @@ spec:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
if:
|
||||
type: string
|
||||
inputs:
|
||||
description: StepInputs defines variable input of
|
||||
WorkflowStep
|
||||
|
||||
@@ -42,6 +42,8 @@ spec:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
if:
|
||||
type: string
|
||||
inputs:
|
||||
description: StepInputs defines variable input of WorkflowStep
|
||||
items:
|
||||
@@ -83,6 +85,8 @@ spec:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
if:
|
||||
type: string
|
||||
inputs:
|
||||
description: StepInputs defines variable input of WorkflowStep
|
||||
items:
|
||||
@@ -147,6 +151,8 @@ spec:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
if:
|
||||
type: string
|
||||
inputs:
|
||||
description: StepInputs defines variable input of WorkflowStep
|
||||
items:
|
||||
@@ -188,6 +194,8 @@ spec:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
if:
|
||||
type: string
|
||||
inputs:
|
||||
description: StepInputs defines variable input of WorkflowStep
|
||||
items:
|
||||
|
||||
@@ -9,7 +9,7 @@ kubectl-vela:
|
||||
|
||||
# Build the docker image
|
||||
.PHONY: docker-build
|
||||
docker-build: docker-build-core docker-build-apiserver
|
||||
docker-build: docker-build-core docker-build-apiserver docker-build-cli
|
||||
@$(OK)
|
||||
|
||||
.PHONY: docker-build-core
|
||||
@@ -20,6 +20,10 @@ docker-build-core:
|
||||
docker-build-apiserver:
|
||||
docker build --build-arg=VERSION=$(VELA_VERSION) --build-arg=GITVERSION=$(GIT_COMMIT) -t $(VELA_APISERVER_IMAGE) -f Dockerfile.apiserver .
|
||||
|
||||
.PHONY: docker-build-cli
|
||||
docker-build-cli:
|
||||
docker build --build-arg=VERSION=$(VELA_VERSION) --build-arg=GITVERSION=$(GIT_COMMIT) -t $(VELA_CLI_IMAGE) -f Dockerfile.cli .
|
||||
|
||||
# Build the runtime docker image
|
||||
.PHONY: docker-build-runtime-rollout
|
||||
docker-build-runtime-rollout:
|
||||
|
||||
@@ -43,6 +43,7 @@ endif
|
||||
|
||||
# Image URL to use all building/pushing image targets
|
||||
VELA_CORE_IMAGE ?= vela-core:latest
|
||||
VELA_CLI_IMAGE ?= oamdev/vela-cli:latest
|
||||
VELA_CORE_TEST_IMAGE ?= vela-core-test:$(GIT_COMMIT)
|
||||
VELA_APISERVER_IMAGE ?= apiserver:latest
|
||||
VELA_RUNTIME_ROLLOUT_IMAGE ?= vela-runtime-rollout:latest
|
||||
|
||||
@@ -61,6 +61,7 @@ import (
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha1"
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
|
||||
"github.com/oam-dev/kubevela/apis/types"
|
||||
"github.com/oam-dev/kubevela/pkg/apiserver/utils/log"
|
||||
utils2 "github.com/oam-dev/kubevela/pkg/controller/utils"
|
||||
cuemodel "github.com/oam-dev/kubevela/pkg/cue/model"
|
||||
"github.com/oam-dev/kubevela/pkg/cue/model/value"
|
||||
@@ -1418,13 +1419,29 @@ func checkSemVer(actual string, require string) (bool, error) {
|
||||
l := strings.ReplaceAll(require, "v", " ")
|
||||
constraint, err := semver.NewConstraint(l)
|
||||
if err != nil {
|
||||
log.Logger.Errorf("fail to new constraint: %s", err.Error())
|
||||
return false, err
|
||||
}
|
||||
v, err := semver.NewVersion(smeVer)
|
||||
if err != nil {
|
||||
log.Logger.Errorf("fail to new version %s: %s", smeVer, err.Error())
|
||||
return false, err
|
||||
}
|
||||
return constraint.Check(v), nil
|
||||
if constraint.Check(v) {
|
||||
return true, nil
|
||||
}
|
||||
if strings.Contains(actual, "-") && !strings.Contains(require, "-") {
|
||||
smeVer := strings.TrimPrefix(actual[:strings.Index(actual, "-")], "v")
|
||||
v, err := semver.NewVersion(smeVer)
|
||||
if err != nil {
|
||||
log.Logger.Errorf("fail to new version %s: %s", smeVer, err.Error())
|
||||
return false, err
|
||||
}
|
||||
if constraint.Check(v) {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func fetchVelaCoreImageTag(ctx context.Context, k8sClient client.Client) (string, error) {
|
||||
|
||||
@@ -763,6 +763,21 @@ func TestCheckSemVer(t *testing.T) {
|
||||
require: ">=v1.3.0-beta.2",
|
||||
res: true,
|
||||
},
|
||||
{
|
||||
actual: "v1.4.0-beta.1",
|
||||
require: ">=v1.3.0",
|
||||
res: true,
|
||||
},
|
||||
{
|
||||
actual: "v1.4.0",
|
||||
require: ">=v1.3.0-beta.2",
|
||||
res: true,
|
||||
},
|
||||
{
|
||||
actual: "1.2.4-beta.2",
|
||||
require: ">=v1.2.4-beta.3",
|
||||
res: false,
|
||||
},
|
||||
}
|
||||
for _, testCase := range testCases {
|
||||
result, err := checkSemVer(testCase.actual, testCase.require)
|
||||
|
||||
54
pkg/apiserver/domain/repository/grpc.go
Normal file
54
pkg/apiserver/domain/repository/grpc.go
Normal file
@@ -0,0 +1,54 @@
|
||||
/*
|
||||
Copyright 2022 The KubeVela Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package repository
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/oam-dev/kubevela/pkg/apiserver/domain/model"
|
||||
"github.com/oam-dev/kubevela/pkg/apiserver/infrastructure/datastore"
|
||||
)
|
||||
|
||||
// ListRoles list roles from store
|
||||
func ListRoles(ctx context.Context, store datastore.DataStore, projectName string, page, pageSize int) ([]*model.Role, int64, error) {
|
||||
var role = model.Role{
|
||||
Project: projectName,
|
||||
}
|
||||
var filter datastore.FilterOptions
|
||||
if projectName == "" {
|
||||
filter.IsNotExist = append(filter.IsNotExist, datastore.IsNotExistQueryOption{
|
||||
Key: "project",
|
||||
})
|
||||
}
|
||||
entities, err := store.List(ctx, &role, &datastore.ListOptions{FilterOptions: filter, Page: page, PageSize: pageSize, SortBy: []datastore.SortOption{{Key: "createTime", Order: datastore.SortOrderDescending}}})
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
var roles []*model.Role
|
||||
for i := range entities {
|
||||
roles = append(roles, entities[i].(*model.Role))
|
||||
}
|
||||
count := int64(len(roles))
|
||||
if page > 0 && pageSize > 0 {
|
||||
var err error
|
||||
count, err = store.Count(ctx, &role, &filter)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
}
|
||||
return roles, count, nil
|
||||
}
|
||||
@@ -463,6 +463,8 @@ func destroySyncConfigsApp(ctx context.Context, k8sClient client.Client, project
|
||||
if !kerrors.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
klog.InfoS("config sync application doesn't exist, no need destroy", "application", name)
|
||||
return nil
|
||||
}
|
||||
return k8sClient.Delete(ctx, app)
|
||||
}
|
||||
|
||||
@@ -27,6 +27,7 @@ import (
|
||||
terraformtypes "github.com/oam-dev/terraform-controller/api/types"
|
||||
terraformapi "github.com/oam-dev/terraform-controller/api/v1beta1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
"k8s.io/klog/v2"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
@@ -363,6 +364,12 @@ func (p *projectServiceImpl) UpdateProject(ctx context.Context, projectName stri
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
if _, err := p.AddProjectUser(ctx, projectName, apisv1.AddProjectUserRequest{
|
||||
UserName: req.Owner,
|
||||
UserRoles: []string{"project-admin"},
|
||||
}); err != nil && !errors.Is(err, bcode.ErrProjectUserExist) {
|
||||
return nil, err
|
||||
}
|
||||
project.Owner = req.Owner
|
||||
}
|
||||
err = p.Store.Put(ctx, project)
|
||||
@@ -495,7 +502,11 @@ func (p *projectServiceImpl) GetConfigs(ctx context.Context, projectName, config
|
||||
// legacy providers
|
||||
var providers = &terraformapi.ProviderList{}
|
||||
if err := p.K8sClient.List(ctx, providers, client.InNamespace(types.DefaultAppNamespace)); err != nil {
|
||||
return nil, err
|
||||
// this logic depends on the terraform addon, ignore the no matches kind error before the terraform addon is installed.
|
||||
if !meta.IsNoMatchError(err) {
|
||||
return nil, err
|
||||
}
|
||||
log.Logger.Infof("terraform Provider CRD is not installed")
|
||||
}
|
||||
for _, p := range providers.Items {
|
||||
if p.Labels[types.LabelConfigCatalog] == types.VelaCoreConfig {
|
||||
|
||||
@@ -189,6 +189,27 @@ var _ = Describe("Test project service functions", func() {
|
||||
Expect(base.Description).Should(BeEquivalentTo("Change description"))
|
||||
Expect(base.Owner.Alias).Should(BeEquivalentTo("Administrator"))
|
||||
|
||||
user := &model.User{
|
||||
Name: "admin-2",
|
||||
Alias: "Administrator2",
|
||||
Password: "ddddd",
|
||||
Disabled: false,
|
||||
}
|
||||
err = projectService.Store.Add(context.TODO(), user)
|
||||
Expect(err).Should(BeNil())
|
||||
base, err = projectService.UpdateProject(context.TODO(), "test-project", apisv1.UpdateProjectRequest{
|
||||
Alias: "Change alias",
|
||||
Description: "Change description",
|
||||
Owner: "admin-2",
|
||||
})
|
||||
Expect(err).Should(BeNil())
|
||||
Expect(base.Alias).Should(BeEquivalentTo("Change alias"))
|
||||
Expect(base.Description).Should(BeEquivalentTo("Change description"))
|
||||
Expect(base.Owner.Alias).Should(BeEquivalentTo("Administrator2"))
|
||||
res, err := projectService.ListProjectUser(context.TODO(), "test-project", 0, 0)
|
||||
Expect(err).Should(BeNil())
|
||||
Expect(res.Total).Should(Equal(int64(2)))
|
||||
|
||||
_, err = projectService.UpdateProject(context.TODO(), "test-project", apisv1.UpdateProjectRequest{
|
||||
Alias: "Change alias",
|
||||
Description: "Change description",
|
||||
|
||||
@@ -27,8 +27,11 @@ import (
|
||||
"github.com/emicklei/go-restful/v3"
|
||||
|
||||
"github.com/oam-dev/kubevela/pkg/apiserver/domain/model"
|
||||
"github.com/oam-dev/kubevela/pkg/apiserver/domain/repository"
|
||||
"github.com/oam-dev/kubevela/pkg/apiserver/infrastructure/datastore"
|
||||
assembler "github.com/oam-dev/kubevela/pkg/apiserver/interfaces/api/assembler/v1"
|
||||
apisv1 "github.com/oam-dev/kubevela/pkg/apiserver/interfaces/api/dto/v1"
|
||||
apiserverutils "github.com/oam-dev/kubevela/pkg/apiserver/utils"
|
||||
"github.com/oam-dev/kubevela/pkg/apiserver/utils/bcode"
|
||||
"github.com/oam-dev/kubevela/pkg/apiserver/utils/log"
|
||||
"github.com/oam-dev/kubevela/pkg/utils"
|
||||
@@ -51,7 +54,7 @@ var defaultProjectPermissionTemplate = []*model.PermissionTemplate{
|
||||
{
|
||||
Name: "app-management",
|
||||
Alias: "App Management",
|
||||
Resources: []string{"project:{projectName}/application:*/*", "definition:*"},
|
||||
Resources: []string{"project:{projectName}/application:*/*"},
|
||||
Actions: []string{"*"},
|
||||
Effect: "Allow",
|
||||
Scope: "project",
|
||||
@@ -72,6 +75,14 @@ var defaultProjectPermissionTemplate = []*model.PermissionTemplate{
|
||||
Effect: "Allow",
|
||||
Scope: "project",
|
||||
},
|
||||
{
|
||||
Name: "configuration-read",
|
||||
Alias: "Environment Management",
|
||||
Resources: []string{"project:{projectName}/config:*"},
|
||||
Actions: []string{"list", "detail"},
|
||||
Effect: "Allow",
|
||||
Scope: "project",
|
||||
},
|
||||
}
|
||||
|
||||
var defaultPlatformPermission = []*model.PermissionTemplate{
|
||||
@@ -123,6 +134,14 @@ var defaultPlatformPermission = []*model.PermissionTemplate{
|
||||
Effect: "Allow",
|
||||
Scope: "platform",
|
||||
},
|
||||
{
|
||||
Name: "integration-management",
|
||||
Alias: "Integration Management",
|
||||
Resources: []string{"configType:*/*"},
|
||||
Actions: []string{"*"},
|
||||
Effect: "Allow",
|
||||
Scope: "platform",
|
||||
},
|
||||
{
|
||||
Name: "admin",
|
||||
Alias: "Admin",
|
||||
@@ -182,7 +201,7 @@ var ResourceMaps = map[string]resourceMetadata{
|
||||
pathName: "userName",
|
||||
},
|
||||
"applicationTemplate": {},
|
||||
"configs": {},
|
||||
"config": {},
|
||||
"image": {},
|
||||
},
|
||||
pathName: "projectName",
|
||||
@@ -205,8 +224,10 @@ var ResourceMaps = map[string]resourceMetadata{
|
||||
"user": {
|
||||
pathName: "userName",
|
||||
},
|
||||
"role": {},
|
||||
"permission": {},
|
||||
"role": {},
|
||||
"permission": {
|
||||
pathName: "permissionName",
|
||||
},
|
||||
"systemSetting": {},
|
||||
"definition": {
|
||||
pathName: "definitionName",
|
||||
@@ -325,6 +346,7 @@ type RBACService interface {
|
||||
ListRole(ctx context.Context, projectName string, page, pageSize int) (*apisv1.ListRolesResponse, error)
|
||||
ListPermissionTemplate(ctx context.Context, projectName string) ([]apisv1.PermissionTemplateBase, error)
|
||||
ListPermissions(ctx context.Context, projectName string) ([]apisv1.PermissionBase, error)
|
||||
CreatePermission(ctx context.Context, projectName string, req apisv1.CreatePermissionRequest) (*apisv1.PermissionBase, error)
|
||||
DeletePermission(ctx context.Context, projectName, permName string) error
|
||||
InitDefaultRoleAndUsersForProject(ctx context.Context, project *model.Project) error
|
||||
Init(ctx context.Context) error
|
||||
@@ -429,9 +451,9 @@ func (p *rbacServiceImpl) GetUserPermissions(ctx context.Context, user *model.Us
|
||||
return perms, nil
|
||||
}
|
||||
|
||||
func (p *rbacServiceImpl) UpdatePermission(ctx context.Context, projetName string, permissionName string, req *apisv1.UpdatePermissionRequest) (*apisv1.PermissionBase, error) {
|
||||
func (p *rbacServiceImpl) UpdatePermission(ctx context.Context, projectName string, permissionName string, req *apisv1.UpdatePermissionRequest) (*apisv1.PermissionBase, error) {
|
||||
perm := &model.Permission{
|
||||
Project: projetName,
|
||||
Project: projectName,
|
||||
Name: permissionName,
|
||||
}
|
||||
err := p.Store.Get(ctx, perm)
|
||||
@@ -539,6 +561,7 @@ func (p *rbacServiceImpl) CheckPerm(resource string, actions ...string) func(req
|
||||
}
|
||||
return req.PathParameter(name)
|
||||
})
|
||||
ra.SetActions(actions)
|
||||
|
||||
// get user's perm list.
|
||||
projectName := getProjectName()
|
||||
@@ -552,6 +575,7 @@ func (p *rbacServiceImpl) CheckPerm(resource string, actions ...string) func(req
|
||||
bcode.ReturnError(req, res, bcode.ErrForbidden)
|
||||
return
|
||||
}
|
||||
apiserverutils.SetUsernameAndProjectInRequestContext(req, userName, projectName)
|
||||
chain.ProcessFilter(req, res)
|
||||
}
|
||||
return f
|
||||
@@ -585,7 +609,7 @@ func (p *rbacServiceImpl) CreateRole(ctx context.Context, projectName string, re
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return ConvertRole2Model(&role, policies), nil
|
||||
return assembler.ConvertRole2DTO(&role, policies), nil
|
||||
}
|
||||
|
||||
func (p *rbacServiceImpl) DeleteRole(ctx context.Context, projectName, roleName string) error {
|
||||
@@ -603,6 +627,19 @@ func (p *rbacServiceImpl) DeleteRole(ctx context.Context, projectName, roleName
|
||||
}
|
||||
|
||||
func (p *rbacServiceImpl) DeletePermission(ctx context.Context, projectName, permName string) error {
|
||||
roles, _, err := repository.ListRoles(ctx, p.Store, projectName, 0, 0)
|
||||
if err != nil {
|
||||
log.Logger.Errorf("fail to list the roles: %s", err.Error())
|
||||
return bcode.ErrPermissionIsUsed
|
||||
}
|
||||
for _, role := range roles {
|
||||
for _, p := range role.Permissions {
|
||||
if p == permName {
|
||||
return bcode.ErrPermissionIsUsed
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var perm = model.Permission{
|
||||
Name: permName,
|
||||
Project: projectName,
|
||||
@@ -647,26 +684,17 @@ func (p *rbacServiceImpl) UpdateRole(ctx context.Context, projectName, roleName
|
||||
if err := p.Store.Put(ctx, &role); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ConvertRole2Model(&role, policies), nil
|
||||
return assembler.ConvertRole2DTO(&role, policies), nil
|
||||
}
|
||||
|
||||
func (p *rbacServiceImpl) ListRole(ctx context.Context, projectName string, page, pageSize int) (*apisv1.ListRolesResponse, error) {
|
||||
var role = model.Role{
|
||||
Project: projectName,
|
||||
}
|
||||
var filter datastore.FilterOptions
|
||||
if projectName == "" {
|
||||
filter.IsNotExist = append(filter.IsNotExist, datastore.IsNotExistQueryOption{
|
||||
Key: "project",
|
||||
})
|
||||
}
|
||||
entities, err := p.Store.List(ctx, &role, &datastore.ListOptions{FilterOptions: filter, Page: page, PageSize: pageSize, SortBy: []datastore.SortOption{{Key: "createTime", Order: datastore.SortOrderDescending}}})
|
||||
roles, count, err := repository.ListRoles(ctx, p.Store, projectName, 0, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var policySet = make(map[string]string)
|
||||
for _, entity := range entities {
|
||||
for _, p := range entity.(*model.Role).Permissions {
|
||||
for _, role := range roles {
|
||||
for _, p := range role.Permissions {
|
||||
policySet[p] = p
|
||||
}
|
||||
}
|
||||
@@ -680,17 +708,12 @@ func (p *rbacServiceImpl) ListRole(ctx context.Context, projectName string, page
|
||||
policyMap[policy.Name] = policies[i]
|
||||
}
|
||||
var res apisv1.ListRolesResponse
|
||||
for _, entity := range entities {
|
||||
role := entity.(*model.Role)
|
||||
for _, role := range roles {
|
||||
var rolePolicies []*model.Permission
|
||||
for _, perm := range role.Permissions {
|
||||
rolePolicies = append(rolePolicies, policyMap[perm])
|
||||
}
|
||||
res.Roles = append(res.Roles, ConvertRole2Model(entity.(*model.Role), rolePolicies))
|
||||
}
|
||||
count, err := p.Store.Count(ctx, &role, &filter)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
res.Roles = append(res.Roles, assembler.ConvertRole2DTO(role, rolePolicies))
|
||||
}
|
||||
res.Total = count
|
||||
return &res, nil
|
||||
@@ -728,11 +751,50 @@ func (p *rbacServiceImpl) ListPermissions(ctx context.Context, projectName strin
|
||||
return perms, nil
|
||||
}
|
||||
|
||||
func (p *rbacServiceImpl) CreatePermission(ctx context.Context, projectName string, req apisv1.CreatePermissionRequest) (*apisv1.PermissionBase, error) {
|
||||
if projectName != "" {
|
||||
var project = model.Project{
|
||||
Name: projectName,
|
||||
}
|
||||
if err := p.Store.Get(ctx, &project); err != nil {
|
||||
return nil, bcode.ErrProjectIsNotExist
|
||||
}
|
||||
}
|
||||
if len(req.Resources) == 0 {
|
||||
return nil, bcode.ErrRolePermissionCheckFailure
|
||||
}
|
||||
|
||||
if len(req.Actions) == 0 {
|
||||
req.Actions = []string{"*"}
|
||||
}
|
||||
|
||||
if req.Effect == "" {
|
||||
req.Effect = "Allow"
|
||||
}
|
||||
|
||||
var permission = model.Permission{
|
||||
Name: req.Name,
|
||||
Alias: req.Alias,
|
||||
Project: projectName,
|
||||
Resources: req.Resources,
|
||||
Actions: req.Actions,
|
||||
Effect: req.Effect,
|
||||
}
|
||||
|
||||
if err := p.Store.Add(ctx, &permission); err != nil {
|
||||
if errors.Is(err, datastore.ErrRecordExist) {
|
||||
return nil, bcode.ErrPermissionIsExist
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return assembler.ConvertPermission2DTO(&permission), nil
|
||||
}
|
||||
|
||||
func (p *rbacServiceImpl) InitDefaultRoleAndUsersForProject(ctx context.Context, project *model.Project) error {
|
||||
var batchData []datastore.Entity
|
||||
for _, permissionTemp := range defaultProjectPermissionTemplate {
|
||||
var rra = RequestResourceAction{}
|
||||
var formatedResource []string
|
||||
var formattedResource []string
|
||||
for _, resource := range permissionTemp.Resources {
|
||||
rra.SetResourceWithName(resource, func(name string) string {
|
||||
if name == ResourceMaps["project"].pathName {
|
||||
@@ -740,13 +802,13 @@ func (p *rbacServiceImpl) InitDefaultRoleAndUsersForProject(ctx context.Context,
|
||||
}
|
||||
return ""
|
||||
})
|
||||
formatedResource = append(formatedResource, rra.GetResource().String())
|
||||
formattedResource = append(formattedResource, rra.GetResource().String())
|
||||
}
|
||||
batchData = append(batchData, &model.Permission{
|
||||
Name: permissionTemp.Name,
|
||||
Alias: permissionTemp.Alias,
|
||||
Project: project.Name,
|
||||
Resources: formatedResource,
|
||||
Resources: formattedResource,
|
||||
Actions: permissionTemp.Actions,
|
||||
Effect: permissionTemp.Effect,
|
||||
})
|
||||
@@ -754,12 +816,12 @@ func (p *rbacServiceImpl) InitDefaultRoleAndUsersForProject(ctx context.Context,
|
||||
batchData = append(batchData, &model.Role{
|
||||
Name: "app-developer",
|
||||
Alias: "App Developer",
|
||||
Permissions: []string{"project-read", "app-management", "env-management"},
|
||||
Permissions: []string{"project-read", "app-management", "env-management", "configuration-read"},
|
||||
Project: project.Name,
|
||||
}, &model.Role{
|
||||
Name: "project-admin",
|
||||
Alias: "Project Admin",
|
||||
Permissions: []string{"project-read", "app-management", "env-management", "role-management"},
|
||||
Permissions: []string{"project-read", "app-management", "env-management", "role-management", "configuration-read"},
|
||||
Project: project.Name,
|
||||
})
|
||||
if project.Owner != "" {
|
||||
@@ -773,24 +835,6 @@ func (p *rbacServiceImpl) InitDefaultRoleAndUsersForProject(ctx context.Context,
|
||||
return p.Store.BatchAdd(ctx, batchData)
|
||||
}
|
||||
|
||||
// ConvertRole2Model convert role model to role base struct
|
||||
func ConvertRole2Model(role *model.Role, policies []*model.Permission) *apisv1.RoleBase {
|
||||
return &apisv1.RoleBase{
|
||||
CreateTime: role.CreateTime,
|
||||
UpdateTime: role.UpdateTime,
|
||||
Name: role.Name,
|
||||
Alias: role.Alias,
|
||||
Permissions: func() (list []apisv1.NameAlias) {
|
||||
for _, policy := range policies {
|
||||
if policy != nil {
|
||||
list = append(list, apisv1.NameAlias{Name: policy.Name, Alias: policy.Alias})
|
||||
}
|
||||
}
|
||||
return
|
||||
}(),
|
||||
}
|
||||
}
|
||||
|
||||
// ResourceName it is similar to ARNs
|
||||
// <type>:<value>/<type>:<value>
|
||||
type ResourceName struct {
|
||||
|
||||
@@ -51,6 +51,10 @@ var _ = Describe("Test rbac service", func() {
|
||||
Expect(err).Should(BeNil())
|
||||
Expect(path).Should(BeEquivalentTo("project:{projectName}/application:{appName}"))
|
||||
|
||||
path, err = checkResourcePath("environment")
|
||||
Expect(err).Should(BeNil())
|
||||
Expect(path).Should(BeEquivalentTo("project:{projectName}/environment:{envName}"))
|
||||
|
||||
_, err = checkResourcePath("applications")
|
||||
Expect(err).ShouldNot(BeNil())
|
||||
|
||||
@@ -94,7 +98,7 @@ var _ = Describe("Test rbac service", func() {
|
||||
Expect(err).Should(BeNil())
|
||||
policies, err := rbacService.ListPermissions(context.TODO(), "")
|
||||
Expect(err).Should(BeNil())
|
||||
Expect(len(policies)).Should(BeEquivalentTo(int64(7)))
|
||||
Expect(len(policies)).Should(BeEquivalentTo(int64(8)))
|
||||
})
|
||||
|
||||
It("Test checkPerm by admin user", func() {
|
||||
@@ -194,7 +198,7 @@ var _ = Describe("Test rbac service", func() {
|
||||
|
||||
policies, err := rbacService.ListPermissions(context.TODO(), "init-test")
|
||||
Expect(err).Should(BeNil())
|
||||
Expect(len(policies)).Should(BeEquivalentTo(int64(4)))
|
||||
Expect(len(policies)).Should(BeEquivalentTo(int64(5)))
|
||||
})
|
||||
|
||||
It("Test UpdatePermission", func() {
|
||||
@@ -263,6 +267,21 @@ func TestRequestResourceActionMatch(t *testing.T) {
|
||||
ra5.SetActions([]string{"list"})
|
||||
assert.Equal(t, ra5.Match([]*model.Permission{{Resources: []string{"project:*/application:*"}, Actions: []string{"list"}, Effect: "Allow"}}), true)
|
||||
|
||||
ra6 := &RequestResourceAction{}
|
||||
path, err := checkResourcePath("environment")
|
||||
assert.Equal(t, err, nil)
|
||||
ra6.SetResourceWithName(path, func(name string) string {
|
||||
if name == "projectName" {
|
||||
return "default"
|
||||
}
|
||||
return ""
|
||||
})
|
||||
ra6.SetActions([]string{"create"})
|
||||
assert.Equal(t, ra6.Match([]*model.Permission{{Resources: []string{
|
||||
"project:*/*", "addon:* addonRegistry:*", "target:*", "cluster:*/namespace:*", "user:*", "role:*", "permission:*", "configType:*/*", "project:*",
|
||||
"project:default/config:*", "project:default/role:*", "project:default/projectUser:*", "project:default/permission:*", "project:default/environment:*", "project:default/application:*/*", "project:default",
|
||||
}, Actions: []string{"list", "detail"}, Effect: "Allow"}}), false)
|
||||
|
||||
}
|
||||
|
||||
func TestRegisterResourceAction(t *testing.T) {
|
||||
|
||||
@@ -17,6 +17,7 @@ limitations under the License.
|
||||
package service
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
@@ -27,8 +28,10 @@ import (
|
||||
"github.com/oam-dev/kubevela/pkg/apiserver/domain/repository"
|
||||
"github.com/oam-dev/kubevela/pkg/apiserver/infrastructure/datastore"
|
||||
apisv1 "github.com/oam-dev/kubevela/pkg/apiserver/interfaces/api/dto/v1"
|
||||
"github.com/oam-dev/kubevela/pkg/apiserver/utils"
|
||||
"github.com/oam-dev/kubevela/pkg/apiserver/utils/bcode"
|
||||
"github.com/oam-dev/kubevela/pkg/apiserver/utils/log"
|
||||
"github.com/oam-dev/kubevela/pkg/auth"
|
||||
"github.com/oam-dev/kubevela/pkg/multicluster"
|
||||
)
|
||||
|
||||
@@ -117,6 +120,9 @@ func (dt *targetServiceImpl) DeleteTarget(ctx context.Context, targetName string
|
||||
if err = repository.DeleteTargetNamespace(ctx, dt.K8sClient, ddt.Cluster.ClusterName, ddt.Cluster.Namespace, targetName); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = managePrivilegesForTarget(ctx, dt.K8sClient, ddt, true); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = dt.Store.Delete(ctx, target); err != nil {
|
||||
if errors.Is(err, datastore.ErrRecordNotExist) {
|
||||
return bcode.ErrTargetNotExist
|
||||
@@ -142,6 +148,9 @@ func (dt *targetServiceImpl) CreateTarget(ctx context.Context, req apisv1.Create
|
||||
if err := repository.CreateTargetNamespace(ctx, dt.K8sClient, req.Cluster.ClusterName, req.Cluster.Namespace, req.Name); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := managePrivilegesForTarget(ctx, dt.K8sClient, &target, false); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err := repository.CreateTarget(ctx, dt.Store, &target)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -227,3 +236,22 @@ func (dt *targetServiceImpl) convertFromTargetModel(ctx context.Context, target
|
||||
}
|
||||
return targetBase
|
||||
}
|
||||
|
||||
// managePrivilegesForTarget grant or revoke privileges for target
|
||||
func managePrivilegesForTarget(ctx context.Context, cli client.Client, target *model.Target, revoke bool) error {
|
||||
if target.Cluster == nil {
|
||||
return nil
|
||||
}
|
||||
p := &auth.ScopedPrivilege{Cluster: target.Cluster.ClusterName, Namespace: target.Cluster.Namespace}
|
||||
identity := &auth.Identity{Groups: []string{utils.KubeVelaProjectGroupPrefix + target.Project}}
|
||||
writer := &bytes.Buffer{}
|
||||
f, msg := auth.GrantPrivileges, "GrantPrivileges"
|
||||
if revoke {
|
||||
f, msg = auth.RevokePrivileges, "RevokePrivileges"
|
||||
}
|
||||
if err := f(ctx, cli, []auth.PrivilegeDescription{p}, identity, writer); err != nil {
|
||||
return err
|
||||
}
|
||||
log.Logger.Debugf("%s: %s", msg, writer.String())
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -37,24 +37,14 @@ type VelaQLService interface {
|
||||
}
|
||||
|
||||
type velaQLServiceImpl struct {
|
||||
kubeClient client.Client
|
||||
kubeConfig *rest.Config
|
||||
KubeClient client.Client `inject:"kubeClient"`
|
||||
KubeConfig *rest.Config `inject:"kubeConfig"`
|
||||
dm discoverymapper.DiscoveryMapper
|
||||
pd *packages.PackageDiscover
|
||||
}
|
||||
|
||||
// NewVelaQLService new velaQL service
|
||||
func NewVelaQLService() VelaQLService {
|
||||
k8sClient, err := clients.GetKubeClient()
|
||||
if err != nil {
|
||||
log.Logger.Fatalf("get kubeclient failure %s", err.Error())
|
||||
}
|
||||
|
||||
kubeConfig, err := clients.GetKubeConfig()
|
||||
if err != nil {
|
||||
log.Logger.Fatalf("get kubeconfig failure %s", err.Error())
|
||||
}
|
||||
|
||||
dm, err := clients.GetDiscoverMapper()
|
||||
if err != nil {
|
||||
log.Logger.Fatalf("get discover mapper failure %s", err.Error())
|
||||
@@ -65,10 +55,8 @@ func NewVelaQLService() VelaQLService {
|
||||
log.Logger.Fatalf("get package discover failure %s", err.Error())
|
||||
}
|
||||
return &velaQLServiceImpl{
|
||||
kubeClient: k8sClient,
|
||||
kubeConfig: kubeConfig,
|
||||
dm: dm,
|
||||
pd: pd,
|
||||
dm: dm,
|
||||
pd: pd,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -79,7 +67,7 @@ func (v *velaQLServiceImpl) QueryView(ctx context.Context, velaQL string) (*apis
|
||||
return nil, bcode.ErrParseVelaQL
|
||||
}
|
||||
|
||||
queryValue, err := velaql.NewViewHandler(v.kubeClient, v.kubeConfig, v.dm, v.pd).QueryView(ctx, query)
|
||||
queryValue, err := velaql.NewViewHandler(v.KubeClient, v.KubeConfig, v.dm, v.pd).QueryView(ctx, query)
|
||||
if err != nil {
|
||||
log.Logger.Errorf("fail to query the view %s", err.Error())
|
||||
return nil, bcode.ErrViewQuery
|
||||
@@ -88,6 +76,7 @@ func (v *velaQLServiceImpl) QueryView(ctx context.Context, velaQL string) (*apis
|
||||
resp := apis.VelaQLViewResponse{}
|
||||
err = queryValue.UnmarshalTo(&resp)
|
||||
if err != nil {
|
||||
log.Logger.Errorf("decode the velaQL response to json failure %s", err.Error())
|
||||
return nil, bcode.ErrParseQuery2Json
|
||||
}
|
||||
return &resp, err
|
||||
|
||||
@@ -43,6 +43,7 @@ import (
|
||||
"github.com/oam-dev/kubevela/pkg/oam/util"
|
||||
utils2 "github.com/oam-dev/kubevela/pkg/utils"
|
||||
"github.com/oam-dev/kubevela/pkg/utils/apply"
|
||||
"github.com/oam-dev/kubevela/pkg/workflow/tasks/custom"
|
||||
)
|
||||
|
||||
// WorkflowService workflow manage api
|
||||
@@ -577,8 +578,7 @@ func (w *workflowServiceImpl) TerminateRecord(ctx context.Context, appModel *mod
|
||||
return err
|
||||
}
|
||||
|
||||
oamApp.Status.Workflow.Terminated = true
|
||||
if err := w.KubeClient.Status().Patch(ctx, oamApp, client.Merge); err != nil {
|
||||
if err := TerminateWorkflow(ctx, w.KubeClient, oamApp); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.syncWorkflowStatus(ctx, oamApp, recordName, oamApp.Name); err != nil {
|
||||
@@ -588,6 +588,42 @@ func (w *workflowServiceImpl) TerminateRecord(ctx context.Context, appModel *mod
|
||||
return nil
|
||||
}
|
||||
|
||||
// TerminateWorkflow terminate workflow
|
||||
func TerminateWorkflow(ctx context.Context, kubecli client.Client, app *v1beta1.Application) error {
|
||||
// set the workflow terminated to true
|
||||
app.Status.Workflow.Terminated = true
|
||||
steps := app.Status.Workflow.Steps
|
||||
for i, step := range steps {
|
||||
switch step.Phase {
|
||||
case common.WorkflowStepPhaseFailed:
|
||||
if step.Reason != custom.StatusReasonFailedAfterRetries {
|
||||
steps[i].Reason = custom.StatusReasonTerminate
|
||||
}
|
||||
case common.WorkflowStepPhaseRunning:
|
||||
steps[i].Phase = common.WorkflowStepPhaseFailed
|
||||
steps[i].Reason = custom.StatusReasonTerminate
|
||||
default:
|
||||
}
|
||||
for j, sub := range step.SubStepsStatus {
|
||||
switch sub.Phase {
|
||||
case common.WorkflowStepPhaseFailed:
|
||||
if sub.Reason != custom.StatusReasonFailedAfterRetries {
|
||||
steps[i].SubStepsStatus[j].Phase = custom.StatusReasonTerminate
|
||||
}
|
||||
case common.WorkflowStepPhaseRunning:
|
||||
steps[i].SubStepsStatus[j].Phase = common.WorkflowStepPhaseFailed
|
||||
steps[i].SubStepsStatus[j].Reason = custom.StatusReasonTerminate
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := kubecli.Status().Patch(ctx, app, client.Merge); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *workflowServiceImpl) RollbackRecord(ctx context.Context, appModel *model.Application, workflow *model.Workflow, recordName, revisionVersion string) error {
|
||||
if revisionVersion == "" {
|
||||
// find the latest complete revision version
|
||||
|
||||
@@ -18,6 +18,7 @@ package clients
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"k8s.io/client-go/discovery"
|
||||
"k8s.io/client-go/rest"
|
||||
@@ -25,6 +26,7 @@ import (
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/config"
|
||||
|
||||
apiConfig "github.com/oam-dev/kubevela/pkg/apiserver/config"
|
||||
"github.com/oam-dev/kubevela/pkg/auth"
|
||||
"github.com/oam-dev/kubevela/pkg/cue/packages"
|
||||
"github.com/oam-dev/kubevela/pkg/multicluster"
|
||||
"github.com/oam-dev/kubevela/pkg/oam/discoverymapper"
|
||||
@@ -39,6 +41,18 @@ func SetKubeClient(c client.Client) {
|
||||
kubeClient = c
|
||||
}
|
||||
|
||||
func setKubeConfig(conf *rest.Config) (err error) {
|
||||
if conf == nil {
|
||||
conf, err = config.GetConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
kubeConfig = conf
|
||||
kubeConfig.Wrap(auth.NewImpersonatingRoundTripper)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetKubeConfig generate the kube config from the config of apiserver
|
||||
func SetKubeConfig(c apiConfig.Config) error {
|
||||
conf, err := config.GetConfig()
|
||||
@@ -48,7 +62,7 @@ func SetKubeConfig(c apiConfig.Config) error {
|
||||
kubeConfig = conf
|
||||
kubeConfig.Burst = c.KubeBurst
|
||||
kubeConfig.QPS = float32(c.KubeQPS)
|
||||
return nil
|
||||
return setKubeConfig(kubeConfig)
|
||||
}
|
||||
|
||||
// GetKubeClient create and return kube runtime client
|
||||
@@ -57,11 +71,7 @@ func GetKubeClient() (client.Client, error) {
|
||||
return kubeClient, nil
|
||||
}
|
||||
if kubeConfig == nil {
|
||||
conf, err := config.GetConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
kubeConfig = conf
|
||||
return nil, fmt.Errorf("please call SetKubeConfig first")
|
||||
}
|
||||
var err error
|
||||
kubeClient, err = multicluster.Initialize(kubeConfig, false)
|
||||
@@ -81,10 +91,8 @@ func GetKubeClient() (client.Client, error) {
|
||||
|
||||
// GetKubeConfig create/get kube runtime config
|
||||
func GetKubeConfig() (*rest.Config, error) {
|
||||
var err error
|
||||
if kubeConfig == nil {
|
||||
kubeConfig, err = config.GetConfig()
|
||||
return kubeConfig, err
|
||||
return nil, fmt.Errorf("please call SetKubeConfig first")
|
||||
}
|
||||
return kubeConfig, nil
|
||||
}
|
||||
|
||||
@@ -275,7 +275,7 @@ func (m *mongodb) Count(ctx context.Context, entity datastore.Entity, filterOpti
|
||||
if entity.Index() != nil {
|
||||
for k, v := range entity.Index() {
|
||||
filter = append(filter, bson.E{
|
||||
Key: k,
|
||||
Key: strings.ToLower(k),
|
||||
Value: v,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -232,6 +232,11 @@ var _ = Describe("Test mongodb datastore driver", func() {
|
||||
}})
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
Expect(count).Should(Equal(int64(3)))
|
||||
|
||||
app.Name = "kubevela-app-3"
|
||||
count, err = mongodbDriver.Count(context.TODO(), &app, &datastore.FilterOptions{})
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
Expect(count).Should(Equal(int64(1)))
|
||||
})
|
||||
|
||||
It("Test isExist function", func() {
|
||||
|
||||
@@ -201,6 +201,40 @@ func ConvertPolicyModelToBase(policy *model.ApplicationPolicy) *apisv1.PolicyBas
|
||||
return pb
|
||||
}
|
||||
|
||||
// ConvertRole2DTO convert role model to role base struct
|
||||
func ConvertRole2DTO(role *model.Role, policies []*model.Permission) *apisv1.RoleBase {
|
||||
return &apisv1.RoleBase{
|
||||
CreateTime: role.CreateTime,
|
||||
UpdateTime: role.UpdateTime,
|
||||
Name: role.Name,
|
||||
Alias: role.Alias,
|
||||
Permissions: func() (list []apisv1.NameAlias) {
|
||||
for _, policy := range policies {
|
||||
if policy != nil {
|
||||
list = append(list, apisv1.NameAlias{Name: policy.Name, Alias: policy.Alias})
|
||||
}
|
||||
}
|
||||
return
|
||||
}(),
|
||||
}
|
||||
}
|
||||
|
||||
// ConvertPermission2DTO convert permission model to the DTO
|
||||
func ConvertPermission2DTO(permission *model.Permission) *apisv1.PermissionBase {
|
||||
if permission == nil {
|
||||
return nil
|
||||
}
|
||||
return &apisv1.PermissionBase{
|
||||
Name: permission.Name,
|
||||
Alias: permission.Alias,
|
||||
Resources: permission.Resources,
|
||||
Actions: permission.Actions,
|
||||
Effect: permission.Effect,
|
||||
CreateTime: permission.CreateTime,
|
||||
UpdateTime: permission.UpdateTime,
|
||||
}
|
||||
}
|
||||
|
||||
func convertBool(b *bool) bool {
|
||||
if b == nil {
|
||||
return false
|
||||
|
||||
@@ -65,7 +65,7 @@ func (s *configAPIInterface) GetWebServiceRoute() *restful.WebService {
|
||||
ws.Route(ws.POST("/{configType}").To(s.createConfig).
|
||||
Doc("create or update a config").
|
||||
Metadata(restfulspec.KeyOpenAPITags, tags).
|
||||
Filter(s.RbacService.CheckPerm("configType", "create")).
|
||||
Filter(s.RbacService.CheckPerm("configType/config", "create")).
|
||||
Param(ws.PathParameter("configType", "identifier of the config type").DataType("string")).
|
||||
Reads(apis.CreateConfigRequest{}).
|
||||
Returns(200, "OK", apis.EmptyResponse{}).
|
||||
@@ -76,7 +76,7 @@ func (s *configAPIInterface) GetWebServiceRoute() *restful.WebService {
|
||||
ws.Route(ws.GET("/{configType}/configs").To(s.getConfigs).
|
||||
Doc("get configs from a config type").
|
||||
Metadata(restfulspec.KeyOpenAPITags, tags).
|
||||
Filter(s.RbacService.CheckPerm("config", "list")).
|
||||
Filter(s.RbacService.CheckPerm("configType/config", "list")).
|
||||
Param(ws.PathParameter("configType", "identifier of the config").DataType("string")).
|
||||
Returns(200, "OK", []*apis.Config{}).
|
||||
Returns(400, "Bad Request", bcode.Bcode{}).
|
||||
@@ -85,7 +85,7 @@ func (s *configAPIInterface) GetWebServiceRoute() *restful.WebService {
|
||||
ws.Route(ws.GET("/{configType}/configs/{name}").To(s.getConfig).
|
||||
Doc("get a config from a config type").
|
||||
Metadata(restfulspec.KeyOpenAPITags, tags).
|
||||
Filter(s.RbacService.CheckPerm("config", "get")).
|
||||
Filter(s.RbacService.CheckPerm("configType/config", "get")).
|
||||
Param(ws.PathParameter("configType", "identifier of the config type").DataType("string")).
|
||||
Param(ws.PathParameter("name", "identifier of the config").DataType("string")).
|
||||
Returns(200, "OK", []*apis.Config{}).
|
||||
@@ -95,7 +95,7 @@ func (s *configAPIInterface) GetWebServiceRoute() *restful.WebService {
|
||||
ws.Route(ws.DELETE("/{configType}/configs/{name}").To(s.deleteConfig).
|
||||
Doc("delete a config").
|
||||
Metadata(restfulspec.KeyOpenAPITags, tags).
|
||||
Filter(s.RbacService.CheckPerm("config", "delete")).
|
||||
Filter(s.RbacService.CheckPerm("configType/config", "delete")).
|
||||
Param(ws.PathParameter("configType", "identifier of the config type").DataType("string")).
|
||||
Param(ws.PathParameter("name", "identifier of the config").DataType("string")).
|
||||
Returns(200, "OK", apis.EmptyResponse{}).
|
||||
|
||||
@@ -1345,7 +1345,7 @@ type PermissionBase struct {
|
||||
UpdateTime time.Time `json:"updateTime"`
|
||||
}
|
||||
|
||||
// UpdatePermissionRequest the request body that update permission policy
|
||||
// UpdatePermissionRequest the request body that updating a permission policy
|
||||
type UpdatePermissionRequest struct {
|
||||
Alias string `json:"alias" validate:"checkalias"`
|
||||
Resources []string `json:"resources"`
|
||||
@@ -1353,6 +1353,15 @@ type UpdatePermissionRequest struct {
|
||||
Effect string `json:"effect" validate:"oneof=Allow Deny"`
|
||||
}
|
||||
|
||||
// CreatePermissionRequest the request body that creating a permission policy
|
||||
type CreatePermissionRequest struct {
|
||||
Name string `json:"name" validate:"checkname"`
|
||||
Alias string `json:"alias" validate:"checkalias"`
|
||||
Resources []string `json:"resources"`
|
||||
Actions []string `json:"actions"`
|
||||
Effect string `json:"effect" validate:"oneof=Allow Deny"`
|
||||
}
|
||||
|
||||
// LoginUserInfoResponse the response body of login user info
|
||||
type LoginUserInfoResponse struct {
|
||||
UserBase
|
||||
|
||||
@@ -70,7 +70,7 @@ func (n *envAPIInterface) GetWebServiceRoute() *restful.WebService {
|
||||
Doc("update an env").
|
||||
Metadata(restfulspec.KeyOpenAPITags, tags).
|
||||
Filter(n.RBACService.CheckPerm("environment", "update")).
|
||||
Param(ws.PathParameter("envName", "identifier of the application ").DataType("string")).
|
||||
Param(ws.PathParameter("envName", "identifier of the environment").DataType("string")).
|
||||
Reads(apis.CreateEnvRequest{}).
|
||||
Returns(200, "OK", apis.Env{}).
|
||||
Writes(apis.Env{}))
|
||||
@@ -80,7 +80,7 @@ func (n *envAPIInterface) GetWebServiceRoute() *restful.WebService {
|
||||
Doc("delete one env").
|
||||
Metadata(restfulspec.KeyOpenAPITags, tags).
|
||||
Filter(n.RBACService.CheckPerm("environment", "delete")).
|
||||
Param(ws.PathParameter("envName", "identifier of the application ").DataType("string")).
|
||||
Param(ws.PathParameter("envName", "identifier of the environment").DataType("string")).
|
||||
Returns(200, "OK", apis.EmptyResponse{}).
|
||||
Returns(400, "Bad Request", bcode.Bcode{}).
|
||||
Writes(apis.EmptyResponse{}))
|
||||
|
||||
@@ -176,10 +176,27 @@ func (n *projectAPIInterface) GetWebServiceRoute() *restful.WebService {
|
||||
Returns(200, "OK", []apis.PermissionBase{}).
|
||||
Writes([]apis.PermissionBase{}))
|
||||
|
||||
ws.Route(ws.POST("/{projectName}/permissions").To(n.createProjectPermission).
|
||||
Doc("create a project level perm policy").
|
||||
Metadata(restfulspec.KeyOpenAPITags, tags).
|
||||
Param(ws.PathParameter("projectName", "identifier of the project").DataType("string")).
|
||||
Filter(n.RbacService.CheckPerm("project/permission", "list")).
|
||||
Returns(200, "OK", []apis.PermissionBase{}).
|
||||
Writes([]apis.PermissionBase{}))
|
||||
|
||||
ws.Route(ws.DELETE("/{projectName}/permissions/{permissionName}").To(n.deleteProjectPermission).
|
||||
Doc("delete a project level perm policy").
|
||||
Metadata(restfulspec.KeyOpenAPITags, tags).
|
||||
Param(ws.PathParameter("projectName", "identifier of the project").DataType("string")).
|
||||
Param(ws.PathParameter("permissionName", "identifier of the permission").DataType("string")).
|
||||
Filter(n.RbacService.CheckPerm("project/permission", "list")).
|
||||
Returns(200, "OK", []apis.PermissionBase{}).
|
||||
Writes([]apis.PermissionBase{}))
|
||||
|
||||
ws.Route(ws.GET("/{projectName}/configs").To(n.getConfigs).
|
||||
Doc("get configs which are in a project").
|
||||
Metadata(restfulspec.KeyOpenAPITags, tags).
|
||||
Filter(n.RbacService.CheckPerm("project/configs", "list")).
|
||||
Filter(n.RbacService.CheckPerm("project/config", "list")).
|
||||
Param(ws.QueryParameter("configType", "config type").DataType("string")).
|
||||
Param(ws.PathParameter("projectName", "identifier of the project").DataType("string")).
|
||||
Returns(200, "OK", []*apis.Config{}).
|
||||
@@ -524,6 +541,45 @@ func (n *projectAPIInterface) listProjectPermissions(req *restful.Request, res *
|
||||
}
|
||||
}
|
||||
|
||||
func (n *projectAPIInterface) createProjectPermission(req *restful.Request, res *restful.Response) {
|
||||
// Verify the validity of parameters
|
||||
var createReq apis.CreatePermissionRequest
|
||||
if err := req.ReadEntity(&createReq); err != nil {
|
||||
bcode.ReturnError(req, res, err)
|
||||
return
|
||||
}
|
||||
if err := validate.Struct(&createReq); err != nil {
|
||||
bcode.ReturnError(req, res, err)
|
||||
return
|
||||
}
|
||||
// Call the domain layer code
|
||||
permissionBase, err := n.RbacService.CreatePermission(req.Request.Context(), req.PathParameter("projectName"), createReq)
|
||||
if err != nil {
|
||||
log.Logger.Errorf("create the permission failure %s", err.Error())
|
||||
bcode.ReturnError(req, res, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Write back response data
|
||||
if err := res.WriteEntity(permissionBase); err != nil {
|
||||
bcode.ReturnError(req, res, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (n *projectAPIInterface) deleteProjectPermission(req *restful.Request, res *restful.Response) {
|
||||
err := n.RbacService.DeletePermission(req.Request.Context(), req.PathParameter("projectName"), req.PathParameter("permissionName"))
|
||||
if err != nil {
|
||||
bcode.ReturnError(req, res, err)
|
||||
return
|
||||
}
|
||||
// Write back response data
|
||||
if err := res.WriteEntity(apis.EmptyResponse{}); err != nil {
|
||||
bcode.ReturnError(req, res, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (n *projectAPIInterface) getConfigs(req *restful.Request, res *restful.Response) {
|
||||
configs, err := n.ProjectService.GetConfigs(req.Request.Context(), req.PathParameter("projectName"), req.QueryParameter("configType"))
|
||||
if err != nil {
|
||||
|
||||
@@ -63,6 +63,7 @@ func (r *rbacAPIInterface) GetWebServiceRoute() *restful.WebService {
|
||||
ws.Route(ws.PUT("/roles/{roleName}").To(r.updatePlatformRole).
|
||||
Doc("update platform level role").
|
||||
Metadata(restfulspec.KeyOpenAPITags, tags).
|
||||
Param(ws.PathParameter("roleName", "identifier of the role").DataType("string")).
|
||||
Filter(r.RbacService.CheckPerm("role", "update")).
|
||||
Reads(apis.UpdateRoleRequest{}).
|
||||
Returns(200, "OK", apis.RoleBase{}).
|
||||
@@ -71,17 +72,34 @@ func (r *rbacAPIInterface) GetWebServiceRoute() *restful.WebService {
|
||||
ws.Route(ws.DELETE("/roles/{roleName}").To(r.deletePlatformRole).
|
||||
Doc("update platform level role").
|
||||
Metadata(restfulspec.KeyOpenAPITags, tags).
|
||||
Param(ws.PathParameter("roleName", "identifier of the role").DataType("string")).
|
||||
Filter(r.RbacService.CheckPerm("role", "delete")).
|
||||
Returns(200, "OK", apis.EmptyResponse{}).
|
||||
Writes(apis.EmptyResponse{}))
|
||||
|
||||
ws.Route(ws.GET("/permissions").To(r.listPlatformPermissions).
|
||||
Doc("list all project level perm policies").
|
||||
Doc("list all platform level perm policies").
|
||||
Metadata(restfulspec.KeyOpenAPITags, tags).
|
||||
Filter(r.RbacService.CheckPerm("permission", "list")).
|
||||
Returns(200, "OK", []apis.PermissionBase{}).
|
||||
Writes([]apis.PermissionBase{}))
|
||||
|
||||
ws.Route(ws.POST("/permissions").To(r.createPlatformPermission).
|
||||
Doc("create the platform perm policy").
|
||||
Metadata(restfulspec.KeyOpenAPITags, tags).
|
||||
Reads(apis.CreatePermissionRequest{}).
|
||||
Filter(r.RbacService.CheckPerm("permission", "create")).
|
||||
Returns(200, "OK", apis.PermissionBase{}).
|
||||
Writes(apis.PermissionBase{}))
|
||||
|
||||
ws.Route(ws.DELETE("/permissions/{permissionName}").To(r.deletePlatformPermission).
|
||||
Doc("delete a platform perm policy").
|
||||
Metadata(restfulspec.KeyOpenAPITags, tags).
|
||||
Param(ws.PathParameter("permissionName", "identifier of the permission").DataType("string")).
|
||||
Filter(r.RbacService.CheckPerm("permission", "delete")).
|
||||
Returns(200, "OK", apis.EmptyResponse{}).
|
||||
Writes(apis.EmptyResponse{}))
|
||||
|
||||
ws.Filter(authCheckFilter)
|
||||
return ws
|
||||
}
|
||||
@@ -179,3 +197,42 @@ func (r *rbacAPIInterface) listPlatformPermissions(req *restful.Request, res *re
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (r *rbacAPIInterface) createPlatformPermission(req *restful.Request, res *restful.Response) {
|
||||
// Verify the validity of parameters
|
||||
var createReq apis.CreatePermissionRequest
|
||||
if err := req.ReadEntity(&createReq); err != nil {
|
||||
bcode.ReturnError(req, res, err)
|
||||
return
|
||||
}
|
||||
if err := validate.Struct(&createReq); err != nil {
|
||||
bcode.ReturnError(req, res, err)
|
||||
return
|
||||
}
|
||||
// Call the domain layer code
|
||||
permissionBase, err := r.RbacService.CreatePermission(req.Request.Context(), "", createReq)
|
||||
if err != nil {
|
||||
log.Logger.Errorf("create the permission failure %s", err.Error())
|
||||
bcode.ReturnError(req, res, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Write back response data
|
||||
if err := res.WriteEntity(permissionBase); err != nil {
|
||||
bcode.ReturnError(req, res, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (r *rbacAPIInterface) deletePlatformPermission(req *restful.Request, res *restful.Response) {
|
||||
err := r.RbacService.DeletePermission(req.Request.Context(), "", req.PathParameter("permissionName"))
|
||||
if err != nil {
|
||||
bcode.ReturnError(req, res, err)
|
||||
return
|
||||
}
|
||||
// Write back response data
|
||||
if err := res.WriteEntity(apis.EmptyResponse{}); err != nil {
|
||||
bcode.ReturnError(req, res, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
@@ -61,37 +61,18 @@ type restServer struct {
|
||||
}
|
||||
|
||||
// New create api server with config data
|
||||
func New(cfg config.Config) (a APIServer, err error) {
|
||||
var ds datastore.DataStore
|
||||
switch cfg.Datastore.Type {
|
||||
case "mongodb":
|
||||
ds, err = mongodb.New(context.Background(), cfg.Datastore)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("create mongodb datastore instance failure %w", err)
|
||||
}
|
||||
case "kubeapi":
|
||||
ds, err = kubeapi.New(context.Background(), cfg.Datastore)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("create kubeapi datastore instance failure %w", err)
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("not support datastore type %s", cfg.Datastore.Type)
|
||||
}
|
||||
|
||||
func New(cfg config.Config) (a APIServer) {
|
||||
s := &restServer{
|
||||
webContainer: restful.NewContainer(),
|
||||
beanContainer: container.NewContainer(),
|
||||
cfg: cfg,
|
||||
dataStore: ds,
|
||||
}
|
||||
return s, nil
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *restServer) buildIoCContainer() error {
|
||||
// infrastructure
|
||||
if err := s.beanContainer.ProvideWithName("datastore", s.dataStore); err != nil {
|
||||
return fmt.Errorf("fail to provides the datastore bean to the container: %w", err)
|
||||
}
|
||||
|
||||
err := clients.SetKubeConfig(s.cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -104,6 +85,27 @@ func (s *restServer) buildIoCContainer() error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var ds datastore.DataStore
|
||||
switch s.cfg.Datastore.Type {
|
||||
case "mongodb":
|
||||
ds, err = mongodb.New(context.Background(), s.cfg.Datastore)
|
||||
if err != nil {
|
||||
return fmt.Errorf("create mongodb datastore instance failure %w", err)
|
||||
}
|
||||
case "kubeapi":
|
||||
ds, err = kubeapi.New(context.Background(), s.cfg.Datastore)
|
||||
if err != nil {
|
||||
return fmt.Errorf("create kubeapi datastore instance failure %w", err)
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("not support datastore type %s", s.cfg.Datastore.Type)
|
||||
}
|
||||
s.dataStore = ds
|
||||
if err := s.beanContainer.ProvideWithName("datastore", s.dataStore); err != nil {
|
||||
return fmt.Errorf("fail to provides the datastore bean to the container: %w", err)
|
||||
}
|
||||
|
||||
kubeClient = utils.NewAuthApplicationClient(kubeClient)
|
||||
if err := s.beanContainer.ProvideWithName("kubeClient", kubeClient); err != nil {
|
||||
return fmt.Errorf("fail to provides the kubeClient bean to the container: %w", err)
|
||||
}
|
||||
|
||||
125
pkg/apiserver/utils/auth.go
Normal file
125
pkg/apiserver/utils/auth.go
Normal file
@@ -0,0 +1,125 @@
|
||||
/*
|
||||
Copyright 2022 The KubeVela Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package utils
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/emicklei/go-restful/v3"
|
||||
"k8s.io/apiserver/pkg/authentication/user"
|
||||
"k8s.io/apiserver/pkg/endpoints/request"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
|
||||
"github.com/oam-dev/kubevela/pkg/apiserver/domain/model"
|
||||
"github.com/oam-dev/kubevela/pkg/features"
|
||||
)
|
||||
|
||||
// KubeVelaProjectGroupPrefix the prefix kubevela project
|
||||
const KubeVelaProjectGroupPrefix = "kubevela:project:"
|
||||
|
||||
// ContextWithUserInfo extract user from context (parse username and project) for impersonation
|
||||
func ContextWithUserInfo(ctx context.Context) context.Context {
|
||||
if !features.APIServerFeatureGate.Enabled(features.APIServerEnableImpersonation) {
|
||||
return ctx
|
||||
}
|
||||
userInfo := &user.DefaultInfo{Name: user.Anonymous}
|
||||
if username, ok := UsernameFrom(ctx); ok {
|
||||
userInfo.Name = username
|
||||
}
|
||||
if project, ok := ProjectFrom(ctx); ok {
|
||||
userInfo.Groups = []string{KubeVelaProjectGroupPrefix + project}
|
||||
}
|
||||
if userInfo.Name == model.DefaultAdminUserName && !features.APIServerFeatureGate.Enabled(features.APIServerEnableAdminImpersonation) {
|
||||
return ctx
|
||||
}
|
||||
return request.WithUser(ctx, userInfo)
|
||||
}
|
||||
|
||||
// SetUsernameAndProjectInRequestContext .
|
||||
func SetUsernameAndProjectInRequestContext(req *restful.Request, userName string, projectName string) {
|
||||
ctx := req.Request.Context()
|
||||
ctx = WithUsername(ctx, userName)
|
||||
ctx = WithProject(ctx, projectName)
|
||||
req.Request = req.Request.WithContext(ctx)
|
||||
}
|
||||
|
||||
// NewAuthApplicationClient will carry UserInfo for mutating requests related to application automatically
|
||||
func NewAuthApplicationClient(cli client.Client) client.Client {
|
||||
return &authAppClient{Client: cli}
|
||||
}
|
||||
|
||||
type authAppClient struct {
|
||||
client.Client
|
||||
}
|
||||
|
||||
// Status .
|
||||
func (c *authAppClient) Status() client.StatusWriter {
|
||||
return &authAppStatusClient{StatusWriter: c.Client.Status()}
|
||||
}
|
||||
|
||||
// Create .
|
||||
func (c *authAppClient) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error {
|
||||
if _, ok := obj.(*v1beta1.Application); ok {
|
||||
ctx = ContextWithUserInfo(ctx)
|
||||
}
|
||||
return c.Client.Create(ctx, obj, opts...)
|
||||
}
|
||||
|
||||
// Delete .
|
||||
func (c *authAppClient) Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error {
|
||||
if _, ok := obj.(*v1beta1.Application); ok {
|
||||
ctx = ContextWithUserInfo(ctx)
|
||||
}
|
||||
return c.Client.Delete(ctx, obj, opts...)
|
||||
}
|
||||
|
||||
// Update .
|
||||
func (c *authAppClient) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error {
|
||||
if _, ok := obj.(*v1beta1.Application); ok {
|
||||
ctx = ContextWithUserInfo(ctx)
|
||||
}
|
||||
return c.Client.Update(ctx, obj, opts...)
|
||||
}
|
||||
|
||||
// Patch .
|
||||
func (c *authAppClient) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error {
|
||||
if _, ok := obj.(*v1beta1.Application); ok {
|
||||
ctx = ContextWithUserInfo(ctx)
|
||||
}
|
||||
return c.Client.Patch(ctx, obj, patch, opts...)
|
||||
}
|
||||
|
||||
type authAppStatusClient struct {
|
||||
client.StatusWriter
|
||||
}
|
||||
|
||||
// Update .
|
||||
func (c *authAppStatusClient) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error {
|
||||
if _, ok := obj.(*v1beta1.Application); ok {
|
||||
ctx = ContextWithUserInfo(ctx)
|
||||
}
|
||||
return c.StatusWriter.Update(ctx, obj, opts...)
|
||||
}
|
||||
|
||||
// Patch .
|
||||
func (c *authAppStatusClient) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error {
|
||||
if _, ok := obj.(*v1beta1.Application); ok {
|
||||
ctx = ContextWithUserInfo(ctx)
|
||||
}
|
||||
return c.StatusWriter.Patch(ctx, obj, patch, opts...)
|
||||
}
|
||||
@@ -25,4 +25,8 @@ var (
|
||||
ErrRoleIsNotExist = NewBcode(400, 15003, "the role is not exist")
|
||||
// ErrPermissionNotExist means the permission is not exist
|
||||
ErrPermissionNotExist = NewBcode(404, 15004, "the permission is not exist")
|
||||
// ErrPermissionIsExist means the he permission is exist
|
||||
ErrPermissionIsExist = NewBcode(400, 15005, "the permission name is exist")
|
||||
// ErrPermissionIsUsed means the permission is bound by role, can not be deleted
|
||||
ErrPermissionIsUsed = NewBcode(400, 15006, "the permission have been used")
|
||||
)
|
||||
|
||||
50
pkg/apiserver/utils/context.go
Normal file
50
pkg/apiserver/utils/context.go
Normal file
@@ -0,0 +1,50 @@
|
||||
/*
|
||||
Copyright 2022 The KubeVela Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package utils
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
type contextKey int
|
||||
|
||||
const (
|
||||
projectKey contextKey = iota
|
||||
usernameKey
|
||||
)
|
||||
|
||||
// WithProject carries project in context
|
||||
func WithProject(parent context.Context, project string) context.Context {
|
||||
return context.WithValue(parent, projectKey, project)
|
||||
}
|
||||
|
||||
// ProjectFrom extract project from context
|
||||
func ProjectFrom(ctx context.Context) (string, bool) {
|
||||
project, ok := ctx.Value(projectKey).(string)
|
||||
return project, ok
|
||||
}
|
||||
|
||||
// WithUsername carries username in context
|
||||
func WithUsername(parent context.Context, username string) context.Context {
|
||||
return context.WithValue(parent, usernameKey, username)
|
||||
}
|
||||
|
||||
// UsernameFrom extract username from context
|
||||
func UsernameFrom(ctx context.Context) (string, bool) {
|
||||
username, ok := ctx.Value(usernameKey).(string)
|
||||
return username, ok
|
||||
}
|
||||
@@ -28,6 +28,7 @@ import (
|
||||
"github.com/xlab/treeprint"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions"
|
||||
kerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/utils/strings/slices"
|
||||
@@ -325,6 +326,23 @@ func mergeSubjects(src []rbacv1.Subject, merge []rbacv1.Subject) []rbacv1.Subjec
|
||||
return subs
|
||||
}
|
||||
|
||||
func removeSubjects(src []rbacv1.Subject, toRemove []rbacv1.Subject) []rbacv1.Subject {
|
||||
var subs []rbacv1.Subject
|
||||
for _, sub := range src {
|
||||
add := true
|
||||
for _, t := range toRemove {
|
||||
if reflect.DeepEqual(t, sub) {
|
||||
add = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if add {
|
||||
subs = append(subs, sub)
|
||||
}
|
||||
}
|
||||
return subs
|
||||
}
|
||||
|
||||
// GrantPrivileges grant privileges to identity
|
||||
func GrantPrivileges(ctx context.Context, cli client.Client, privileges []PrivilegeDescription, identity *Identity, writer io.Writer) error {
|
||||
subs := identity.Subjects()
|
||||
@@ -372,3 +390,59 @@ func GrantPrivileges(ctx context.Context, cli client.Client, privileges []Privil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// RevokePrivileges revoke privileges (notice that the revoking process only deletes bond subject in the
|
||||
// RoleBinding/ClusterRoleBinding, it does not ensure the identity's other related privileges are removed to
|
||||
// prevent identity from accessing)
|
||||
func RevokePrivileges(ctx context.Context, cli client.Client, privileges []PrivilegeDescription, identity *Identity, writer io.Writer) error {
|
||||
subs := identity.Subjects()
|
||||
if len(subs) == 0 {
|
||||
return fmt.Errorf("failed to find RBAC subjects in identity")
|
||||
}
|
||||
for _, p := range privileges {
|
||||
cluster := p.GetCluster()
|
||||
_ctx := multicluster.ContextWithClusterName(ctx, cluster)
|
||||
binding := p.GetRoleBinding(subs)
|
||||
kind, key := "ClusterRoleBinding", binding.GetName()
|
||||
if binding.GetNamespace() != "" {
|
||||
kind, key = "RoleBinding", binding.GetNamespace()+"/"+binding.GetName()
|
||||
}
|
||||
var err error
|
||||
remove := false
|
||||
var toDel client.Object
|
||||
switch bindingObj := binding.(type) {
|
||||
case *rbacv1.RoleBinding:
|
||||
obj := &rbacv1.RoleBinding{}
|
||||
if err = cli.Get(_ctx, client.ObjectKeyFromObject(bindingObj), obj); err == nil {
|
||||
bindingObj.Subjects = removeSubjects(obj.Subjects, bindingObj.Subjects)
|
||||
remove = len(bindingObj.Subjects) == 0
|
||||
toDel = obj
|
||||
}
|
||||
case *rbacv1.ClusterRoleBinding:
|
||||
obj := &rbacv1.ClusterRoleBinding{}
|
||||
if err = cli.Get(_ctx, client.ObjectKeyFromObject(bindingObj), obj); err == nil {
|
||||
bindingObj.Subjects = removeSubjects(obj.Subjects, bindingObj.Subjects)
|
||||
remove = len(bindingObj.Subjects) == 0
|
||||
toDel = obj
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
if !kerrors.IsNotFound(err) {
|
||||
return fmt.Errorf("failed to fetch %s %s in cluster %s: %w", kind, key, cluster, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if remove {
|
||||
if err = cli.Delete(_ctx, toDel); err != nil {
|
||||
return fmt.Errorf("failed to delete %s %s in cluster %s: %w", kind, key, cluster, err)
|
||||
}
|
||||
} else {
|
||||
res, err := utils.CreateOrUpdate(_ctx, cli, binding)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update %s %s in cluster %s: %w", kind, key, cluster, err)
|
||||
}
|
||||
_, _ = fmt.Fprintf(writer, "%s %s %s in cluster %s.\n", kind, key, res, cluster)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -25,7 +25,6 @@ import (
|
||||
"k8s.io/apiserver/pkg/endpoints/request"
|
||||
"k8s.io/client-go/transport"
|
||||
|
||||
"github.com/oam-dev/kubevela/apis/types"
|
||||
"github.com/oam-dev/kubevela/pkg/utils"
|
||||
)
|
||||
|
||||
@@ -54,11 +53,8 @@ func (rt *impersonatingRoundTripper) RoundTrip(req *http.Request) (*http.Respons
|
||||
if exists && userInfo != nil {
|
||||
if name := userInfo.GetName(); name != "" {
|
||||
req.Header.Set(transport.ImpersonateUserHeader, name)
|
||||
req.Header.Set(transport.ImpersonateGroupHeader, types.ClusterGatewayAccessorGroup)
|
||||
for _, group := range userInfo.GetGroups() {
|
||||
if group != types.ClusterGatewayAccessorGroup {
|
||||
req.Header.Add(transport.ImpersonateGroupHeader, group)
|
||||
}
|
||||
req.Header.Add(transport.ImpersonateGroupHeader, group)
|
||||
}
|
||||
q := req.URL.Query()
|
||||
q.Add(impersonateKey, "true")
|
||||
|
||||
@@ -31,7 +31,6 @@ import (
|
||||
featuregatetesting "k8s.io/component-base/featuregate/testing"
|
||||
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
|
||||
"github.com/oam-dev/kubevela/apis/types"
|
||||
"github.com/oam-dev/kubevela/pkg/features"
|
||||
"github.com/oam-dev/kubevela/pkg/oam"
|
||||
)
|
||||
@@ -66,21 +65,21 @@ func TestImpersonatingRoundTripper(t *testing.T) {
|
||||
return ContextWithUserInfo(ctx, app)
|
||||
},
|
||||
expectedUser: "system:serviceaccount:vela-system:default",
|
||||
expectedGroup: []string{types.ClusterGatewayAccessorGroup},
|
||||
expectedGroup: nil,
|
||||
},
|
||||
"without service account and app": {
|
||||
ctxFn: func(ctx context.Context) context.Context {
|
||||
return ContextWithUserInfo(ctx, nil)
|
||||
},
|
||||
expectedUser: "",
|
||||
expectedGroup: []string{types.ClusterGatewayAccessorGroup},
|
||||
expectedGroup: nil,
|
||||
},
|
||||
"without service account": {
|
||||
ctxFn: func(ctx context.Context) context.Context {
|
||||
return ContextWithUserInfo(ctx, &v1beta1.Application{})
|
||||
},
|
||||
expectedUser: AuthenticationDefaultUser,
|
||||
expectedGroup: []string{types.ClusterGatewayAccessorGroup},
|
||||
expectedGroup: nil,
|
||||
},
|
||||
"with user and groups": {
|
||||
ctxFn: func(ctx context.Context) context.Context {
|
||||
@@ -92,7 +91,7 @@ func TestImpersonatingRoundTripper(t *testing.T) {
|
||||
return ContextWithUserInfo(ctx, app)
|
||||
},
|
||||
expectedUser: "username",
|
||||
expectedGroup: []string{types.ClusterGatewayAccessorGroup, "kubevela:group1", "kubevela:group2"},
|
||||
expectedGroup: []string{"kubevela:group1", "kubevela:group2"},
|
||||
},
|
||||
}
|
||||
for name, ts := range testSets {
|
||||
|
||||
@@ -29,6 +29,7 @@ import (
|
||||
kerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
@@ -47,6 +48,7 @@ import (
|
||||
common2 "github.com/oam-dev/kubevela/pkg/controller/common"
|
||||
core "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev"
|
||||
"github.com/oam-dev/kubevela/pkg/cue/packages"
|
||||
"github.com/oam-dev/kubevela/pkg/features"
|
||||
monitorContext "github.com/oam-dev/kubevela/pkg/monitor/context"
|
||||
"github.com/oam-dev/kubevela/pkg/monitor/metrics"
|
||||
"github.com/oam-dev/kubevela/pkg/oam"
|
||||
@@ -229,7 +231,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
|
||||
handler.app.Status.Workflow.SuspendState = ""
|
||||
return r.gcResourceTrackers(logCtx, handler, common.ApplicationRunningWorkflow, false, false)
|
||||
}
|
||||
if !workflow.IsFailedAfterRetry(app) {
|
||||
if !workflow.IsFailedAfterRetry(app) || !feature.DefaultMutableFeatureGate.Enabled(features.EnableSuspendOnFailure) {
|
||||
r.stateKeep(logCtx, handler, app)
|
||||
}
|
||||
return r.gcResourceTrackers(logCtx, handler, common.ApplicationWorkflowSuspending, false, true)
|
||||
|
||||
@@ -23,8 +23,11 @@ import (
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
featuregatetesting "k8s.io/component-base/featuregate/testing"
|
||||
"k8s.io/utils/pointer"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
@@ -50,6 +53,7 @@ import (
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
|
||||
stdv1alpha1 "github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
|
||||
velatypes "github.com/oam-dev/kubevela/apis/types"
|
||||
"github.com/oam-dev/kubevela/pkg/features"
|
||||
"github.com/oam-dev/kubevela/pkg/oam"
|
||||
"github.com/oam-dev/kubevela/pkg/oam/testutil"
|
||||
"github.com/oam-dev/kubevela/pkg/oam/util"
|
||||
@@ -424,6 +428,31 @@ var _ = Describe("Test Application Controller", func() {
|
||||
},
|
||||
}
|
||||
|
||||
appWithAffinity := &v1beta1.Application{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Application",
|
||||
APIVersion: "core.oam.dev/v1beta1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app-with-affinity",
|
||||
},
|
||||
Spec: v1beta1.ApplicationSpec{
|
||||
Components: []common.ApplicationComponent{
|
||||
{
|
||||
Name: "myweb",
|
||||
Type: "worker",
|
||||
Properties: &runtime.RawExtension{Raw: []byte("{\"cmd\":[\"sleep\",\"1000\"],\"image\":\"busybox\"}")},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
appWithAffinity.Spec.Components[0].Traits = []common.ApplicationTrait{
|
||||
{
|
||||
Type: "affinity",
|
||||
Properties: &runtime.RawExtension{Raw: []byte("{\"podAffinity\":{\"preferred\":[{\"podAffinityTerm\":{\"labelSelector\":{\"matchExpressions\":[{\"key\": \"security\",\"values\": [\"S1\"]}]},\"namespaces\":[\"default\"],\"topologyKey\": \"kubernetes.io/hostname\"},\"weight\": 1}]}}")},
|
||||
},
|
||||
}
|
||||
|
||||
cd := &v1beta1.ComponentDefinition{}
|
||||
cDDefJson, _ := yaml.YAMLToJSON([]byte(componentDefYaml))
|
||||
k8sObjectsCDJson, _ := yaml.YAMLToJSON([]byte(k8sObjectsComponentDefinitionYaml))
|
||||
@@ -443,6 +472,8 @@ var _ = Describe("Test Application Controller", func() {
|
||||
|
||||
importHubCpuScaler := &v1beta1.TraitDefinition{}
|
||||
|
||||
importPodAffinity := &v1beta1.TraitDefinition{}
|
||||
|
||||
webserverwd := &v1alpha2.ComponentDefinition{}
|
||||
webserverwdJson, _ := yaml.YAMLToJSON([]byte(webComponentDefYaml))
|
||||
|
||||
@@ -494,6 +525,11 @@ var _ = Describe("Test Application Controller", func() {
|
||||
Expect(json.Unmarshal(hubCpuScalerJson, importHubCpuScaler)).Should(BeNil())
|
||||
Expect(k8sClient.Create(ctx, importHubCpuScaler.DeepCopy())).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
|
||||
|
||||
affinityJson, podAffinityErr := yaml.YAMLToJSON([]byte(affinityYaml))
|
||||
Expect(podAffinityErr).ShouldNot(HaveOccurred())
|
||||
Expect(json.Unmarshal(affinityJson, importPodAffinity)).Should(BeNil())
|
||||
Expect(k8sClient.Create(ctx, importPodAffinity.DeepCopy())).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
|
||||
|
||||
Expect(json.Unmarshal(tDDefJson, td)).Should(BeNil())
|
||||
Expect(k8sClient.Create(ctx, td.DeepCopy())).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
|
||||
|
||||
@@ -1864,6 +1900,7 @@ var _ = Describe("Test Application Controller", func() {
|
||||
})
|
||||
|
||||
It("application with dag workflow failed after retries", func() {
|
||||
defer featuregatetesting.SetFeatureGateDuringTest(&testing.T{}, utilfeature.DefaultFeatureGate, features.EnableSuspendOnFailure, true)()
|
||||
ns := corev1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "dag-failed-after-retries",
|
||||
@@ -1906,7 +1943,7 @@ var _ = Describe("Test Application Controller", func() {
|
||||
Expect(checkApp.Status.Phase).Should(BeEquivalentTo(common.ApplicationRunningWorkflow))
|
||||
Expect(checkApp.Status.Workflow.Message).Should(BeEquivalentTo(workflow.MessageInitializingWorkflow))
|
||||
|
||||
By("verify the first twenty reconciles")
|
||||
By("verify the first ten reconciles")
|
||||
for i := 0; i < custom.MaxWorkflowStepErrorRetryTimes; i++ {
|
||||
testutil.ReconcileOnce(reconciler, reconcile.Request{NamespacedName: appKey})
|
||||
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
|
||||
@@ -1915,12 +1952,13 @@ var _ = Describe("Test Application Controller", func() {
|
||||
Expect(checkApp.Status.Workflow.Steps[1].Phase).Should(BeEquivalentTo(common.WorkflowStepPhaseFailed))
|
||||
}
|
||||
|
||||
By("application should be suspended after failed twenty reconciles")
|
||||
By("application should be suspended after failed max reconciles")
|
||||
testutil.ReconcileOnce(reconciler, reconcile.Request{NamespacedName: appKey})
|
||||
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
|
||||
Expect(checkApp.Status.Phase).Should(BeEquivalentTo(common.ApplicationWorkflowSuspending))
|
||||
Expect(checkApp.Status.Workflow.Message).Should(BeEquivalentTo(workflow.MessageFailedAfterRetries))
|
||||
Expect(checkApp.Status.Workflow.Message).Should(BeEquivalentTo(workflow.MessageSuspendFailedAfterRetries))
|
||||
Expect(checkApp.Status.Workflow.Steps[1].Phase).Should(BeEquivalentTo(common.WorkflowStepPhaseFailed))
|
||||
Expect(checkApp.Status.Workflow.Steps[1].Reason).Should(BeEquivalentTo(custom.StatusReasonFailedAfterRetries))
|
||||
|
||||
By("resume the suspended application")
|
||||
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
|
||||
@@ -1952,7 +1990,7 @@ var _ = Describe("Test Application Controller", func() {
|
||||
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
|
||||
Expect(checkApp.Status.Phase).Should(BeEquivalentTo(common.ApplicationRunningWorkflow))
|
||||
|
||||
for i := 0; i < custom.MaxWorkflowStepErrorRetryTimes+1; i++ {
|
||||
for i := 0; i < custom.MaxWorkflowStepErrorRetryTimes-1; i++ {
|
||||
testutil.ReconcileOnce(reconciler, reconcile.Request{NamespacedName: appKey})
|
||||
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
|
||||
Expect(checkApp.Status.Phase).Should(BeEquivalentTo(common.ApplicationRunningWorkflow))
|
||||
@@ -1960,9 +1998,18 @@ var _ = Describe("Test Application Controller", func() {
|
||||
Expect(checkApp.Status.Workflow.Steps[0].Phase).Should(BeEquivalentTo(common.WorkflowStepPhaseRunning))
|
||||
Expect(checkApp.Status.Workflow.Steps[1].Phase).Should(BeEquivalentTo(common.WorkflowStepPhaseFailed))
|
||||
}
|
||||
|
||||
testutil.ReconcileOnce(reconciler, reconcile.Request{NamespacedName: appKey})
|
||||
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
|
||||
Expect(checkApp.Status.Phase).Should(BeEquivalentTo(common.ApplicationRunningWorkflow))
|
||||
Expect(checkApp.Status.Workflow.Message).Should(BeEquivalentTo(string(common.WorkflowStateExecuting)))
|
||||
Expect(checkApp.Status.Workflow.Steps[0].Phase).Should(BeEquivalentTo(common.WorkflowStepPhaseRunning))
|
||||
Expect(checkApp.Status.Workflow.Steps[1].Phase).Should(BeEquivalentTo(common.WorkflowStepPhaseFailed))
|
||||
Expect(checkApp.Status.Workflow.Steps[1].Reason).Should(BeEquivalentTo(custom.StatusReasonFailedAfterRetries))
|
||||
})
|
||||
|
||||
It("application with step by step workflow failed after retries", func() {
|
||||
defer featuregatetesting.SetFeatureGateDuringTest(&testing.T{}, utilfeature.DefaultFeatureGate, features.EnableSuspendOnFailure, true)()
|
||||
ns := corev1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "step-by-step-failed-after-retries",
|
||||
@@ -2028,12 +2075,13 @@ var _ = Describe("Test Application Controller", func() {
|
||||
Expect(checkApp.Status.Workflow.Steps[1].Phase).Should(BeEquivalentTo(common.WorkflowStepPhaseFailed))
|
||||
}
|
||||
|
||||
By("application should be suspended after failed twenty reconciles")
|
||||
By("application should be suspended after failed max reconciles")
|
||||
testutil.ReconcileOnce(reconciler, reconcile.Request{NamespacedName: appKey})
|
||||
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
|
||||
Expect(checkApp.Status.Phase).Should(BeEquivalentTo(common.ApplicationWorkflowSuspending))
|
||||
Expect(checkApp.Status.Workflow.Message).Should(BeEquivalentTo(workflow.MessageFailedAfterRetries))
|
||||
Expect(checkApp.Status.Workflow.Message).Should(BeEquivalentTo(workflow.MessageSuspendFailedAfterRetries))
|
||||
Expect(checkApp.Status.Workflow.Steps[1].Phase).Should(BeEquivalentTo(common.WorkflowStepPhaseFailed))
|
||||
Expect(checkApp.Status.Workflow.Steps[1].Reason).Should(BeEquivalentTo(custom.StatusReasonFailedAfterRetries))
|
||||
|
||||
By("resume the suspended application")
|
||||
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
|
||||
@@ -2046,6 +2094,352 @@ var _ = Describe("Test Application Controller", func() {
|
||||
Expect(checkApp.Status.Workflow.Steps[1].Phase).Should(BeEquivalentTo(common.WorkflowStepPhaseFailed))
|
||||
})
|
||||
|
||||
It("application with sub steps", func() {
|
||||
ns := corev1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app-with-sub-steps",
|
||||
},
|
||||
}
|
||||
Expect(k8sClient.Create(ctx, &ns)).Should(BeNil())
|
||||
healthComponentDef := &v1beta1.ComponentDefinition{}
|
||||
hCDefJson, _ := yaml.YAMLToJSON([]byte(cdDefWithHealthStatusYaml))
|
||||
Expect(json.Unmarshal(hCDefJson, healthComponentDef)).Should(BeNil())
|
||||
healthComponentDef.Name = "worker-with-health"
|
||||
healthComponentDef.Namespace = "app-with-sub-steps"
|
||||
Expect(k8sClient.Create(ctx, healthComponentDef)).Should(BeNil())
|
||||
app := &v1beta1.Application{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Application",
|
||||
APIVersion: "core.oam.dev/v1beta1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app-with-sub-steps",
|
||||
Namespace: "app-with-sub-steps",
|
||||
},
|
||||
Spec: v1beta1.ApplicationSpec{
|
||||
Components: []common.ApplicationComponent{
|
||||
{
|
||||
Name: "myweb1",
|
||||
Type: "worker-with-health",
|
||||
Properties: &runtime.RawExtension{Raw: []byte(`{"cmd":["sleep","1000"],"image":"busybox","lives": "i am lives","enemies": "empty"}`)},
|
||||
},
|
||||
{
|
||||
Name: "myweb2",
|
||||
Type: "worker",
|
||||
Properties: &runtime.RawExtension{Raw: []byte(`{"cmd":["sleep","1000"],"image":"busybox"}`)},
|
||||
},
|
||||
{
|
||||
Name: "myweb3",
|
||||
Type: "worker-with-health",
|
||||
Properties: &runtime.RawExtension{Raw: []byte(`{"cmd":["sleep","1000"],"image":"busybox","lives": "i am lives","enemies": "empty"}`)},
|
||||
},
|
||||
},
|
||||
Workflow: &v1beta1.Workflow{
|
||||
Steps: []v1beta1.WorkflowStep{
|
||||
{
|
||||
Name: "myweb1",
|
||||
Type: "apply-component",
|
||||
Properties: &runtime.RawExtension{Raw: []byte(`{"component":"myweb1"}`)},
|
||||
},
|
||||
{
|
||||
Name: "myweb2",
|
||||
Type: "step-group",
|
||||
SubSteps: []common.WorkflowSubStep{
|
||||
{
|
||||
Name: "myweb2-sub1",
|
||||
Type: "apply-component",
|
||||
DependsOn: []string{"myweb2-sub2"},
|
||||
Properties: &runtime.RawExtension{Raw: []byte(`{"component":"myweb2"}`)},
|
||||
},
|
||||
{
|
||||
Name: "myweb2-sub2",
|
||||
Type: "apply-component",
|
||||
Properties: &runtime.RawExtension{Raw: []byte(`{"component":"myweb3"}`)},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
Expect(k8sClient.Create(context.Background(), app)).Should(BeNil())
|
||||
appKey := types.NamespacedName{Namespace: ns.Name, Name: app.Name}
|
||||
testutil.ReconcileOnceAfterFinalizer(reconciler, reconcile.Request{NamespacedName: appKey})
|
||||
|
||||
expDeployment := &v1.Deployment{}
|
||||
web2Key := types.NamespacedName{Namespace: ns.Name, Name: "myweb2"}
|
||||
Expect(k8sClient.Get(ctx, web2Key, expDeployment)).Should(util.NotFoundMatcher{})
|
||||
web3Key := types.NamespacedName{Namespace: ns.Name, Name: "myweb3"}
|
||||
Expect(k8sClient.Get(ctx, web3Key, expDeployment)).Should(util.NotFoundMatcher{})
|
||||
|
||||
checkApp := &v1beta1.Application{}
|
||||
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
|
||||
web1Key := types.NamespacedName{Namespace: ns.Name, Name: "myweb1"}
|
||||
Expect(k8sClient.Get(ctx, web1Key, expDeployment)).Should(BeNil())
|
||||
|
||||
expDeployment.Status.Replicas = 1
|
||||
expDeployment.Status.ReadyReplicas = 1
|
||||
Expect(k8sClient.Status().Update(ctx, expDeployment)).Should(BeNil())
|
||||
|
||||
testutil.ReconcileOnce(reconciler, reconcile.Request{NamespacedName: appKey})
|
||||
testutil.ReconcileOnce(reconciler, reconcile.Request{NamespacedName: appKey})
|
||||
|
||||
Expect(k8sClient.Get(ctx, web2Key, expDeployment)).Should(util.NotFoundMatcher{})
|
||||
Expect(k8sClient.Get(ctx, web3Key, expDeployment)).Should(BeNil())
|
||||
expDeployment.Status.Replicas = 1
|
||||
expDeployment.Status.ReadyReplicas = 1
|
||||
Expect(k8sClient.Status().Update(ctx, expDeployment)).Should(BeNil())
|
||||
|
||||
testutil.ReconcileOnce(reconciler, reconcile.Request{NamespacedName: appKey})
|
||||
testutil.ReconcileOnce(reconciler, reconcile.Request{NamespacedName: appKey})
|
||||
|
||||
Expect(k8sClient.Get(ctx, web2Key, expDeployment)).Should(BeNil())
|
||||
expDeployment.Status.Replicas = 1
|
||||
expDeployment.Status.ReadyReplicas = 1
|
||||
Expect(k8sClient.Status().Update(ctx, expDeployment)).Should(BeNil())
|
||||
|
||||
testutil.ReconcileOnce(reconciler, reconcile.Request{NamespacedName: appKey})
|
||||
testutil.ReconcileOnce(reconciler, reconcile.Request{NamespacedName: appKey})
|
||||
testutil.ReconcileOnce(reconciler, reconcile.Request{NamespacedName: appKey})
|
||||
checkApp = &v1beta1.Application{}
|
||||
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
|
||||
Expect(checkApp.Status.Phase).Should(BeEquivalentTo(common.ApplicationRunning))
|
||||
})
|
||||
|
||||
It("application with if always in workflow", func() {
|
||||
ns := corev1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app-with-if-always-workflow",
|
||||
},
|
||||
}
|
||||
Expect(k8sClient.Create(ctx, &ns)).Should(BeNil())
|
||||
healthComponentDef := &v1beta1.ComponentDefinition{}
|
||||
hCDefJson, _ := yaml.YAMLToJSON([]byte(cdDefWithHealthStatusYaml))
|
||||
Expect(json.Unmarshal(hCDefJson, healthComponentDef)).Should(BeNil())
|
||||
healthComponentDef.Name = "worker-with-health"
|
||||
healthComponentDef.Namespace = "app-with-if-always-workflow"
|
||||
Expect(k8sClient.Create(ctx, healthComponentDef)).Should(BeNil())
|
||||
app := &v1beta1.Application{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Application",
|
||||
APIVersion: "core.oam.dev/v1beta1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app-with-if-always-workflow",
|
||||
Namespace: "app-with-if-always-workflow",
|
||||
},
|
||||
Spec: v1beta1.ApplicationSpec{
|
||||
Components: []common.ApplicationComponent{
|
||||
{
|
||||
Name: "myweb1",
|
||||
Type: "worker-with-health",
|
||||
Properties: &runtime.RawExtension{Raw: []byte(`{"cmd":["sleep","1000"],"image":"busybox","lives": "i am lives","enemies": "empty"}`)},
|
||||
},
|
||||
{
|
||||
Name: "myweb2",
|
||||
Type: "worker",
|
||||
Properties: &runtime.RawExtension{Raw: []byte(`{"cmd":["sleep","1000"],"image":"busybox"}`)},
|
||||
},
|
||||
{
|
||||
Name: "failed-step",
|
||||
Type: "k8s-objects",
|
||||
Properties: &runtime.RawExtension{Raw: []byte(`{"objects":[{"apiVersion":"v1","kind":"invalid","metadata":{"name":"test1"}}]}`)},
|
||||
},
|
||||
},
|
||||
Workflow: &v1beta1.Workflow{
|
||||
Steps: []v1beta1.WorkflowStep{
|
||||
{
|
||||
Name: "failed-step",
|
||||
Type: "apply-component",
|
||||
Properties: &runtime.RawExtension{Raw: []byte(`{"component":"failed-step"}`)},
|
||||
},
|
||||
{
|
||||
Name: "myweb1",
|
||||
Type: "apply-component",
|
||||
If: "always",
|
||||
Properties: &runtime.RawExtension{Raw: []byte(`{"component":"myweb1"}`)},
|
||||
},
|
||||
{
|
||||
Name: "myweb2",
|
||||
Type: "apply-component",
|
||||
Properties: &runtime.RawExtension{Raw: []byte(`{"component":"myweb2"}`)},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
Expect(k8sClient.Create(context.Background(), app)).Should(BeNil())
|
||||
appKey := types.NamespacedName{Namespace: ns.Name, Name: app.Name}
|
||||
testutil.ReconcileOnce(reconciler, reconcile.Request{NamespacedName: appKey})
|
||||
testutil.ReconcileOnce(reconciler, reconcile.Request{NamespacedName: appKey})
|
||||
|
||||
By("verify the first ten reconciles")
|
||||
for i := 0; i < custom.MaxWorkflowStepErrorRetryTimes; i++ {
|
||||
testutil.ReconcileOnce(reconciler, reconcile.Request{NamespacedName: appKey})
|
||||
}
|
||||
|
||||
expDeployment := &v1.Deployment{}
|
||||
web1Key := types.NamespacedName{Namespace: ns.Name, Name: "myweb1"}
|
||||
Expect(k8sClient.Get(ctx, web1Key, expDeployment)).Should(util.NotFoundMatcher{})
|
||||
web2Key := types.NamespacedName{Namespace: ns.Name, Name: "myweb2"}
|
||||
Expect(k8sClient.Get(ctx, web2Key, expDeployment)).Should(util.NotFoundMatcher{})
|
||||
|
||||
testutil.ReconcileOnce(reconciler, reconcile.Request{NamespacedName: appKey})
|
||||
testutil.ReconcileOnce(reconciler, reconcile.Request{NamespacedName: appKey})
|
||||
|
||||
Expect(k8sClient.Get(ctx, web1Key, expDeployment)).Should(BeNil())
|
||||
Expect(k8sClient.Get(ctx, web2Key, expDeployment)).Should(util.NotFoundMatcher{})
|
||||
|
||||
expDeployment.Status.Replicas = 1
|
||||
expDeployment.Status.ReadyReplicas = 1
|
||||
Expect(k8sClient.Status().Update(ctx, expDeployment)).Should(BeNil())
|
||||
|
||||
testutil.ReconcileOnce(reconciler, reconcile.Request{NamespacedName: appKey})
|
||||
testutil.ReconcileOnce(reconciler, reconcile.Request{NamespacedName: appKey})
|
||||
|
||||
Expect(k8sClient.Get(ctx, web2Key, expDeployment)).Should(util.NotFoundMatcher{})
|
||||
|
||||
checkApp := &v1beta1.Application{}
|
||||
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
|
||||
Expect(checkApp.Status.Phase).Should(BeEquivalentTo(common.ApplicationWorkflowTerminated))
|
||||
})
|
||||
|
||||
It("application with if always in workflow sub steps", func() {
|
||||
ns := corev1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app-with-if-always-workflow-sub-steps",
|
||||
},
|
||||
}
|
||||
Expect(k8sClient.Create(ctx, &ns)).Should(BeNil())
|
||||
healthComponentDef := &v1beta1.ComponentDefinition{}
|
||||
hCDefJson, _ := yaml.YAMLToJSON([]byte(cdDefWithHealthStatusYaml))
|
||||
Expect(json.Unmarshal(hCDefJson, healthComponentDef)).Should(BeNil())
|
||||
healthComponentDef.Name = "worker-with-health"
|
||||
healthComponentDef.Namespace = "app-with-if-always-workflow-sub-steps"
|
||||
Expect(k8sClient.Create(ctx, healthComponentDef)).Should(BeNil())
|
||||
app := &v1beta1.Application{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Application",
|
||||
APIVersion: "core.oam.dev/v1beta1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app-with-if-always-workflow-sub-steps",
|
||||
Namespace: "app-with-if-always-workflow-sub-steps",
|
||||
},
|
||||
Spec: v1beta1.ApplicationSpec{
|
||||
Components: []common.ApplicationComponent{
|
||||
{
|
||||
Name: "myweb1",
|
||||
Type: "worker-with-health",
|
||||
Properties: &runtime.RawExtension{Raw: []byte(`{"cmd":["sleep","1000"],"image":"busybox","lives": "i am lives","enemies": "empty"}`)},
|
||||
},
|
||||
{
|
||||
Name: "myweb2",
|
||||
Type: "worker-with-health",
|
||||
Properties: &runtime.RawExtension{Raw: []byte(`{"cmd":["sleep","1000"],"image":"busybox","lives": "i am lives","enemies": "empty"}`)},
|
||||
},
|
||||
{
|
||||
Name: "myweb3",
|
||||
Type: "worker",
|
||||
Properties: &runtime.RawExtension{Raw: []byte(`{"cmd":["sleep","1000"],"image":"busybox"}`)},
|
||||
},
|
||||
{
|
||||
Name: "failed-step",
|
||||
Type: "k8s-objects",
|
||||
Properties: &runtime.RawExtension{Raw: []byte(`{"objects":[{"apiVersion":"v1","kind":"invalid","metadata":{"name":"test1"}}]}`)},
|
||||
},
|
||||
},
|
||||
Workflow: &v1beta1.Workflow{
|
||||
Steps: []v1beta1.WorkflowStep{
|
||||
{
|
||||
Name: "myweb1",
|
||||
Type: "step-group",
|
||||
SubSteps: []common.WorkflowSubStep{
|
||||
{
|
||||
Name: "myweb1-sub1",
|
||||
Type: "apply-component",
|
||||
If: "always",
|
||||
DependsOn: []string{"myweb1-sub2"},
|
||||
Properties: &runtime.RawExtension{Raw: []byte(`{"component":"myweb1"}`)},
|
||||
},
|
||||
{
|
||||
Name: "myweb1-sub2",
|
||||
Type: "apply-component",
|
||||
Properties: &runtime.RawExtension{Raw: []byte(`{"component":"failed-step"}`)},
|
||||
},
|
||||
{
|
||||
Name: "myweb1-sub3",
|
||||
Type: "apply-component",
|
||||
DependsOn: []string{"myweb1-sub1"},
|
||||
Properties: &runtime.RawExtension{Raw: []byte(`{"component":"myweb2"}`)},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "myweb2",
|
||||
Type: "apply-component",
|
||||
If: "always",
|
||||
Properties: &runtime.RawExtension{Raw: []byte(`{"component":"myweb2"}`)},
|
||||
},
|
||||
{
|
||||
Name: "myweb3",
|
||||
Type: "apply-component",
|
||||
Properties: &runtime.RawExtension{Raw: []byte(`{"component":"myweb3"}`)},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
Expect(k8sClient.Create(context.Background(), app)).Should(BeNil())
|
||||
appKey := types.NamespacedName{Namespace: ns.Name, Name: app.Name}
|
||||
testutil.ReconcileOnce(reconciler, reconcile.Request{NamespacedName: appKey})
|
||||
testutil.ReconcileOnce(reconciler, reconcile.Request{NamespacedName: appKey})
|
||||
|
||||
By("verify the first ten reconciles")
|
||||
for i := 0; i < custom.MaxWorkflowStepErrorRetryTimes; i++ {
|
||||
testutil.ReconcileOnce(reconciler, reconcile.Request{NamespacedName: appKey})
|
||||
}
|
||||
|
||||
expDeployment := &v1.Deployment{}
|
||||
web1Key := types.NamespacedName{Namespace: ns.Name, Name: "myweb1"}
|
||||
Expect(k8sClient.Get(ctx, web1Key, expDeployment)).Should(util.NotFoundMatcher{})
|
||||
web2Key := types.NamespacedName{Namespace: ns.Name, Name: "myweb2"}
|
||||
Expect(k8sClient.Get(ctx, web2Key, expDeployment)).Should(util.NotFoundMatcher{})
|
||||
web3Key := types.NamespacedName{Namespace: ns.Name, Name: "myweb3"}
|
||||
Expect(k8sClient.Get(ctx, web3Key, expDeployment)).Should(util.NotFoundMatcher{})
|
||||
|
||||
testutil.ReconcileOnce(reconciler, reconcile.Request{NamespacedName: appKey})
|
||||
testutil.ReconcileOnce(reconciler, reconcile.Request{NamespacedName: appKey})
|
||||
|
||||
Expect(k8sClient.Get(ctx, web1Key, expDeployment)).Should(BeNil())
|
||||
Expect(k8sClient.Get(ctx, web2Key, expDeployment)).Should(util.NotFoundMatcher{})
|
||||
Expect(k8sClient.Get(ctx, web3Key, expDeployment)).Should(util.NotFoundMatcher{})
|
||||
|
||||
expDeployment.Status.Replicas = 1
|
||||
expDeployment.Status.ReadyReplicas = 1
|
||||
Expect(k8sClient.Status().Update(ctx, expDeployment)).Should(BeNil())
|
||||
|
||||
testutil.ReconcileOnce(reconciler, reconcile.Request{NamespacedName: appKey})
|
||||
testutil.ReconcileOnce(reconciler, reconcile.Request{NamespacedName: appKey})
|
||||
testutil.ReconcileOnce(reconciler, reconcile.Request{NamespacedName: appKey})
|
||||
|
||||
Expect(k8sClient.Get(ctx, web2Key, expDeployment)).Should(BeNil())
|
||||
Expect(k8sClient.Get(ctx, web3Key, expDeployment)).Should(util.NotFoundMatcher{})
|
||||
expDeployment.Status.Replicas = 1
|
||||
expDeployment.Status.ReadyReplicas = 1
|
||||
Expect(k8sClient.Status().Update(ctx, expDeployment)).Should(BeNil())
|
||||
|
||||
testutil.ReconcileOnce(reconciler, reconcile.Request{NamespacedName: appKey})
|
||||
testutil.ReconcileOnce(reconciler, reconcile.Request{NamespacedName: appKey})
|
||||
|
||||
checkApp := &v1beta1.Application{}
|
||||
Expect(k8sClient.Get(ctx, appKey, checkApp)).Should(BeNil())
|
||||
Expect(checkApp.Status.Phase).Should(BeEquivalentTo(common.ApplicationWorkflowTerminated))
|
||||
})
|
||||
|
||||
It("application with input/output run as dag workflow", func() {
|
||||
ns := corev1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@@ -3045,6 +3439,50 @@ var _ = Describe("Test Application Controller", func() {
|
||||
|
||||
Expect(k8sClient.Delete(ctx, app)).Should(BeNil())
|
||||
})
|
||||
|
||||
It("test application with pod affinity will create application", func() {
|
||||
|
||||
ns := &corev1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app-with-affinity",
|
||||
},
|
||||
}
|
||||
Expect(k8sClient.Create(ctx, ns)).Should(BeNil())
|
||||
|
||||
appWithAffinity.SetNamespace(ns.Name)
|
||||
app := appWithAffinity.DeepCopy()
|
||||
Expect(k8sClient.Create(ctx, app)).Should(BeNil())
|
||||
|
||||
appKey := client.ObjectKey{
|
||||
Name: app.Name,
|
||||
Namespace: app.Namespace,
|
||||
}
|
||||
testutil.ReconcileOnceAfterFinalizer(reconciler, reconcile.Request{NamespacedName: appKey})
|
||||
|
||||
By("Check App running successfully")
|
||||
curApp := &v1beta1.Application{}
|
||||
Expect(k8sClient.Get(ctx, appKey, curApp)).Should(BeNil())
|
||||
Expect(curApp.Status.Phase).Should(Equal(common.ApplicationRunning))
|
||||
|
||||
appRevision := &v1beta1.ApplicationRevision{}
|
||||
Expect(k8sClient.Get(ctx, client.ObjectKey{
|
||||
Namespace: app.Namespace,
|
||||
Name: curApp.Status.LatestRevision.Name,
|
||||
}, appRevision)).Should(BeNil())
|
||||
By("Check affiliated resource tracker is created")
|
||||
expectRTName := fmt.Sprintf("%s-%s", appRevision.GetName(), appRevision.GetNamespace())
|
||||
Eventually(func() error {
|
||||
return k8sClient.Get(ctx, client.ObjectKey{Name: expectRTName}, &v1beta1.ResourceTracker{})
|
||||
}, 10*time.Second, 500*time.Millisecond).Should(Succeed())
|
||||
|
||||
By("Check AppRevision Created with the expected workload spec")
|
||||
appRev := &v1beta1.ApplicationRevision{}
|
||||
Eventually(func() error {
|
||||
return k8sClient.Get(ctx, client.ObjectKey{Name: app.Name + "-v1", Namespace: app.GetNamespace()}, appRev)
|
||||
}, 10*time.Second, 500*time.Millisecond).Should(Succeed())
|
||||
|
||||
Expect(k8sClient.Delete(ctx, app)).Should(BeNil())
|
||||
})
|
||||
})
|
||||
|
||||
const (
|
||||
@@ -4555,6 +4993,190 @@ spec:
|
||||
// +usage=Specify the kind of scale target
|
||||
targetKind: *"Deployment" | string
|
||||
}
|
||||
`
|
||||
affinityYaml = `apiVersion: core.oam.dev/v1beta1
|
||||
kind: TraitDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
definition.oam.dev/description: affinity specify affinity and tolerationon K8s pod for your workload which follows the pod spec in path 'spec.template'.
|
||||
labels:
|
||||
custom.definition.oam.dev/ui-hidden: "true"
|
||||
name: affinity
|
||||
namespace: vela-system
|
||||
spec:
|
||||
appliesToWorkloads:
|
||||
- '*'
|
||||
podDisruptive: true
|
||||
schematic:
|
||||
cue:
|
||||
template: |
|
||||
patch: spec: template: spec: {
|
||||
if parameter.podAffinity != _|_ {
|
||||
affinity: podAffinity: {
|
||||
if parameter.podAffinity.required != _|_ {
|
||||
requiredDuringSchedulingIgnoredDuringExecution: [
|
||||
for k in parameter.podAffinity.required {
|
||||
if k.labelSelector != _|_ {
|
||||
labelSelector: k.labelSelector
|
||||
}
|
||||
if k.namespace != _|_ {
|
||||
namespace: k.namespace
|
||||
}
|
||||
topologyKey: k.topologyKey
|
||||
if k.namespaceSelector != _|_ {
|
||||
namespaceSelector: k.namespaceSelector
|
||||
}
|
||||
}]
|
||||
}
|
||||
if parameter.podAffinity.preferred != _|_ {
|
||||
preferredDuringSchedulingIgnoredDuringExecution: [
|
||||
for k in parameter.podAffinity.preferred {
|
||||
weight: k.weight
|
||||
podAffinityTerm: k.podAffinityTerm
|
||||
}]
|
||||
}
|
||||
}
|
||||
}
|
||||
if parameter.podAntiAffinity != _|_ {
|
||||
affinity: podAntiAffinity: {
|
||||
if parameter.podAntiAffinity.required != _|_ {
|
||||
requiredDuringSchedulingIgnoredDuringExecution: [
|
||||
for k in parameter.podAntiAffinity.required {
|
||||
if k.labelSelector != _|_ {
|
||||
labelSelector: k.labelSelector
|
||||
}
|
||||
if k.namespace != _|_ {
|
||||
namespace: k.namespace
|
||||
}
|
||||
topologyKey: k.topologyKey
|
||||
if k.namespaceSelector != _|_ {
|
||||
namespaceSelector: k.namespaceSelector
|
||||
}
|
||||
}]
|
||||
}
|
||||
if parameter.podAntiAffinity.preferred != _|_ {
|
||||
preferredDuringSchedulingIgnoredDuringExecution: [
|
||||
for k in parameter.podAntiAffinity.preferred {
|
||||
weight: k.weight
|
||||
podAffinityTerm: k.podAffinityTerm
|
||||
}]
|
||||
}
|
||||
}
|
||||
}
|
||||
if parameter.nodeAffinity != _|_ {
|
||||
affinity: nodeAffinity: {
|
||||
if parameter.nodeAffinity.required != _|_ {
|
||||
requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: [
|
||||
for k in parameter.nodeAffinity.required.nodeSelectorTerms {
|
||||
if k.matchExpressions != _|_ {
|
||||
matchExpressions: k.matchExpressions
|
||||
}
|
||||
if k.matchFields != _|_ {
|
||||
matchFields: k.matchFields
|
||||
}
|
||||
}]
|
||||
}
|
||||
if parameter.nodeAffinity.preferred != _|_ {
|
||||
preferredDuringSchedulingIgnoredDuringExecution: [
|
||||
for k in parameter.nodeAffinity.preferred {
|
||||
weight: k.weight
|
||||
preference: k.preference
|
||||
}]
|
||||
}
|
||||
}
|
||||
}
|
||||
if parameter.tolerations != _|_ {
|
||||
tolerations: [
|
||||
for k in parameter.tolerations {
|
||||
if k.key != _|_ {
|
||||
key: k.key
|
||||
}
|
||||
if k.effect != _|_ {
|
||||
effect: k.effect
|
||||
}
|
||||
if k.value != _|_ {
|
||||
value: k.value
|
||||
}
|
||||
operator: k.operator
|
||||
if k.tolerationSeconds != _|_ {
|
||||
tolerationSeconds: k.tolerationSeconds
|
||||
}
|
||||
}]
|
||||
}
|
||||
}
|
||||
#labelSelector: {
|
||||
matchLabels?: [string]: string
|
||||
matchExpressions?: [...{
|
||||
key: string
|
||||
operator: *"In" | "NotIn" | "Exists" | "DoesNotExist"
|
||||
values?: [...string]
|
||||
}]
|
||||
}
|
||||
#podAffinityTerm: {
|
||||
labelSelector?: #labelSelector
|
||||
namespaces?: [...string]
|
||||
topologyKey: string
|
||||
namespaceSelector?: #labelSelector
|
||||
}
|
||||
#nodeSelecor: {
|
||||
key: string
|
||||
operator: *"In" | "NotIn" | "Exists" | "DoesNotExist" | "Gt" | "Lt"
|
||||
values?: [...string]
|
||||
}
|
||||
#nodeSelectorTerm: {
|
||||
matchExpressions?: [...#nodeSelecor]
|
||||
matchFields?: [...#nodeSelecor]
|
||||
}
|
||||
parameter: {
|
||||
// +usage=Specify the pod affinity scheduling rules
|
||||
podAffinity?: {
|
||||
// +usage=Specify the required during scheduling ignored during execution
|
||||
required?: [...#podAffinityTerm]
|
||||
// +usage=Specify the preferred during scheduling ignored during execution
|
||||
preferred?: [...{
|
||||
// +usage=Specify weight associated with matching the corresponding podAffinityTerm
|
||||
weight: int & >=1 & <=100
|
||||
// +usage=Specify a set of pods
|
||||
podAffinityTerm: #podAffinityTerm
|
||||
}]
|
||||
}
|
||||
// +usage=Specify the pod anti-affinity scheduling rules
|
||||
podAntiAffinity?: {
|
||||
// +usage=Specify the required during scheduling ignored during execution
|
||||
required?: [...#podAffinityTerm]
|
||||
// +usage=Specify the preferred during scheduling ignored during execution
|
||||
preferred?: [...{
|
||||
// +usage=Specify weight associated with matching the corresponding podAffinityTerm
|
||||
weight: int & >=1 & <=100
|
||||
// +usage=Specify a set of pods
|
||||
podAffinityTerm: #podAffinityTerm
|
||||
}]
|
||||
}
|
||||
// +usage=Specify the node affinity scheduling rules for the pod
|
||||
nodeAffinity?: {
|
||||
// +usage=Specify the required during scheduling ignored during execution
|
||||
required?: {
|
||||
// +usage=Specify a list of node selector
|
||||
nodeSelectorTerms: [...#nodeSelectorTerm]
|
||||
}
|
||||
// +usage=Specify the preferred during scheduling ignored during execution
|
||||
preferred?: [...{
|
||||
// +usage=Specify weight associated with matching the corresponding nodeSelector
|
||||
weight: int & >=1 & <=100
|
||||
// +usage=Specify a node selector
|
||||
preference: #nodeSelectorTerm
|
||||
}]
|
||||
}
|
||||
// +usage=Specify tolerant taint
|
||||
tolerations?: [...{
|
||||
key?: string
|
||||
operator: *"Equal" | "Exists"
|
||||
value?: string
|
||||
effect?: "NoSchedule" | "PreferNoSchedule" | "NoExecute"
|
||||
// +usage=Specify the period of time the toleration
|
||||
tolerationSeconds?: int
|
||||
}]
|
||||
}
|
||||
`
|
||||
)
|
||||
|
||||
|
||||
@@ -30,6 +30,7 @@ import (
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
|
||||
"github.com/oam-dev/kubevela/apis/types"
|
||||
"github.com/oam-dev/kubevela/pkg/appfile"
|
||||
"github.com/oam-dev/kubevela/pkg/auth"
|
||||
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/application/assemble"
|
||||
"github.com/oam-dev/kubevela/pkg/cue/model/value"
|
||||
"github.com/oam-dev/kubevela/pkg/cue/process"
|
||||
@@ -121,6 +122,7 @@ func generateStep(ctx context.Context,
|
||||
DependsOn: subStep.DependsOn,
|
||||
Inputs: subStep.Inputs,
|
||||
Outputs: subStep.Outputs,
|
||||
If: subStep.If,
|
||||
}
|
||||
subTask, err := generateStep(ctx, app, workflowStep, taskDiscover, step.Name)
|
||||
if err != nil {
|
||||
@@ -219,7 +221,7 @@ func (h *AppHandler) checkComponentHealth(appParser *appfile.Parser, appRev *v1b
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
wl.Ctx.SetCtx(ctx)
|
||||
wl.Ctx.SetCtx(auth.ContextWithUserInfo(ctx, h.app))
|
||||
|
||||
readyWorkload, readyTraits, err := renderComponentsAndTraits(h.r.Client, manifest, appRev, clusterName, overrideNamespace, env)
|
||||
if err != nil {
|
||||
@@ -258,7 +260,7 @@ func (h *AppHandler) applyComponentFunc(appParser *appfile.Parser, appRev *v1bet
|
||||
return nil, nil, false, errors.WithMessage(err, "cannot dispatch packaged workload resources")
|
||||
}
|
||||
}
|
||||
wl.Ctx.SetCtx(ctx)
|
||||
wl.Ctx.SetCtx(auth.ContextWithUserInfo(ctx, h.app))
|
||||
|
||||
readyWorkload, readyTraits, err := renderComponentsAndTraits(h.r.Client, manifest, appRev, clusterName, overrideNamespace, env)
|
||||
if err != nil {
|
||||
|
||||
@@ -42,6 +42,7 @@ import (
|
||||
"github.com/oam-dev/kubevela/apis/types"
|
||||
"github.com/oam-dev/kubevela/pkg/appfile"
|
||||
helmapi "github.com/oam-dev/kubevela/pkg/appfile/helm/flux2apis"
|
||||
"github.com/oam-dev/kubevela/pkg/auth"
|
||||
"github.com/oam-dev/kubevela/pkg/component"
|
||||
"github.com/oam-dev/kubevela/pkg/controller/utils"
|
||||
"github.com/oam-dev/kubevela/pkg/cue/model"
|
||||
@@ -542,7 +543,7 @@ func (h *AppHandler) handleComponentRevisionNameSpecified(ctx context.Context, c
|
||||
revisionName := comp.ExternalRevision
|
||||
cr := &appsv1.ControllerRevision{}
|
||||
|
||||
if err := h.r.Client.Get(ctx, client.ObjectKey{Namespace: h.getComponentRevisionNamespace(ctx), Name: revisionName}, cr); err != nil {
|
||||
if err := h.r.Client.Get(auth.ContextWithUserInfo(ctx, h.app), client.ObjectKey{Namespace: h.getComponentRevisionNamespace(ctx), Name: revisionName}, cr); err != nil {
|
||||
if !apierrors.IsNotFound(err) {
|
||||
return errors.Wrapf(err, "failed to get controllerRevision:%s", revisionName)
|
||||
}
|
||||
@@ -592,7 +593,7 @@ func (h *AppHandler) handleComponentRevisionNameUnspecified(ctx context.Context,
|
||||
listOpts := []client.ListOption{client.MatchingLabels{
|
||||
oam.LabelControllerRevisionComponent: comp.Name,
|
||||
}, client.InNamespace(h.getComponentRevisionNamespace(ctx))}
|
||||
if err := h.r.List(ctx, crList, listOpts...); err != nil {
|
||||
if err := h.r.List(auth.ContextWithUserInfo(ctx, h.app), crList, listOpts...); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
@@ -16,6 +16,16 @@ limitations under the License.
|
||||
|
||||
package cue
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
"cuelang.org/go/pkg/encoding/json"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
|
||||
"github.com/oam-dev/kubevela/pkg/cue/model/value"
|
||||
)
|
||||
|
||||
// int data can evaluate with number in CUE, so it's OK if we convert the original float type data to int
|
||||
func isIntegral(val float64) bool {
|
||||
return val == float64(int(val))
|
||||
@@ -55,3 +65,16 @@ func intifyMap(m map[string]interface{}) interface{} {
|
||||
}
|
||||
return m2
|
||||
}
|
||||
|
||||
// FillUnstructuredObject fill runtime.Unstructured to *value.Value
|
||||
func FillUnstructuredObject(v *value.Value, obj runtime.Unstructured, paths ...string) error {
|
||||
var buf bytes.Buffer
|
||||
if err := unstructured.UnstructuredJSONScheme.Encode(obj, &buf); err != nil {
|
||||
return v.FillObject(err.Error(), "err")
|
||||
}
|
||||
expr, err := json.Unmarshal(buf.Bytes())
|
||||
if err != nil {
|
||||
return v.FillObject(err.Error(), "err")
|
||||
}
|
||||
return v.FillObject(expr, paths...)
|
||||
}
|
||||
|
||||
74
pkg/cue/utils_test.go
Normal file
74
pkg/cue/utils_test.go
Normal file
@@ -0,0 +1,74 @@
|
||||
/*
|
||||
Copyright 2022. The KubeVela Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cue
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
|
||||
"github.com/oam-dev/kubevela/pkg/cue/model/value"
|
||||
)
|
||||
|
||||
func TestFillUnstructuredObject(t *testing.T) {
|
||||
testcases := map[string]struct {
|
||||
obj *unstructured.Unstructured
|
||||
json string
|
||||
}{
|
||||
"test unstructured object with nil value": {
|
||||
obj: &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"apiVersion": "apps/v1",
|
||||
"kind": "Deployment",
|
||||
"spec": map[string]interface{}{
|
||||
"template": map[string]interface{}{
|
||||
"metadata": map[string]interface{}{
|
||||
"creationTimestamp": nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
json: `{"object":{"apiVersion":"apps/v1","kind":"Deployment","spec":{"template":{"metadata":{"creationTimestamp":null}}}}}`,
|
||||
},
|
||||
"test unstructured object without nil value": {
|
||||
obj: &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"apiVersion": "apps/v1",
|
||||
"kind": "Deployment",
|
||||
"metadata": map[string]interface{}{
|
||||
"creationTimestamp": "2022-05-25T12:07:02Z",
|
||||
},
|
||||
},
|
||||
},
|
||||
json: `{"object":{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"creationTimestamp":"2022-05-25T12:07:02Z"}}}`,
|
||||
},
|
||||
}
|
||||
|
||||
for name, testcase := range testcases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
value, err := value.NewValue("", nil, "")
|
||||
assert.NoError(t, err)
|
||||
err = FillUnstructuredObject(value, testcase.obj, "object")
|
||||
assert.NoError(t, err)
|
||||
json, err := value.CueValue().MarshalJSON()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, testcase.json, string(json))
|
||||
})
|
||||
}
|
||||
}
|
||||
44
pkg/features/apiserver_features.go
Normal file
44
pkg/features/apiserver_features.go
Normal file
@@ -0,0 +1,44 @@
|
||||
/*
|
||||
Copyright 2022 The KubeVela Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package features
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/component-base/featuregate"
|
||||
)
|
||||
|
||||
var (
|
||||
// APIServerMutableFeatureGate is a mutable version of APIServerFeatureGate
|
||||
APIServerMutableFeatureGate featuregate.MutableFeatureGate = featuregate.NewFeatureGate()
|
||||
|
||||
// APIServerFeatureGate is a shared global FeatureGate for apiserver.
|
||||
APIServerFeatureGate featuregate.FeatureGate = APIServerMutableFeatureGate
|
||||
)
|
||||
|
||||
const (
|
||||
// APIServerEnableImpersonation whether to enable impersonation for APIServer
|
||||
APIServerEnableImpersonation featuregate.Feature = "EnableImpersonation"
|
||||
// APIServerEnableAdminImpersonation whether to disable User admin impersonation for APIServer
|
||||
APIServerEnableAdminImpersonation featuregate.Feature = "EnableAdminImpersonation"
|
||||
)
|
||||
|
||||
func init() {
|
||||
runtime.Must(APIServerMutableFeatureGate.Add(map[featuregate.Feature]featuregate.FeatureSpec{
|
||||
APIServerEnableImpersonation: {Default: false, PreRelease: featuregate.Alpha},
|
||||
APIServerEnableAdminImpersonation: {Default: false, PreRelease: featuregate.Alpha},
|
||||
}))
|
||||
}
|
||||
@@ -33,11 +33,11 @@ const (
|
||||
DeprecatedObjectLabelSelector featuregate.Feature = "DeprecatedObjectLabelSelector"
|
||||
// LegacyResourceTrackerGC enable the gc of legacy resource tracker in managed clusters
|
||||
LegacyResourceTrackerGC featuregate.Feature = "LegacyResourceTrackerGC"
|
||||
// EnableSuspendOnFailure enable suspend on workflow failure
|
||||
EnableSuspendOnFailure featuregate.Feature = "EnableSuspendOnFailure"
|
||||
|
||||
// Edge Features
|
||||
|
||||
// ControllerAutoImpersonation enable the auto impersonation for controller (to use explicit identity for requests)
|
||||
ControllerAutoImpersonation featuregate.Feature = "ControllerAutoImpersonation"
|
||||
// AuthenticateApplication enable the authentication for application
|
||||
AuthenticateApplication featuregate.Feature = "AuthenticateApplication"
|
||||
)
|
||||
@@ -47,7 +47,7 @@ var defaultFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
|
||||
LegacyObjectTypeIdentifier: {Default: false, PreRelease: featuregate.Alpha},
|
||||
DeprecatedObjectLabelSelector: {Default: false, PreRelease: featuregate.Alpha},
|
||||
LegacyResourceTrackerGC: {Default: true, PreRelease: featuregate.Alpha},
|
||||
ControllerAutoImpersonation: {Default: true, PreRelease: featuregate.Alpha},
|
||||
EnableSuspendOnFailure: {Default: false, PreRelease: featuregate.Alpha},
|
||||
AuthenticateApplication: {Default: false, PreRelease: featuregate.Alpha},
|
||||
}
|
||||
|
||||
|
||||
@@ -96,6 +96,8 @@ const (
|
||||
|
||||
// LabelProject recorde the project the resource belong to
|
||||
LabelProject = "core.oam.dev/project"
|
||||
|
||||
LabelResourceRules = "rules.oam.dev/resources"
|
||||
)
|
||||
|
||||
const (
|
||||
|
||||
@@ -25,6 +25,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
|
||||
"github.com/oam-dev/kubevela/pkg/auth"
|
||||
"github.com/oam-dev/kubevela/pkg/multicluster"
|
||||
"github.com/oam-dev/kubevela/pkg/oam"
|
||||
"github.com/oam-dev/kubevela/pkg/resourcetracker"
|
||||
@@ -45,7 +46,7 @@ func (h *resourceKeeper) DispatchComponentRevision(ctx context.Context, cr *v1.C
|
||||
if err = resourcetracker.RecordManifestsInResourceTracker(multicluster.ContextInLocalCluster(ctx), h.Client, rt, []*unstructured.Unstructured{obj}, true, common.WorkflowResourceCreator); err != nil {
|
||||
return errors.Wrapf(err, "failed to record componentrevision %s/%s/%s", oam.GetCluster(cr), cr.Namespace, cr.Name)
|
||||
}
|
||||
if err = h.Client.Create(multicluster.ContextWithClusterName(ctx, oam.GetCluster(cr)), cr); err != nil {
|
||||
if err = h.Client.Create(auth.ContextWithUserInfo(multicluster.ContextWithClusterName(ctx, oam.GetCluster(cr)), h.app), cr); err != nil {
|
||||
return errors.Wrapf(err, "failed to create componentrevision %s/%s/%s", oam.GetCluster(cr), cr.Namespace, cr.Name)
|
||||
}
|
||||
return nil
|
||||
@@ -63,7 +64,7 @@ func (h *resourceKeeper) DeleteComponentRevision(ctx context.Context, cr *v1.Con
|
||||
obj.SetName(cr.Name)
|
||||
obj.SetNamespace(cr.Namespace)
|
||||
obj.SetLabels(cr.Labels)
|
||||
if err = h.Client.Delete(multicluster.ContextWithClusterName(ctx, oam.GetCluster(cr)), cr); err != nil && !errors2.IsNotFound(err) {
|
||||
if err = h.Client.Delete(auth.ContextWithUserInfo(multicluster.ContextWithClusterName(ctx, oam.GetCluster(cr)), h.app), cr); err != nil && !errors2.IsNotFound(err) {
|
||||
return errors.Wrapf(err, "failed to delete componentrevision %s/%s/%s", oam.GetCluster(cr), cr.Namespace, cr.Name)
|
||||
}
|
||||
if err = resourcetracker.DeletedManifestInResourceTracker(multicluster.ContextInLocalCluster(ctx), h.Client, rt, obj, true); err != nil {
|
||||
|
||||
@@ -35,6 +35,7 @@ import (
|
||||
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha1"
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
|
||||
"github.com/oam-dev/kubevela/pkg/auth"
|
||||
"github.com/oam-dev/kubevela/pkg/features"
|
||||
"github.com/oam-dev/kubevela/pkg/monitor/metrics"
|
||||
"github.com/oam-dev/kubevela/pkg/multicluster"
|
||||
@@ -182,7 +183,7 @@ func (h *gcHandler) scan(ctx context.Context) (inactiveRTs []*v1beta1.ResourceTr
|
||||
if rt != nil {
|
||||
inactive := true
|
||||
for _, mr := range rt.Spec.ManagedResources {
|
||||
entry := h.cache.get(ctx, mr)
|
||||
entry := h.cache.get(auth.ContextWithUserInfo(ctx, h.app), mr)
|
||||
if entry.err == nil && (entry.gcExecutorRT != rt || !entry.exists) {
|
||||
continue
|
||||
}
|
||||
@@ -225,7 +226,7 @@ func (h *gcHandler) Mark(ctx context.Context) error {
|
||||
// checkAndRemoveResourceTrackerFinalizer return (all resource recycled, error)
|
||||
func (h *gcHandler) checkAndRemoveResourceTrackerFinalizer(ctx context.Context, rt *v1beta1.ResourceTracker) (bool, v1beta1.ManagedResource, error) {
|
||||
for _, mr := range rt.Spec.ManagedResources {
|
||||
entry := h.cache.get(ctx, mr)
|
||||
entry := h.cache.get(auth.ContextWithUserInfo(ctx, h.app), mr)
|
||||
if entry.err != nil {
|
||||
return false, entry.mr, entry.err
|
||||
}
|
||||
@@ -257,6 +258,7 @@ func (h *gcHandler) Sweep(ctx context.Context) (finished bool, waiting []v1beta1
|
||||
}
|
||||
|
||||
func (h *gcHandler) recycleResourceTracker(ctx context.Context, rt *v1beta1.ResourceTracker) error {
|
||||
ctx = auth.ContextWithUserInfo(ctx, h.app)
|
||||
switch h.cfg.order {
|
||||
case v1alpha1.OrderDependency:
|
||||
for _, mr := range rt.Spec.ManagedResources {
|
||||
@@ -380,14 +382,16 @@ func (h *gcHandler) GarbageCollectComponentRevisionResourceTracker(ctx context.C
|
||||
}
|
||||
var managedResources []v1beta1.ManagedResource
|
||||
for _, cr := range h._crRT.Spec.ManagedResources { // legacy code for rollout-plan
|
||||
_ctx := multicluster.ContextWithClusterName(ctx, cr.Cluster)
|
||||
_ctx = auth.ContextWithUserInfo(_ctx, h.app)
|
||||
if _, exists := inUseComponents[cr.ComponentKey()]; !exists {
|
||||
_cr := &appsv1.ControllerRevision{}
|
||||
err := h.Client.Get(multicluster.ContextWithClusterName(ctx, cr.Cluster), cr.NamespacedName(), _cr)
|
||||
err := h.Client.Get(_ctx, cr.NamespacedName(), _cr)
|
||||
if err != nil && !multicluster.IsNotFoundOrClusterNotExists(err) {
|
||||
return errors.Wrapf(err, "failed to get component revision %s", cr.ResourceKey())
|
||||
}
|
||||
if err == nil {
|
||||
if err = h.Client.Delete(multicluster.ContextWithClusterName(ctx, cr.Cluster), _cr); err != nil && !kerrors.IsNotFound(err) {
|
||||
if err = h.Client.Delete(_ctx, _cr); err != nil && !kerrors.IsNotFound(err) {
|
||||
return errors.Wrapf(err, "failed to delete component revision %s", cr.ResourceKey())
|
||||
}
|
||||
}
|
||||
|
||||
14
pkg/utils/env/env.go
vendored
14
pkg/utils/env/env.go
vendored
@@ -26,6 +26,7 @@ import (
|
||||
"github.com/AlecAivazis/survey/v2"
|
||||
"github.com/pkg/errors"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
|
||||
@@ -69,7 +70,18 @@ func CreateEnv(envArgs *types.EnvMeta) error {
|
||||
return err
|
||||
}
|
||||
}
|
||||
err = utils.CreateOrUpdateNamespace(context.TODO(), c, envArgs.Namespace, utils.MergeOverrideLabels(map[string]string{
|
||||
ctx := context.TODO()
|
||||
namespace, err := utils.GetNamespace(ctx, c, envArgs.Namespace)
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
if namespace != nil {
|
||||
existedEnv := namespace.GetLabels()[oam.LabelNamespaceOfEnvName]
|
||||
if existedEnv != "" && existedEnv != envArgs.Name {
|
||||
return fmt.Errorf("the namespace %s was already assigned to env %s", envArgs.Namespace, existedEnv)
|
||||
}
|
||||
}
|
||||
err = utils.CreateOrUpdateNamespace(ctx, c, envArgs.Namespace, utils.MergeOverrideLabels(map[string]string{
|
||||
oam.LabelControlPlaneNamespaceUsage: oam.VelaNamespaceUsageEnv,
|
||||
}), utils.MergeNoConflictLabels(map[string]string{
|
||||
oam.LabelNamespaceOfEnvName: envArgs.Name,
|
||||
|
||||
81
pkg/utils/env/env_test.go
vendored
81
pkg/utils/env/env_test.go
vendored
@@ -15,3 +15,84 @@ limitations under the License.
|
||||
*/
|
||||
|
||||
package env
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"path/filepath"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/rest"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/envtest"
|
||||
|
||||
"github.com/oam-dev/kubevela/apis/types"
|
||||
"github.com/oam-dev/kubevela/pkg/utils/common"
|
||||
)
|
||||
|
||||
var testEnv *envtest.Environment
|
||||
var cfg *rest.Config
|
||||
var rawClient client.Client
|
||||
var testScheme = runtime.NewScheme()
|
||||
|
||||
func TestCreateEnv(t *testing.T) {
|
||||
|
||||
testEnv = &envtest.Environment{
|
||||
ControlPlaneStartTimeout: time.Minute,
|
||||
ControlPlaneStopTimeout: time.Minute,
|
||||
CRDDirectoryPaths: []string{
|
||||
filepath.Join("../../..", "charts/vela-core/crds"), // this has all the required CRDs,
|
||||
},
|
||||
}
|
||||
var err error
|
||||
cfg, err = testEnv.Start()
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, clientgoscheme.AddToScheme(testScheme))
|
||||
|
||||
rawClient, err = client.New(cfg, client.Options{Scheme: testScheme})
|
||||
assert.NoError(t, err)
|
||||
|
||||
type want struct {
|
||||
data string
|
||||
}
|
||||
testcases := []struct {
|
||||
name string
|
||||
envMeta *types.EnvMeta
|
||||
want want
|
||||
}{
|
||||
{
|
||||
name: "env-application",
|
||||
envMeta: &types.EnvMeta{
|
||||
Name: "env-application",
|
||||
Namespace: "default",
|
||||
},
|
||||
want: want{
|
||||
data: "",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "default",
|
||||
envMeta: &types.EnvMeta{
|
||||
Name: "default",
|
||||
Namespace: "default",
|
||||
},
|
||||
want: want{
|
||||
data: "the namespace default was already assigned to env env-application",
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tc := range testcases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
err := common.SetGlobalClient(rawClient)
|
||||
assert.NoError(t, err)
|
||||
err = CreateEnv(tc.envMeta)
|
||||
if err != nil && cmp.Diff(tc.want.data, err.Error()) != "" {
|
||||
t.Errorf("CreateEnv(...): \n -want: \n%s,\n +got:\n%s", tc.want.data, err.Error())
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -97,6 +97,16 @@ func CreateNamespace(ctx context.Context, kubeClient client.Client, name string,
|
||||
return kubeClient.Create(ctx, obj)
|
||||
}
|
||||
|
||||
// GetNamespace will return a namespace with mutate option
|
||||
func GetNamespace(ctx context.Context, kubeClient client.Client, name string) (*corev1.Namespace, error) {
|
||||
obj := &corev1.Namespace{}
|
||||
err := kubeClient.Get(ctx, client.ObjectKey{Name: name}, obj)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
// UpdateNamespace will update a namespace with mutate option
|
||||
func UpdateNamespace(ctx context.Context, kubeClient client.Client, name string, options ...MutateOption) error {
|
||||
var namespace corev1.Namespace
|
||||
|
||||
@@ -110,7 +110,7 @@ func (h *provider) ListResourcesInApp(ctx wfContext.Context, v *value.Value, act
|
||||
if err != nil {
|
||||
return v.FillObject(err.Error(), "err")
|
||||
}
|
||||
return v.FillObject(appResList, "list")
|
||||
return fillQueryResult(v, appResList, "list")
|
||||
}
|
||||
|
||||
// ListAppliedResources list applied resource from tracker, this provider only queries the metadata.
|
||||
@@ -133,10 +133,10 @@ func (h *provider) ListAppliedResources(ctx wfContext.Context, v *value.Value, a
|
||||
if err != nil {
|
||||
return v.FillObject(err.Error(), "err")
|
||||
}
|
||||
return v.FillObject(appResList, "list")
|
||||
return fillQueryResult(v, appResList, "list")
|
||||
}
|
||||
|
||||
// ListAppliedResources list applied resource from tracker
|
||||
// GetApplicationResourceTree get resource tree of application
|
||||
func (h *provider) GetApplicationResourceTree(ctx wfContext.Context, v *value.Value, act types.Action) error {
|
||||
val, err := v.LookupValue("app")
|
||||
if err != nil {
|
||||
@@ -156,6 +156,11 @@ func (h *provider) GetApplicationResourceTree(ctx wfContext.Context, v *value.Va
|
||||
if err != nil {
|
||||
return v.FillObject(err.Error(), "err")
|
||||
}
|
||||
// merge user defined customize rule before every request.
|
||||
err = mergeCustomRules(context.Background(), h.cli)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, resource := range appResList {
|
||||
root := querytypes.ResourceTreeNode{
|
||||
APIVersion: resource.APIVersion,
|
||||
@@ -167,6 +172,10 @@ func (h *provider) GetApplicationResourceTree(ctx wfContext.Context, v *value.Va
|
||||
}
|
||||
root.LeafNodes, err = iteratorChildResources(context.Background(), resource.Cluster, h.cli, root, 1)
|
||||
if err != nil {
|
||||
// if the resource has been deleted, continue access next appliedResource don't break the whole request
|
||||
if kerrors.IsNotFound(err) {
|
||||
continue
|
||||
}
|
||||
return v.FillObject(err.Error(), "err")
|
||||
}
|
||||
rootObject, err := fetchObjectWithResourceTreeNode(context.Background(), resource.Cluster, h.cli, root)
|
||||
@@ -178,9 +187,18 @@ func (h *provider) GetApplicationResourceTree(ctx wfContext.Context, v *value.Va
|
||||
return v.FillObject(err.Error(), "err")
|
||||
}
|
||||
root.HealthStatus = *rootStatus
|
||||
addInfo, err := additionalInfo(*rootObject)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
root.AdditionalInfo = addInfo
|
||||
root.CreationTimestamp = rootObject.GetCreationTimestamp().Time
|
||||
if !rootObject.GetDeletionTimestamp().IsZero() {
|
||||
root.DeletionTimestamp = rootObject.GetDeletionTimestamp().Time
|
||||
}
|
||||
resource.ResourceTree = &root
|
||||
}
|
||||
return v.FillObject(appResList, "list")
|
||||
return fillQueryResult(v, appResList, "list")
|
||||
}
|
||||
|
||||
func (h *provider) CollectPods(ctx wfContext.Context, v *value.Value, act types.Action) error {
|
||||
@@ -211,7 +229,7 @@ func (h *provider) CollectPods(ctx wfContext.Context, v *value.Value, act types.
|
||||
if err != nil {
|
||||
return v.FillObject(err.Error(), "err")
|
||||
}
|
||||
return v.FillObject(pods, "list")
|
||||
return fillQueryResult(v, pods, "list")
|
||||
}
|
||||
|
||||
func (h *provider) SearchEvents(ctx wfContext.Context, v *value.Value, act types.Action) error {
|
||||
@@ -240,7 +258,7 @@ func (h *provider) SearchEvents(ctx wfContext.Context, v *value.Value, act types
|
||||
if err := h.cli.List(listCtx, &eventList, listOpts...); err != nil {
|
||||
return v.FillObject(err.Error(), "err")
|
||||
}
|
||||
return v.FillObject(eventList.Items, "list")
|
||||
return fillQueryResult(v, eventList.Items, "list")
|
||||
}
|
||||
|
||||
// GeneratorServiceEndpoints generator service endpoints is available for common component type,
|
||||
@@ -370,7 +388,7 @@ func (h *provider) GeneratorServiceEndpoints(wfctx wfContext.Context, v *value.V
|
||||
serviceEndpoints = append(serviceEndpoints, generatorFromService(service, selectorNodeIP, cluster, resource.Component, fmt.Sprintf("/seldon/%s/%s", resource.Namespace, resource.Name))...)
|
||||
}
|
||||
}
|
||||
return v.FillObject(serviceEndpoints, "list")
|
||||
return fillQueryResult(v, serviceEndpoints, "list")
|
||||
}
|
||||
|
||||
var (
|
||||
|
||||
@@ -740,7 +740,7 @@ options: {
|
||||
Name: "vela-system",
|
||||
},
|
||||
})
|
||||
Expect(err).Should(BeNil())
|
||||
Expect(err).Should(SatisfyAny(BeNil(), util.AlreadyExistMatcher{}))
|
||||
for _, s := range testServicelist {
|
||||
ns := "default"
|
||||
if s["namespace"] != nil {
|
||||
|
||||
@@ -19,8 +19,7 @@ package query
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/oam-dev/kubevela/pkg/apiserver/utils/log"
|
||||
"time"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
v12 "k8s.io/api/core/v1"
|
||||
@@ -28,14 +27,25 @@ import (
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
types2 "k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/duration"
|
||||
"k8s.io/kubectl/pkg/util/podutils"
|
||||
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/yaml"
|
||||
|
||||
velatypes "github.com/oam-dev/kubevela/apis/types"
|
||||
"github.com/oam-dev/kubevela/pkg/apiserver/utils/log"
|
||||
"github.com/oam-dev/kubevela/pkg/multicluster"
|
||||
"github.com/oam-dev/kubevela/pkg/oam"
|
||||
"github.com/oam-dev/kubevela/pkg/velaql/providers/query/types"
|
||||
|
||||
helmreleaseapi "github.com/fluxcd/helm-controller/api/v2beta1"
|
||||
helmrepoapi "github.com/fluxcd/source-controller/api/v1beta2"
|
||||
)
|
||||
|
||||
// relationshipKey is the configmap key of relationShip rule
|
||||
var relationshipKey = "rules"
|
||||
|
||||
// set the iterator max depth is 5
|
||||
var maxDepth = 5
|
||||
|
||||
@@ -59,14 +69,24 @@ func init() {
|
||||
{APIVersion: "v1", Kind: "Pod"}: statefulSet2PodListOption,
|
||||
},
|
||||
}
|
||||
globalRule[GroupResourceType{Group: "", Kind: "Service"}] = ChildrenResourcesRule{
|
||||
CareResource: map[ResourceType]genListOptionFunc{
|
||||
{APIVersion: "discovery.k8s.io/v1beta1", Kind: "EndpointSlice"}: nil,
|
||||
{APIVersion: "v1", Kind: "Endpoints"}: service2EndpointListOption,
|
||||
},
|
||||
}
|
||||
globalRule[GroupResourceType{Group: "helm.toolkit.fluxcd.io", Kind: "HelmRelease"}] = ChildrenResourcesRule{
|
||||
CareResource: map[ResourceType]genListOptionFunc{
|
||||
{APIVersion: "apps/v1", Kind: "Deployment"}: nil,
|
||||
{APIVersion: "apps/v1", Kind: "StatefulSet"}: nil,
|
||||
{APIVersion: "v1", Kind: "ConfigMap"}: nil,
|
||||
{APIVersion: "v1", Kind: "Secret"}: nil,
|
||||
{APIVersion: "v1", Kind: "Service"}: nil,
|
||||
{APIVersion: "networking.k8s.io/v1", Kind: "Ingress"}: nil,
|
||||
{APIVersion: "apps/v1", Kind: "Deployment"}: nil,
|
||||
{APIVersion: "apps/v1", Kind: "StatefulSet"}: nil,
|
||||
{APIVersion: "v1", Kind: "ConfigMap"}: nil,
|
||||
{APIVersion: "v1", Kind: "Secret"}: nil,
|
||||
{APIVersion: "v1", Kind: "Service"}: nil,
|
||||
{APIVersion: "v1", Kind: "PersistentVolumeClaim"}: nil,
|
||||
{APIVersion: "networking.k8s.io/v1", Kind: "Ingress"}: nil,
|
||||
{APIVersion: "v1", Kind: "ServiceAccount"}: nil,
|
||||
{APIVersion: "rbac.authorization.k8s.io/v1", Kind: "Role"}: nil,
|
||||
{APIVersion: "rbac.authorization.k8s.io/v1", Kind: "RoleBinding"}: nil,
|
||||
},
|
||||
DefaultGenListOptionFunc: helmRelease2AnyListOption,
|
||||
}
|
||||
@@ -84,6 +104,12 @@ type ResourceType struct {
|
||||
Kind string `json:"kind,omitempty"`
|
||||
}
|
||||
|
||||
// customRule define the customize rule created by user
|
||||
type customRule struct {
|
||||
ParentResourceType *GroupResourceType `json:"parentResourceType,omitempty"`
|
||||
ChildrenResourceType []ResourceType `json:"childrenResourceType,omitempty"`
|
||||
}
|
||||
|
||||
// ChildrenResourcesRule define the relationShip between parentObject and children resource
|
||||
type ChildrenResourcesRule struct {
|
||||
// every subResourceType can have a specified genListOptionFunc.
|
||||
@@ -133,6 +159,19 @@ var statefulSet2PodListOption = func(obj unstructured.Unstructured) (client.List
|
||||
return client.ListOptions{Namespace: sts.Namespace, LabelSelector: stsSelector}, nil
|
||||
}
|
||||
|
||||
var service2EndpointListOption = func(obj unstructured.Unstructured) (client.ListOptions, error) {
|
||||
svc := v12.Service{}
|
||||
err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &svc)
|
||||
if err != nil {
|
||||
return client.ListOptions{}, err
|
||||
}
|
||||
stsSelector, err := v1.LabelSelectorAsSelector(&v1.LabelSelector{MatchLabels: svc.Labels})
|
||||
if err != nil {
|
||||
return client.ListOptions{}, err
|
||||
}
|
||||
return client.ListOptions{Namespace: svc.Namespace, LabelSelector: stsSelector}, nil
|
||||
}
|
||||
|
||||
var helmRelease2AnyListOption = func(obj unstructured.Unstructured) (client.ListOptions, error) {
|
||||
hrSelector, err := v1.LabelSelectorAsSelector(&v1.LabelSelector{MatchLabels: map[string]string{
|
||||
"helm.toolkit.fluxcd.io/name": obj.GetName(),
|
||||
@@ -236,6 +275,59 @@ var checkPodStatus = func(obj unstructured.Unstructured) (*types.HealthStatus, e
|
||||
}, nil
|
||||
}
|
||||
|
||||
var checkHelmReleaseStatus = func(obj unstructured.Unstructured) (*types.HealthStatus, error) {
|
||||
helmRelease := &helmreleaseapi.HelmRelease{}
|
||||
err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &helmRelease)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to convert unstructured helmRelease to typed: %w", err)
|
||||
}
|
||||
if len(helmRelease.Status.Conditions) != 0 {
|
||||
for _, condition := range helmRelease.Status.Conditions {
|
||||
if condition.Type == "Ready" {
|
||||
if condition.Status == v1.ConditionTrue {
|
||||
return &types.HealthStatus{
|
||||
Status: types.HealthStatusHealthy,
|
||||
}, nil
|
||||
}
|
||||
return &types.HealthStatus{
|
||||
Status: types.HealthStatusUnHealthy,
|
||||
Message: condition.Message,
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return &types.HealthStatus{
|
||||
Status: types.HealthStatusUnKnown,
|
||||
}, nil
|
||||
}
|
||||
|
||||
var checkHelmRepoStatus = func(obj unstructured.Unstructured) (*types.HealthStatus, error) {
|
||||
helmRepo := helmrepoapi.HelmRepository{}
|
||||
err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &helmRepo)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to convert unstructured helmRelease to typed: %w", err)
|
||||
}
|
||||
if len(helmRepo.Status.Conditions) != 0 {
|
||||
for _, condition := range helmRepo.Status.Conditions {
|
||||
if condition.Type == "Ready" {
|
||||
if condition.Status == v1.ConditionTrue {
|
||||
return &types.HealthStatus{
|
||||
Status: types.HealthStatusHealthy,
|
||||
Message: condition.Message,
|
||||
}, nil
|
||||
}
|
||||
return &types.HealthStatus{
|
||||
Status: types.HealthStatusUnHealthy,
|
||||
Message: condition.Message,
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return &types.HealthStatus{
|
||||
Status: types.HealthStatusUnKnown,
|
||||
}, nil
|
||||
}
|
||||
|
||||
var checkReplicaSetStatus = func(obj unstructured.Unstructured) (*types.HealthStatus, error) {
|
||||
replicaSet := appsv1.ReplicaSet{}
|
||||
err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &replicaSet)
|
||||
@@ -335,6 +427,19 @@ func checkResourceStatus(obj unstructured.Unstructured) (*types.HealthStatus, er
|
||||
checkFunc = checkReplicaSetStatus
|
||||
default:
|
||||
}
|
||||
case "helm.toolkit.fluxcd.io":
|
||||
switch kind {
|
||||
case "HelmRelease":
|
||||
checkFunc = checkHelmReleaseStatus
|
||||
default:
|
||||
}
|
||||
case "source.toolkit.fluxcd.io":
|
||||
switch kind {
|
||||
case "HelmRepository":
|
||||
checkFunc = checkHelmRepoStatus
|
||||
default:
|
||||
}
|
||||
default:
|
||||
}
|
||||
if checkFunc != nil {
|
||||
return checkFunc(obj)
|
||||
@@ -342,6 +447,162 @@ func checkResourceStatus(obj unstructured.Unstructured) (*types.HealthStatus, er
|
||||
return &types.HealthStatus{Status: types.HealthStatusHealthy}, nil
|
||||
}
|
||||
|
||||
type additionalInfoFunc func(obj unstructured.Unstructured) (map[string]interface{}, error)
|
||||
|
||||
func additionalInfo(obj unstructured.Unstructured) (map[string]interface{}, error) {
|
||||
group := obj.GroupVersionKind().Group
|
||||
kind := obj.GroupVersionKind().Kind
|
||||
var infoFunc additionalInfoFunc
|
||||
switch group {
|
||||
case "":
|
||||
switch kind {
|
||||
case "Pod":
|
||||
infoFunc = podAdditionalInfo
|
||||
case "Service":
|
||||
infoFunc = svcAdditionalInfo
|
||||
}
|
||||
default:
|
||||
}
|
||||
if infoFunc != nil {
|
||||
return infoFunc(obj)
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func svcAdditionalInfo(obj unstructured.Unstructured) (map[string]interface{}, error) {
|
||||
svc := v12.Service{}
|
||||
err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &svc)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to convert unstructured svc to typed: %w", err)
|
||||
}
|
||||
if svc.Spec.Type == v12.ServiceTypeLoadBalancer {
|
||||
var eip string
|
||||
for _, ingress := range svc.Status.LoadBalancer.Ingress {
|
||||
if len(ingress.IP) != 0 {
|
||||
eip = ingress.IP
|
||||
}
|
||||
}
|
||||
if len(eip) == 0 {
|
||||
eip = "pending"
|
||||
}
|
||||
return map[string]interface{}{
|
||||
"EIP": eip,
|
||||
}, nil
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// the logic of this func totaly copy from the source-code of kubernetes tableConvertor
|
||||
// https://github.com/kubernetes/kubernetes/blob/ea0764452222146c47ec826977f49d7001b0ea8c/pkg/printers/internalversion/printers.go#L740
|
||||
// The result is same with the output of kubectl.
|
||||
//nolint
|
||||
func podAdditionalInfo(obj unstructured.Unstructured) (map[string]interface{}, error) {
|
||||
pod := v12.Pod{}
|
||||
err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &pod)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to convert unstructured Pod to typed: %w", err)
|
||||
}
|
||||
|
||||
hasPodReadyCondition := func(conditions []v12.PodCondition) bool {
|
||||
for _, condition := range conditions {
|
||||
if condition.Type == v12.PodReady && condition.Status == v12.ConditionTrue {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
translateTimestampSince := func(timestamp v1.Time) string {
|
||||
if timestamp.IsZero() {
|
||||
return "<unknown>"
|
||||
}
|
||||
|
||||
return duration.HumanDuration(time.Since(timestamp.Time))
|
||||
}
|
||||
|
||||
restarts := 0
|
||||
totalContainers := len(pod.Spec.Containers)
|
||||
readyContainers := 0
|
||||
|
||||
reason := string(pod.Status.Phase)
|
||||
if pod.Status.Reason != "" {
|
||||
reason = pod.Status.Reason
|
||||
}
|
||||
|
||||
initializing := false
|
||||
for i := range pod.Status.InitContainerStatuses {
|
||||
container := pod.Status.InitContainerStatuses[i]
|
||||
restarts += int(container.RestartCount)
|
||||
switch {
|
||||
case container.State.Terminated != nil && container.State.Terminated.ExitCode == 0:
|
||||
continue
|
||||
case container.State.Terminated != nil:
|
||||
// initialization is failed
|
||||
if len(container.State.Terminated.Reason) == 0 {
|
||||
if container.State.Terminated.Signal != 0 {
|
||||
reason = fmt.Sprintf("Init:Signal:%d", container.State.Terminated.Signal)
|
||||
} else {
|
||||
reason = fmt.Sprintf("Init:ExitCode:%d", container.State.Terminated.ExitCode)
|
||||
}
|
||||
} else {
|
||||
reason = "Init:" + container.State.Terminated.Reason
|
||||
}
|
||||
initializing = true
|
||||
case container.State.Waiting != nil && len(container.State.Waiting.Reason) > 0 && container.State.Waiting.Reason != "PodInitializing":
|
||||
reason = "Init:" + container.State.Waiting.Reason
|
||||
initializing = true
|
||||
default:
|
||||
reason = fmt.Sprintf("Init:%d/%d", i, len(pod.Spec.InitContainers))
|
||||
initializing = true
|
||||
}
|
||||
break
|
||||
}
|
||||
if !initializing {
|
||||
restarts = 0
|
||||
hasRunning := false
|
||||
for i := len(pod.Status.ContainerStatuses) - 1; i >= 0; i-- {
|
||||
container := pod.Status.ContainerStatuses[i]
|
||||
|
||||
restarts += int(container.RestartCount)
|
||||
if container.State.Waiting != nil && container.State.Waiting.Reason != "" {
|
||||
reason = container.State.Waiting.Reason
|
||||
} else if container.State.Terminated != nil && container.State.Terminated.Reason != "" {
|
||||
reason = container.State.Terminated.Reason
|
||||
} else if container.State.Terminated != nil && container.State.Terminated.Reason == "" {
|
||||
if container.State.Terminated.Signal != 0 {
|
||||
reason = fmt.Sprintf("Signal:%d", container.State.Terminated.Signal)
|
||||
} else {
|
||||
reason = fmt.Sprintf("ExitCode:%d", container.State.Terminated.ExitCode)
|
||||
}
|
||||
} else if container.Ready && container.State.Running != nil {
|
||||
hasRunning = true
|
||||
readyContainers++
|
||||
}
|
||||
}
|
||||
|
||||
// change pod status back to "Running" if there is at least one container still reporting as "Running" status
|
||||
if reason == "Completed" && hasRunning {
|
||||
if hasPodReadyCondition(pod.Status.Conditions) {
|
||||
reason = "Running"
|
||||
} else {
|
||||
reason = "NotReady"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if pod.DeletionTimestamp != nil && pod.Status.Reason == "NodeLost" {
|
||||
reason = "Unknown"
|
||||
} else if pod.DeletionTimestamp != nil {
|
||||
reason = "Terminating"
|
||||
}
|
||||
return map[string]interface{}{
|
||||
"Ready": fmt.Sprintf("%d/%d", readyContainers, totalContainers),
|
||||
"Status": reason,
|
||||
"Restarts": restarts,
|
||||
"Age": translateTimestampSince(pod.CreationTimestamp),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func fetchObjectWithResourceTreeNode(ctx context.Context, cluster string, k8sClient client.Client, resource types.ResourceTreeNode) (*unstructured.Unstructured, error) {
|
||||
o := unstructured.Unstructured{}
|
||||
o.SetAPIVersion(resource.APIVersion)
|
||||
@@ -439,6 +700,15 @@ func iteratorChildResources(ctx context.Context, cluster string, k8sClient clien
|
||||
return nil, err
|
||||
}
|
||||
rtn.HealthStatus = *healthStatus
|
||||
addInfo, err := additionalInfo(item)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rtn.CreationTimestamp = item.GetCreationTimestamp().Time
|
||||
if !item.GetDeletionTimestamp().IsZero() {
|
||||
rtn.DeletionTimestamp = item.GetDeletionTimestamp().Time
|
||||
}
|
||||
rtn.AdditionalInfo = addInfo
|
||||
resList = append(resList, &rtn)
|
||||
}
|
||||
}
|
||||
@@ -446,3 +716,36 @@ func iteratorChildResources(ctx context.Context, cluster string, k8sClient clien
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// mergeCustomRules merge the customize
|
||||
func mergeCustomRules(ctx context.Context, k8sClient client.Client) error {
|
||||
rulesList := v12.ConfigMapList{}
|
||||
if err := k8sClient.List(ctx, &rulesList, client.InNamespace(velatypes.DefaultKubeVelaNS), client.HasLabels{oam.LabelResourceRules}); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, item := range rulesList.Items {
|
||||
ruleStr := item.Data[relationshipKey]
|
||||
var customRules []*customRule
|
||||
err := yaml.Unmarshal([]byte(ruleStr), &customRules)
|
||||
if err != nil {
|
||||
// don't let one miss-config configmap brake whole process
|
||||
log.Logger.Errorf("relationship rule configamp %s miss config %v", item.Name, err)
|
||||
}
|
||||
for _, rule := range customRules {
|
||||
if cResource, ok := globalRule[*rule.ParentResourceType]; ok {
|
||||
for _, resourceType := range rule.ChildrenResourceType {
|
||||
if _, ok := cResource.CareResource[resourceType]; !ok {
|
||||
cResource.CareResource[resourceType] = nil
|
||||
}
|
||||
}
|
||||
} else {
|
||||
caredResources := map[ResourceType]genListOptionFunc{}
|
||||
for _, resourceType := range rule.ChildrenResourceType {
|
||||
caredResources[resourceType] = nil
|
||||
}
|
||||
globalRule[*rule.ParentResourceType] = ChildrenResourcesRule{DefaultGenListOptionFunc: nil, CareResource: caredResources}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -19,31 +19,32 @@ package query
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
|
||||
types3 "github.com/oam-dev/kubevela/apis/types"
|
||||
"github.com/oam-dev/kubevela/pkg/cue/model/value"
|
||||
"github.com/oam-dev/kubevela/pkg/oam"
|
||||
|
||||
types2 "k8s.io/apimachinery/pkg/types"
|
||||
|
||||
"github.com/oam-dev/kubevela/pkg/oam/util"
|
||||
"github.com/oam-dev/kubevela/pkg/velaql/providers/query/types"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
v12 "k8s.io/api/apps/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
types2 "k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/utils/pointer"
|
||||
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/oam-dev/kubevela/pkg/velaql/providers/query/types"
|
||||
"github.com/fluxcd/helm-controller/api/v2beta1"
|
||||
"github.com/fluxcd/source-controller/api/v1beta2"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestPodStatus(t *testing.T) {
|
||||
@@ -186,6 +187,20 @@ func TestPodStatus(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestService2EndpointOption(t *testing.T) {
|
||||
labels := map[string]string{
|
||||
"service-name": "test",
|
||||
"uid": "test-uid",
|
||||
}
|
||||
u := unstructured.Unstructured{}
|
||||
u.SetAPIVersion("v1")
|
||||
u.SetKind("Service")
|
||||
u.SetLabels(labels)
|
||||
l, err := service2EndpointListOption(u)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "service-name=test,uid=test-uid", l.LabelSelector.String())
|
||||
}
|
||||
|
||||
func TestServiceStatus(t *testing.T) {
|
||||
lbHealthSvc := v1.Service{Spec: v1.ServiceSpec{Type: v1.ServiceTypeLoadBalancer}, Status: v1.ServiceStatus{
|
||||
LoadBalancer: v1.LoadBalancerStatus{
|
||||
@@ -328,6 +343,100 @@ func TestReplicaSetStatus(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestHelmResourceStatus(t *testing.T) {
|
||||
tm := metav1.TypeMeta{APIVersion: "helm.toolkit.fluxcd.io/v2beta1", Kind: "HelmRelease"}
|
||||
healthHr := v2beta1.HelmRelease{TypeMeta: tm, Status: v2beta1.HelmReleaseStatus{Conditions: []metav1.Condition{
|
||||
{
|
||||
Type: "Ready",
|
||||
Status: metav1.ConditionTrue,
|
||||
},
|
||||
}}}
|
||||
unHealthyHr := v2beta1.HelmRelease{TypeMeta: tm, Status: v2beta1.HelmReleaseStatus{Conditions: []metav1.Condition{
|
||||
{
|
||||
Type: "Ready",
|
||||
Status: metav1.ConditionFalse,
|
||||
Message: "some reason",
|
||||
},
|
||||
}}}
|
||||
unKnowHealthyHr := v2beta1.HelmRelease{TypeMeta: tm, Status: v2beta1.HelmReleaseStatus{Conditions: []metav1.Condition{
|
||||
{
|
||||
Type: "OtherType",
|
||||
Status: metav1.ConditionFalse,
|
||||
},
|
||||
}}}
|
||||
testCases := map[string]struct {
|
||||
hr v2beta1.HelmRelease
|
||||
res *types.HealthStatus
|
||||
}{
|
||||
"healthHr": {
|
||||
hr: healthHr,
|
||||
res: &types.HealthStatus{Status: types.HealthStatusHealthy},
|
||||
},
|
||||
"unHealthyHr": {
|
||||
hr: unHealthyHr,
|
||||
res: &types.HealthStatus{Status: types.HealthStatusUnHealthy, Message: "some reason"},
|
||||
},
|
||||
"unKnowHealthyHr": {
|
||||
hr: unKnowHealthyHr,
|
||||
res: &types.HealthStatus{Status: types.HealthStatusUnKnown},
|
||||
},
|
||||
}
|
||||
for _, s := range testCases {
|
||||
obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(s.hr.DeepCopy())
|
||||
assert.NoError(t, err)
|
||||
res, err := checkResourceStatus(unstructured.Unstructured{Object: obj})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, res, s.res)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHelmRepoResourceStatus(t *testing.T) {
|
||||
tm := metav1.TypeMeta{APIVersion: "source.toolkit.fluxcd.io/v1beta2", Kind: "HelmRepository"}
|
||||
healthHr := v1beta2.HelmRepository{TypeMeta: tm, Status: v1beta2.HelmRepositoryStatus{Conditions: []metav1.Condition{
|
||||
{
|
||||
Type: "Ready",
|
||||
Status: metav1.ConditionTrue,
|
||||
},
|
||||
}}}
|
||||
unHealthyHr := v1beta2.HelmRepository{TypeMeta: tm, Status: v1beta2.HelmRepositoryStatus{Conditions: []metav1.Condition{
|
||||
{
|
||||
Type: "Ready",
|
||||
Status: metav1.ConditionFalse,
|
||||
Message: "some reason",
|
||||
},
|
||||
}}}
|
||||
unKnowHealthyHr := v1beta2.HelmRepository{TypeMeta: tm, Status: v1beta2.HelmRepositoryStatus{Conditions: []metav1.Condition{
|
||||
{
|
||||
Type: "OtherType",
|
||||
Status: metav1.ConditionFalse,
|
||||
},
|
||||
}}}
|
||||
testCases := map[string]struct {
|
||||
hr v1beta2.HelmRepository
|
||||
res *types.HealthStatus
|
||||
}{
|
||||
"healthHr": {
|
||||
hr: healthHr,
|
||||
res: &types.HealthStatus{Status: types.HealthStatusHealthy},
|
||||
},
|
||||
"unHealthyHr": {
|
||||
hr: unHealthyHr,
|
||||
res: &types.HealthStatus{Status: types.HealthStatusUnHealthy, Message: "some reason"},
|
||||
},
|
||||
"unKnowHealthyHr": {
|
||||
hr: unKnowHealthyHr,
|
||||
res: &types.HealthStatus{Status: types.HealthStatusUnKnown},
|
||||
},
|
||||
}
|
||||
for _, s := range testCases {
|
||||
obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(s.hr.DeepCopy())
|
||||
assert.NoError(t, err)
|
||||
res, err := checkResourceStatus(unstructured.Unstructured{Object: obj})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, res, s.res)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGenListOption(t *testing.T) {
|
||||
resLabel, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{MatchLabels: map[string]string{"testKey": "testVal"}})
|
||||
assert.NoError(t, err)
|
||||
@@ -368,6 +477,434 @@ func TestGenListOption(t *testing.T) {
|
||||
assert.Equal(t, hrls, client.ListOptions{LabelSelector: hrll})
|
||||
}
|
||||
|
||||
func TestPodAdditionalInfo(t *testing.T) {
|
||||
typeMeta := metav1.TypeMeta{APIVersion: "v1", Kind: "Pod"}
|
||||
|
||||
type testCase struct {
|
||||
pod v1.Pod
|
||||
res map[string]interface{}
|
||||
}
|
||||
|
||||
case1 := testCase{
|
||||
pod: v1.Pod{TypeMeta: typeMeta,
|
||||
ObjectMeta: metav1.ObjectMeta{DeletionTimestamp: &metav1.Time{Time: time.Now()}},
|
||||
Status: v1.PodStatus{
|
||||
InitContainerStatuses: []v1.ContainerStatus{
|
||||
{
|
||||
State: v1.ContainerState{
|
||||
Terminated: &v1.ContainerStateTerminated{
|
||||
ExitCode: 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Reason: "NodeLost"},
|
||||
},
|
||||
res: map[string]interface{}{
|
||||
"Ready": "0/0",
|
||||
"Status": "Unknown",
|
||||
"Restarts": 0,
|
||||
"Age": "<unknown>",
|
||||
},
|
||||
}
|
||||
|
||||
case2 := testCase{
|
||||
pod: v1.Pod{TypeMeta: typeMeta,
|
||||
Status: v1.PodStatus{
|
||||
InitContainerStatuses: []v1.ContainerStatus{
|
||||
{
|
||||
State: v1.ContainerState{
|
||||
Terminated: &v1.ContainerStateTerminated{
|
||||
ExitCode: 127,
|
||||
},
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
res: map[string]interface{}{
|
||||
"Ready": "0/0",
|
||||
"Status": "Init:ExitCode:127",
|
||||
"Restarts": 0,
|
||||
"Age": "<unknown>",
|
||||
},
|
||||
}
|
||||
|
||||
case3 := testCase{
|
||||
pod: v1.Pod{TypeMeta: typeMeta,
|
||||
Status: v1.PodStatus{
|
||||
InitContainerStatuses: []v1.ContainerStatus{
|
||||
{
|
||||
State: v1.ContainerState{
|
||||
Terminated: &v1.ContainerStateTerminated{
|
||||
ExitCode: 127,
|
||||
Signal: 32,
|
||||
},
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
res: map[string]interface{}{
|
||||
"Ready": "0/0",
|
||||
"Status": "Init:Signal:32",
|
||||
"Restarts": 0,
|
||||
"Age": "<unknown>",
|
||||
},
|
||||
}
|
||||
|
||||
case4 := testCase{
|
||||
pod: v1.Pod{TypeMeta: typeMeta,
|
||||
Status: v1.PodStatus{
|
||||
InitContainerStatuses: []v1.ContainerStatus{
|
||||
{
|
||||
State: v1.ContainerState{
|
||||
Terminated: &v1.ContainerStateTerminated{
|
||||
Reason: "OOMKill",
|
||||
ExitCode: 127,
|
||||
},
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
res: map[string]interface{}{
|
||||
"Ready": "0/0",
|
||||
"Status": "Init:OOMKill",
|
||||
"Restarts": 0,
|
||||
"Age": "<unknown>",
|
||||
},
|
||||
}
|
||||
|
||||
case5 := testCase{
|
||||
pod: v1.Pod{TypeMeta: typeMeta,
|
||||
Status: v1.PodStatus{
|
||||
InitContainerStatuses: []v1.ContainerStatus{
|
||||
{
|
||||
State: v1.ContainerState{
|
||||
Terminated: &v1.ContainerStateTerminated{
|
||||
Reason: "OOMKill",
|
||||
ExitCode: 127,
|
||||
},
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
res: map[string]interface{}{
|
||||
"Ready": "0/0",
|
||||
"Status": "Init:OOMKill",
|
||||
"Restarts": 0,
|
||||
"Age": "<unknown>",
|
||||
},
|
||||
}
|
||||
|
||||
case6 := testCase{
|
||||
pod: v1.Pod{TypeMeta: typeMeta,
|
||||
Status: v1.PodStatus{
|
||||
InitContainerStatuses: []v1.ContainerStatus{
|
||||
{
|
||||
State: v1.ContainerState{
|
||||
Waiting: &v1.ContainerStateWaiting{
|
||||
Reason: "ContainerCreating",
|
||||
},
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
res: map[string]interface{}{
|
||||
"Ready": "0/0",
|
||||
"Status": "Init:ContainerCreating",
|
||||
"Restarts": 0,
|
||||
"Age": "<unknown>",
|
||||
},
|
||||
}
|
||||
|
||||
case7 := testCase{
|
||||
pod: v1.Pod{TypeMeta: typeMeta,
|
||||
Spec: v1.PodSpec{
|
||||
InitContainers: []v1.Container{
|
||||
{Name: "test"},
|
||||
}},
|
||||
Status: v1.PodStatus{
|
||||
InitContainerStatuses: []v1.ContainerStatus{
|
||||
{
|
||||
State: v1.ContainerState{
|
||||
Waiting: &v1.ContainerStateWaiting{},
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
res: map[string]interface{}{
|
||||
"Ready": "0/0",
|
||||
"Status": "Init:0/1",
|
||||
"Restarts": 0,
|
||||
"Age": "<unknown>",
|
||||
},
|
||||
}
|
||||
|
||||
case8 := testCase{
|
||||
pod: v1.Pod{TypeMeta: typeMeta,
|
||||
Status: v1.PodStatus{
|
||||
ContainerStatuses: []v1.ContainerStatus{
|
||||
{
|
||||
State: v1.ContainerState{
|
||||
Waiting: &v1.ContainerStateWaiting{
|
||||
Reason: "ContainerCreating",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
res: map[string]interface{}{
|
||||
"Ready": "0/0",
|
||||
"Status": "ContainerCreating",
|
||||
"Restarts": 0,
|
||||
"Age": "<unknown>",
|
||||
},
|
||||
}
|
||||
|
||||
case9 := testCase{
|
||||
pod: v1.Pod{TypeMeta: typeMeta,
|
||||
Status: v1.PodStatus{
|
||||
ContainerStatuses: []v1.ContainerStatus{
|
||||
{
|
||||
State: v1.ContainerState{
|
||||
Terminated: &v1.ContainerStateTerminated{
|
||||
Reason: "OOMKilled",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
res: map[string]interface{}{
|
||||
"Ready": "0/0",
|
||||
"Status": "OOMKilled",
|
||||
"Restarts": 0,
|
||||
"Age": "<unknown>",
|
||||
},
|
||||
}
|
||||
|
||||
case10 := testCase{
|
||||
pod: v1.Pod{TypeMeta: typeMeta,
|
||||
Status: v1.PodStatus{
|
||||
ContainerStatuses: []v1.ContainerStatus{
|
||||
{
|
||||
State: v1.ContainerState{
|
||||
Terminated: &v1.ContainerStateTerminated{
|
||||
Signal: 2,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
res: map[string]interface{}{
|
||||
"Ready": "0/0",
|
||||
"Status": "Signal:2",
|
||||
"Restarts": 0,
|
||||
"Age": "<unknown>",
|
||||
},
|
||||
}
|
||||
|
||||
case11 := testCase{
|
||||
pod: v1.Pod{TypeMeta: typeMeta,
|
||||
Status: v1.PodStatus{
|
||||
ContainerStatuses: []v1.ContainerStatus{
|
||||
{
|
||||
State: v1.ContainerState{
|
||||
Terminated: &v1.ContainerStateTerminated{
|
||||
ExitCode: 127,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
res: map[string]interface{}{
|
||||
"Ready": "0/0",
|
||||
"Status": "ExitCode:127",
|
||||
"Restarts": 0,
|
||||
"Age": "<unknown>",
|
||||
},
|
||||
}
|
||||
|
||||
case12 := testCase{
|
||||
pod: v1.Pod{TypeMeta: typeMeta,
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{
|
||||
Name: "nginx",
|
||||
}},
|
||||
},
|
||||
Status: v1.PodStatus{
|
||||
ContainerStatuses: []v1.ContainerStatus{
|
||||
{
|
||||
State: v1.ContainerState{
|
||||
Running: &v1.ContainerStateRunning{
|
||||
StartedAt: metav1.Now(),
|
||||
},
|
||||
},
|
||||
Ready: true,
|
||||
},
|
||||
},
|
||||
Phase: "Running",
|
||||
},
|
||||
},
|
||||
res: map[string]interface{}{
|
||||
"Ready": "1/1",
|
||||
"Status": "Running",
|
||||
"Restarts": 0,
|
||||
"Age": "<unknown>",
|
||||
},
|
||||
}
|
||||
|
||||
case13 := testCase{
|
||||
pod: v1.Pod{TypeMeta: typeMeta,
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{
|
||||
Name: "nginx",
|
||||
}},
|
||||
},
|
||||
Status: v1.PodStatus{
|
||||
ContainerStatuses: []v1.ContainerStatus{
|
||||
{
|
||||
State: v1.ContainerState{
|
||||
Running: &v1.ContainerStateRunning{
|
||||
StartedAt: metav1.Now(),
|
||||
},
|
||||
},
|
||||
Ready: true,
|
||||
},
|
||||
},
|
||||
Phase: "Completed",
|
||||
Conditions: []v1.PodCondition{
|
||||
{
|
||||
Type: v1.PodReady,
|
||||
Status: v1.ConditionTrue,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
res: map[string]interface{}{
|
||||
"Ready": "1/1",
|
||||
"Status": "Running",
|
||||
"Restarts": 0,
|
||||
"Age": "<unknown>",
|
||||
},
|
||||
}
|
||||
|
||||
case14 := testCase{
|
||||
pod: v1.Pod{TypeMeta: typeMeta,
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{
|
||||
Name: "nginx",
|
||||
}},
|
||||
},
|
||||
Status: v1.PodStatus{
|
||||
ContainerStatuses: []v1.ContainerStatus{
|
||||
{
|
||||
State: v1.ContainerState{
|
||||
Running: &v1.ContainerStateRunning{
|
||||
StartedAt: metav1.Now(),
|
||||
},
|
||||
},
|
||||
Ready: true,
|
||||
},
|
||||
},
|
||||
Phase: "Completed",
|
||||
},
|
||||
},
|
||||
res: map[string]interface{}{
|
||||
"Ready": "1/1",
|
||||
"Status": "NotReady",
|
||||
"Restarts": 0,
|
||||
"Age": "<unknown>",
|
||||
},
|
||||
}
|
||||
|
||||
case15 := testCase{
|
||||
pod: v1.Pod{TypeMeta: typeMeta,
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
DeletionTimestamp: &metav1.Time{Time: time.Now()},
|
||||
},
|
||||
},
|
||||
res: map[string]interface{}{
|
||||
"Ready": "0/0",
|
||||
"Status": "Terminating",
|
||||
"Restarts": 0,
|
||||
"Age": "<unknown>",
|
||||
},
|
||||
}
|
||||
|
||||
testCases := map[string]testCase{
|
||||
"pod1": case1,
|
||||
"pod2": case2,
|
||||
"pod3": case3,
|
||||
"pod4": case4,
|
||||
"pod5": case5,
|
||||
"pod6": case6,
|
||||
"pod7": case7,
|
||||
"pod8": case8,
|
||||
"pod9": case9,
|
||||
"pod10": case10,
|
||||
"pod11": case11,
|
||||
"pod12": case12,
|
||||
"pod13": case13,
|
||||
"pod14": case14,
|
||||
"pod15": case15,
|
||||
}
|
||||
|
||||
for des, t2 := range testCases {
|
||||
fmt.Println(des)
|
||||
u, err := runtime.DefaultUnstructuredConverter.ToUnstructured(t2.pod.DeepCopy())
|
||||
assert.NoError(t, err)
|
||||
res, err := additionalInfo(unstructured.Unstructured{Object: u})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, t2.res, res)
|
||||
}
|
||||
}
|
||||
|
||||
// TestSvcAdditionalInfo verifies the extra "EIP" column additionalInfo
// computes for LoadBalancer Services: the ingress IP when one has been
// assigned, or "pending" while the load balancer is still provisioning.
func TestSvcAdditionalInfo(t *testing.T) {
	typeMeta := metav1.TypeMeta{APIVersion: "v1", Kind: "Service"}

	type testCase struct {
		svc v1.Service
		res map[string]interface{}
	}

	// LoadBalancer service with an assigned ingress IP: EIP reports the IP.
	case1 := testCase{
		svc: v1.Service{TypeMeta: typeMeta, Spec: v1.ServiceSpec{
			Type: v1.ServiceTypeLoadBalancer,
		},
			Status: v1.ServiceStatus{LoadBalancer: v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: "145.2.2.1"}}}}},
		res: map[string]interface{}{
			"EIP": "145.2.2.1",
		},
	}

	// LoadBalancer service with no ingress yet: EIP reports "pending".
	case2 := testCase{
		svc: v1.Service{TypeMeta: typeMeta, Spec: v1.ServiceSpec{
			Type: v1.ServiceTypeLoadBalancer,
		},
			Status: v1.ServiceStatus{}},
		res: map[string]interface{}{
			"EIP": "pending",
		},
	}

	testCases := map[string]testCase{
		"svc1": case1,
		"svc2": case2,
	}

	for des, t2 := range testCases {
		// print the case name so a failing assertion can be attributed
		fmt.Println(des)
		u, err := runtime.DefaultUnstructuredConverter.ToUnstructured(t2.svc.DeepCopy())
		assert.NoError(t, err)
		res, err := additionalInfo(unstructured.Unstructured{Object: u})
		assert.NoError(t, err)
		assert.Equal(t, t2.res, res)
	}
}
|
||||
|
||||
var _ = Describe("unit-test to e2e test", func() {
|
||||
deploy1 := v12.Deployment{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
@@ -844,3 +1381,101 @@ var _ = Describe("unit-test to e2e test", func() {
|
||||
Expect(len(res.List)).Should(Equal(2))
|
||||
})
|
||||
})
|
||||
|
||||
// This suite exercises mergeCustomRules: parent/children relationship rules
// stored in ConfigMaps (labeled oam.LabelResourceRules, in vela-system) must
// be merged into the in-memory globalRule table.
var _ = Describe("test merge globalRules", func() {
	// NOTE(review): indentation inside these YAML payloads was reconstructed;
	// confirm against the repository's relationship-rule schema.
	cloneSetStr := `
- parentResourceType:
    group: apps.kruise.io
    kind: CloneSet
  childrenResourceType:
    - apiVersion: v1
      kind: Pod
    - apiVersion: apps/v1
      kind: ControllerRevision
`
	daemonSetStr := `
- parentResourceType:
    group: apps
    kind: DaemonSet
  childrenResourceType:
    - apiVersion: v1
      kind: Pod
    - apiVersion: apps/v1
      kind: ControllerRevision
`
	stsStr := `
- parentResourceType:
    group: apps
    kind: StatefulSet
  childrenResourceType:
    - apiVersion: v1
      kind: Pod
    - apiVersion: apps/v1
      kind: ControllerRevision
`
	// NOTE(review): this payload is identical to stsStr in the captured
	// source; presumably it was meant to contain invalid YAML to exercise the
	// "don't break on one bad configmap" path — confirm against the repo.
	missConfigedStr := `
- parentResourceType:
    group: apps
    kind: StatefulSet
  childrenResourceType:
    - apiVersion: v1
      kind: Pod
    - apiVersion: apps/v1
      kind: ControllerRevision
`

	It("test merge rules", func() {
		// the namespace may already exist from other suites
		Expect(k8sClient.Create(ctx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "vela-system"}})).Should(SatisfyAny(BeNil(), util.AlreadyExistMatcher{}))
		cloneSetConfigMap := v1.ConfigMap{TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "ConfigMap"},
			ObjectMeta: metav1.ObjectMeta{Namespace: types3.DefaultKubeVelaNS, Name: "cloneset", Labels: map[string]string{oam.LabelResourceRules: "true"}},
			Data:       map[string]string{relationshipKey: cloneSetStr},
		}
		Expect(k8sClient.Create(ctx, &cloneSetConfigMap)).Should(BeNil())

		daemonSetConfigMap := v1.ConfigMap{TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "ConfigMap"},
			ObjectMeta: metav1.ObjectMeta{Namespace: types3.DefaultKubeVelaNS, Name: "daemonset", Labels: map[string]string{oam.LabelResourceRules: "true"}},
			Data:       map[string]string{relationshipKey: daemonSetStr},
		}
		Expect(k8sClient.Create(ctx, &daemonSetConfigMap)).Should(BeNil())

		stsConfigMap := v1.ConfigMap{TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "ConfigMap"},
			ObjectMeta: metav1.ObjectMeta{Namespace: types3.DefaultKubeVelaNS, Name: "sts", Labels: map[string]string{oam.LabelResourceRules: "true"}},
			Data:       map[string]string{relationshipKey: stsStr},
		}
		Expect(k8sClient.Create(ctx, &stsConfigMap)).Should(BeNil())

		missConfigedCm := v1.ConfigMap{TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "ConfigMap"},
			ObjectMeta: metav1.ObjectMeta{Namespace: types3.DefaultKubeVelaNS, Name: "miss-configed", Labels: map[string]string{oam.LabelResourceRules: "true"}},
			Data:       map[string]string{relationshipKey: missConfigedStr},
		}
		Expect(k8sClient.Create(ctx, &missConfigedCm)).Should(BeNil())

		// merge everything, then check each parent type landed in globalRule
		// with its two children and no custom list-option funcs
		Expect(mergeCustomRules(ctx, k8sClient)).Should(BeNil())
		childrenResources, ok := globalRule[GroupResourceType{Group: "apps.kruise.io", Kind: "CloneSet"}]
		Expect(ok).Should(BeTrue())
		Expect(childrenResources.DefaultGenListOptionFunc).Should(BeNil())
		Expect(len(childrenResources.CareResource)).Should(BeEquivalentTo(2))
		specifyFunc, ok := childrenResources.CareResource[ResourceType{APIVersion: "v1", Kind: "Pod"}]
		Expect(ok).Should(BeTrue())
		Expect(specifyFunc).Should(BeNil())

		dsChildrenResources, ok := globalRule[GroupResourceType{Group: "apps", Kind: "DaemonSet"}]
		Expect(ok).Should(BeTrue())
		Expect(dsChildrenResources.DefaultGenListOptionFunc).Should(BeNil())
		Expect(len(dsChildrenResources.CareResource)).Should(BeEquivalentTo(2))
		dsSpecifyFunc, ok := dsChildrenResources.CareResource[ResourceType{APIVersion: "v1", Kind: "Pod"}]
		Expect(ok).Should(BeTrue())
		Expect(dsSpecifyFunc).Should(BeNil())
		crSpecifyFunc, ok := dsChildrenResources.CareResource[ResourceType{APIVersion: "apps/v1", Kind: "ControllerRevision"}]
		Expect(ok).Should(BeTrue())
		Expect(crSpecifyFunc).Should(BeNil())

		stsChildrenResources, ok := globalRule[GroupResourceType{Group: "apps", Kind: "StatefulSet"}]
		Expect(ok).Should(BeTrue())
		Expect(stsChildrenResources.DefaultGenListOptionFunc).Should(BeNil())
		Expect(len(stsChildrenResources.CareResource)).Should(BeEquivalentTo(2))
		stsCrSpecifyFunc, ok := stsChildrenResources.CareResource[ResourceType{APIVersion: "apps/v1", Kind: "ControllerRevision"}]
		Expect(ok).Should(BeTrue())
		Expect(stsCrSpecifyFunc).Should(BeNil())
	})
})
|
||||
|
||||
@@ -21,13 +21,13 @@ type HealthStatusCode string
|
||||
|
||||
const (
|
||||
// HealthStatusHealthy resource is healthy
|
||||
HealthStatusHealthy HealthStatusCode = "HealthStatusHealthy"
|
||||
HealthStatusHealthy HealthStatusCode = "Healthy"
|
||||
// HealthStatusUnHealthy resource is unhealthy
|
||||
HealthStatusUnHealthy HealthStatusCode = "HealthStatusUnHealthy"
|
||||
HealthStatusUnHealthy HealthStatusCode = "UnHealthy"
|
||||
// HealthStatusProgressing resource is still progressing
|
||||
HealthStatusProgressing HealthStatusCode = "HealthStatusProgressing"
|
||||
HealthStatusProgressing HealthStatusCode = "Progressing"
|
||||
// HealthStatusUnKnown health status is unknown
|
||||
HealthStatusUnKnown HealthStatusCode = "HealthStatusUnKnown"
|
||||
HealthStatusUnKnown HealthStatusCode = "UnKnown"
|
||||
)
|
||||
|
||||
// HealthStatus the resource health status
|
||||
|
||||
@@ -19,6 +19,7 @@ package types
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
@@ -105,14 +106,17 @@ type AppliedResource struct {
|
||||
|
||||
// ResourceTreeNode is the tree node of every resource
|
||||
type ResourceTreeNode struct {
|
||||
Cluster string `json:"cluster"`
|
||||
APIVersion string `json:"apiVersion,omitempty"`
|
||||
Kind string `json:"kind"`
|
||||
Namespace string `json:"namespace,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
UID types.UID `json:"uid,omitempty"`
|
||||
HealthStatus HealthStatus `json:"healthStatus,omitempty"`
|
||||
LeafNodes []*ResourceTreeNode `json:"leafNodes,omitempty"`
|
||||
Cluster string `json:"cluster"`
|
||||
APIVersion string `json:"apiVersion,omitempty"`
|
||||
Kind string `json:"kind"`
|
||||
Namespace string `json:"namespace,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
UID types.UID `json:"uid,omitempty"`
|
||||
HealthStatus HealthStatus `json:"healthStatus,omitempty"`
|
||||
DeletionTimestamp time.Time `json:"deletionTimestamp,omitempty"`
|
||||
CreationTimestamp time.Time `json:"creationTimestamp,omitempty"`
|
||||
LeafNodes []*ResourceTreeNode `json:"leafNodes,omitempty"`
|
||||
AdditionalInfo map[string]interface{} `json:"additionalInfo,omitempty"`
|
||||
}
|
||||
|
||||
// GroupVersionKind returns the stored group, version, and kind of an object
|
||||
|
||||
38
pkg/velaql/providers/query/utils.go
Normal file
38
pkg/velaql/providers/query/utils.go
Normal file
@@ -0,0 +1,38 @@
|
||||
/*
|
||||
Copyright 2022. The KubeVela Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package query
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
cuejson "cuelang.org/go/pkg/encoding/json"
|
||||
|
||||
"github.com/oam-dev/kubevela/pkg/cue/model/value"
|
||||
)
|
||||
|
||||
// fillQueryResult help fill query result which contains k8s object to *value.Value
|
||||
func fillQueryResult(v *value.Value, res interface{}, paths ...string) error {
|
||||
b, err := json.Marshal(res)
|
||||
if err != nil {
|
||||
return v.FillObject(err, "err")
|
||||
}
|
||||
expr, err := cuejson.Unmarshal(b)
|
||||
if err != nil {
|
||||
return v.FillObject(err, "err")
|
||||
}
|
||||
return v.FillObject(expr, paths...)
|
||||
}
|
||||
83
pkg/velaql/providers/query/utils_test.go
Normal file
83
pkg/velaql/providers/query/utils_test.go
Normal file
@@ -0,0 +1,83 @@
|
||||
/*
|
||||
Copyright 2022. The KubeVela Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package query
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
|
||||
"github.com/oam-dev/kubevela/pkg/cue/model/value"
|
||||
)
|
||||
|
||||
func TestFillQueryResult(t *testing.T) {
|
||||
testcases := map[string]struct {
|
||||
queryRes interface{}
|
||||
json string
|
||||
}{
|
||||
"test fill query result which contains *unstructured.Unstructured": {
|
||||
queryRes: []Resource{
|
||||
{
|
||||
Cluster: "local",
|
||||
Component: "web",
|
||||
Revision: "v1",
|
||||
Object: &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"apiVersion": "apps/v1",
|
||||
"kind": "Deployment",
|
||||
"spec": map[string]interface{}{
|
||||
"template": map[string]interface{}{
|
||||
"metadata": map[string]interface{}{
|
||||
"creationTimestamp": nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Cluster: "ap-southeast-1",
|
||||
Component: "web",
|
||||
Revision: "v2",
|
||||
Object: &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"apiVersion": "apps/v1",
|
||||
"kind": "Deployment",
|
||||
"metadata": map[string]interface{}{
|
||||
"creationTimestamp": "2022-05-25T12:07:02Z",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
json: `{"list":[{"cluster":"local","component":"web","revision":"v1","object":{"apiVersion":"apps/v1","kind":"Deployment","spec":{"template":{"metadata":{"creationTimestamp":null}}}}},{"cluster":"ap-southeast-1","component":"web","revision":"v2","object":{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"creationTimestamp":"2022-05-25T12:07:02Z"}}}]}`,
|
||||
},
|
||||
}
|
||||
|
||||
for name, testcase := range testcases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
value, err := value.NewValue("", nil, "")
|
||||
assert.NoError(t, err)
|
||||
err = fillQueryResult(value, testcase.queryRes, "list")
|
||||
assert.NoError(t, err)
|
||||
json, err := value.CueValue().MarshalJSON()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, testcase.json, string(json))
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -30,7 +30,6 @@ import (
|
||||
"sigs.k8s.io/controller-runtime/pkg/webhook"
|
||||
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
|
||||
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
|
||||
"github.com/oam-dev/kubevela/pkg/auth"
|
||||
"github.com/oam-dev/kubevela/pkg/features"
|
||||
@@ -52,7 +51,7 @@ func (h *MutatingHandler) Handle(ctx context.Context, req admission.Request) adm
|
||||
return admission.Patched("")
|
||||
}
|
||||
|
||||
if slices.Contains(req.UserInfo.Groups, common.Group) || slices.Contains(h.skipUsers, req.UserInfo.Username) {
|
||||
if slices.Contains(h.skipUsers, req.UserInfo.Username) {
|
||||
return admission.Patched("")
|
||||
}
|
||||
|
||||
@@ -86,11 +85,9 @@ func (h *MutatingHandler) InjectDecoder(d *admission.Decoder) error {
|
||||
func RegisterMutatingHandler(mgr manager.Manager) {
|
||||
server := mgr.GetWebhookServer()
|
||||
handler := &MutatingHandler{}
|
||||
if !utilfeature.DefaultMutableFeatureGate.Enabled(features.ControllerAutoImpersonation) {
|
||||
if userInfo := utils.GetUserInfoFromConfig(mgr.GetConfig()); userInfo != nil {
|
||||
klog.Infof("[ApplicationMutatingHandler] add skip user %s", userInfo.Username)
|
||||
handler.skipUsers = []string{userInfo.Username}
|
||||
}
|
||||
if userInfo := utils.GetUserInfoFromConfig(mgr.GetConfig()); userInfo != nil {
|
||||
klog.Infof("[ApplicationMutatingHandler] add skip user %s", userInfo.Username)
|
||||
handler.skipUsers = []string{userInfo.Username}
|
||||
}
|
||||
server.Register("/mutating-core-oam-dev-v1beta1-applications", &webhook.Admission{Handler: handler})
|
||||
}
|
||||
|
||||
@@ -29,8 +29,8 @@ import (
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
|
||||
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
|
||||
"github.com/oam-dev/kubevela/apis/types"
|
||||
"github.com/oam-dev/kubevela/pkg/features"
|
||||
"github.com/oam-dev/kubevela/pkg/oam"
|
||||
)
|
||||
@@ -40,7 +40,7 @@ var _ = Describe("Test Application Mutator", func() {
|
||||
var mutatingHandler *MutatingHandler
|
||||
|
||||
BeforeEach(func() {
|
||||
mutatingHandler = &MutatingHandler{}
|
||||
mutatingHandler = &MutatingHandler{skipUsers: []string{types.VelaCoreName}}
|
||||
Expect(mutatingHandler.InjectDecoder(decoder)).Should(BeNil())
|
||||
})
|
||||
|
||||
@@ -55,7 +55,7 @@ var _ = Describe("Test Application Mutator", func() {
|
||||
Expect(utilfeature.DefaultMutableFeatureGate.Set(fmt.Sprintf("%s=true", features.AuthenticateApplication))).Should(Succeed())
|
||||
resp := mutatingHandler.Handle(ctx, admission.Request{
|
||||
AdmissionRequest: admissionv1.AdmissionRequest{
|
||||
UserInfo: authv1.UserInfo{Groups: []string{common.Group}},
|
||||
UserInfo: authv1.UserInfo{Username: types.VelaCoreName},
|
||||
}})
|
||||
Expect(resp.Allowed).Should(BeTrue())
|
||||
Expect(resp.Patches).Should(BeNil())
|
||||
|
||||
@@ -28,18 +28,8 @@ import (
|
||||
wfContext "github.com/oam-dev/kubevela/pkg/workflow/context"
|
||||
)
|
||||
|
||||
const (
|
||||
// ReadyComponent is the key for depends on in workflow context
|
||||
ReadyComponent = "readyComponent__"
|
||||
)
|
||||
|
||||
// Input set data to parameter.
|
||||
func Input(ctx wfContext.Context, paramValue *value.Value, step v1beta1.WorkflowStep) error {
|
||||
for _, depend := range step.DependsOn {
|
||||
if _, err := ctx.GetVar(ReadyComponent, depend); err != nil {
|
||||
return errors.WithMessagef(err, "the depends on component [%s] is not ready", depend)
|
||||
}
|
||||
}
|
||||
for _, input := range step.Inputs {
|
||||
inputValue, err := ctx.GetVar(strings.Split(input.From, ".")...)
|
||||
if err != nil {
|
||||
@@ -66,13 +56,6 @@ func Output(ctx wfContext.Context, taskValue *value.Value, step v1beta1.Workflow
|
||||
if err := json.Unmarshal(js, &o); err != nil {
|
||||
return err
|
||||
}
|
||||
ready, err := value.NewValue(`true`, nil, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := ctx.SetVar(ready, ReadyComponent, o.Name); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
for _, output := range step.Outputs {
|
||||
|
||||
@@ -43,10 +43,6 @@ func TestInput(t *testing.T) {
|
||||
r.NoError(err)
|
||||
err = wfCtx.SetVar(score, "foo")
|
||||
r.NoError(err)
|
||||
ready, err := value.NewValue(`true`, nil, "")
|
||||
r.NoError(err)
|
||||
err = wfCtx.SetVar(ready, ReadyComponent, "mystep")
|
||||
r.NoError(err)
|
||||
err = Input(wfCtx, paramValue, v1beta1.WorkflowStep{
|
||||
DependsOn: []string{"mystep"},
|
||||
Inputs: common.StepInputs{{
|
||||
@@ -85,12 +81,6 @@ output: score: 99
|
||||
s, err := result.String()
|
||||
r.NoError(err)
|
||||
r.Equal(s, `99
|
||||
`)
|
||||
ready, err := wfCtx.GetVar(ReadyComponent, "mystep")
|
||||
r.NoError(err)
|
||||
s, err = ready.String()
|
||||
r.NoError(err)
|
||||
r.Equal(s, `true
|
||||
`)
|
||||
}
|
||||
|
||||
|
||||
@@ -26,6 +26,7 @@ import (
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
|
||||
"github.com/oam-dev/kubevela/pkg/auth"
|
||||
"github.com/oam-dev/kubevela/pkg/cue"
|
||||
"github.com/oam-dev/kubevela/pkg/cue/model"
|
||||
"github.com/oam-dev/kubevela/pkg/cue/model/value"
|
||||
"github.com/oam-dev/kubevela/pkg/multicluster"
|
||||
@@ -92,7 +93,7 @@ func (h *provider) Apply(ctx wfContext.Context, v *value.Value, act types.Action
|
||||
if err := h.apply(deployCtx, cluster, common.WorkflowResourceCreator, workload); err != nil {
|
||||
return err
|
||||
}
|
||||
return v.FillObject(workload.Object, "value")
|
||||
return cue.FillUnstructuredObject(v, workload, "value")
|
||||
}
|
||||
|
||||
// ApplyInParallel create or update CRs in parallel.
|
||||
@@ -136,7 +137,6 @@ func (h *provider) Read(ctx wfContext.Context, v *value.Value, act types.Action)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
obj := new(unstructured.Unstructured)
|
||||
if err := val.UnmarshalTo(obj); err != nil {
|
||||
return err
|
||||
@@ -154,7 +154,7 @@ func (h *provider) Read(ctx wfContext.Context, v *value.Value, act types.Action)
|
||||
if err := h.cli.Get(readCtx, key, obj); err != nil {
|
||||
return v.FillObject(err.Error(), "err")
|
||||
}
|
||||
return v.FillObject(obj.Object, "value")
|
||||
return cue.FillUnstructuredObject(v, obj, "value")
|
||||
}
|
||||
|
||||
// List lists CRs from cluster.
|
||||
@@ -197,7 +197,7 @@ func (h *provider) List(ctx wfContext.Context, v *value.Value, act types.Action)
|
||||
if err := h.cli.List(readCtx, list, listOpts...); err != nil {
|
||||
return v.FillObject(err.Error(), "err")
|
||||
}
|
||||
return v.FillObject(list, "list")
|
||||
return cue.FillUnstructuredObject(v, list, "list")
|
||||
}
|
||||
|
||||
// Delete deletes CR from cluster.
|
||||
|
||||
@@ -208,7 +208,7 @@ func applyComponents(apply oamProvider.ComponentApply, healthCheck oamProvider.C
|
||||
var reasons []string
|
||||
for i, res := range results {
|
||||
if res.err != nil {
|
||||
errs = append(errs, res.err)
|
||||
errs = append(errs, fmt.Errorf("error encountered in cluster %s: %w", todoTasks[i].placement.Cluster, res.err))
|
||||
}
|
||||
if !res.healthy {
|
||||
allHealthy = false
|
||||
|
||||
@@ -29,4 +29,6 @@ type DeployWorkflowStepSpec struct {
|
||||
Policies []string `json:"policies,omitempty"`
|
||||
// Parallelism allows setting parallelism for the component deploy process
|
||||
Parallelism *int `json:"parallelism,omitempty"`
|
||||
// IgnoreTerraformComponent default is true, true means this step will apply the components without the terraform workload.
|
||||
IgnoreTerraformComponent *bool `json:"ignoreTerraformComponent,omitempty"`
|
||||
}
|
||||
|
||||
@@ -24,6 +24,7 @@ import (
|
||||
|
||||
"cuelang.org/go/cue"
|
||||
"github.com/pkg/errors"
|
||||
"k8s.io/apiserver/pkg/util/feature"
|
||||
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
|
||||
@@ -32,6 +33,7 @@ import (
|
||||
"github.com/oam-dev/kubevela/pkg/cue/model/value"
|
||||
"github.com/oam-dev/kubevela/pkg/cue/packages"
|
||||
"github.com/oam-dev/kubevela/pkg/cue/process"
|
||||
"github.com/oam-dev/kubevela/pkg/features"
|
||||
monitorContext "github.com/oam-dev/kubevela/pkg/monitor/context"
|
||||
wfContext "github.com/oam-dev/kubevela/pkg/workflow/context"
|
||||
"github.com/oam-dev/kubevela/pkg/workflow/hooks"
|
||||
@@ -47,6 +49,8 @@ var (
|
||||
const (
|
||||
// StatusReasonWait is the reason of the workflow progress condition which is Wait.
|
||||
StatusReasonWait = "Wait"
|
||||
// StatusReasonSkip is the reason of the workflow progress condition which is Skip.
|
||||
StatusReasonSkip = "Skip"
|
||||
// StatusReasonRendering is the reason of the workflow progress condition which is Rendering.
|
||||
StatusReasonRendering = "Rendering"
|
||||
// StatusReasonExecute is the reason of the workflow progress condition which is Execute.
|
||||
@@ -59,6 +63,8 @@ const (
|
||||
StatusReasonParameter = "ProcessParameter"
|
||||
// StatusReasonOutput is the reason of the workflow progress condition which is Output.
|
||||
StatusReasonOutput = "Output"
|
||||
// StatusReasonFailedAfterRetries is the reason of the workflow progress condition which is FailedAfterRetries.
|
||||
StatusReasonFailedAfterRetries = "FailedAfterRetries"
|
||||
)
|
||||
|
||||
// LoadTaskTemplate gets the workflowStep definition from cluster and resolve it.
|
||||
@@ -85,7 +91,8 @@ func (t *TaskLoader) GetTaskGenerator(ctx context.Context, name string) (wfTypes
|
||||
type taskRunner struct {
|
||||
name string
|
||||
run func(ctx wfContext.Context, options *wfTypes.TaskRunOptions) (common.StepStatus, *wfTypes.Operation, error)
|
||||
checkPending func(ctx wfContext.Context) bool
|
||||
checkPending func(ctx wfContext.Context, stepStatus map[string]common.StepStatus) bool
|
||||
skip func(dependsOnPhase common.WorkflowStepPhase, stepStatus map[string]common.StepStatus) (common.StepStatus, bool)
|
||||
}
|
||||
|
||||
// Name return step name.
|
||||
@@ -99,10 +106,15 @@ func (tr *taskRunner) Run(ctx wfContext.Context, options *wfTypes.TaskRunOptions
|
||||
}
|
||||
|
||||
// Pending check task should be executed or not.
|
||||
func (tr *taskRunner) Pending(ctx wfContext.Context) bool {
|
||||
return tr.checkPending(ctx)
|
||||
func (tr *taskRunner) Pending(ctx wfContext.Context, stepStatus map[string]common.StepStatus) bool {
|
||||
return tr.checkPending(ctx, stepStatus)
|
||||
}
|
||||
|
||||
func (tr *taskRunner) Skip(dependsOnPhase common.WorkflowStepPhase, stepStatus map[string]common.StepStatus) (common.StepStatus, bool) {
|
||||
return tr.skip(dependsOnPhase, stepStatus)
|
||||
}
|
||||
|
||||
// nolint:gocyclo
|
||||
func (t *TaskLoader) makeTaskGenerator(templ string) (wfTypes.TaskGenerator, error) {
|
||||
return func(wfStep v1beta1.WorkflowStep, genOpt *wfTypes.GeneratorOptions) (wfTypes.TaskRunner, error) {
|
||||
|
||||
@@ -141,18 +153,21 @@ func (t *TaskLoader) makeTaskGenerator(templ string) (wfTypes.TaskGenerator, err
|
||||
|
||||
tRunner := new(taskRunner)
|
||||
tRunner.name = wfStep.Name
|
||||
tRunner.checkPending = func(ctx wfContext.Context) bool {
|
||||
for _, depend := range wfStep.DependsOn {
|
||||
if _, err := ctx.GetVar(hooks.ReadyComponent, depend); err != nil {
|
||||
return true
|
||||
}
|
||||
tRunner.checkPending = func(ctx wfContext.Context, stepStatus map[string]common.StepStatus) bool {
|
||||
return CheckPending(ctx, wfStep, stepStatus)
|
||||
}
|
||||
tRunner.skip = func(dependsOnPhase common.WorkflowStepPhase, stepStatus map[string]common.StepStatus) (common.StepStatus, bool) {
|
||||
if feature.DefaultMutableFeatureGate.Enabled(features.EnableSuspendOnFailure) {
|
||||
return exec.status(), false
|
||||
}
|
||||
for _, input := range wfStep.Inputs {
|
||||
if _, err := ctx.GetVar(strings.Split(input.From, ".")...); err != nil {
|
||||
return true
|
||||
}
|
||||
skip := SkipTaskRunner(&SkipOptions{
|
||||
If: wfStep.If,
|
||||
DependsOnPhase: dependsOnPhase,
|
||||
})
|
||||
if skip {
|
||||
exec.Skip("")
|
||||
}
|
||||
return false
|
||||
return exec.status(), skip
|
||||
}
|
||||
tRunner.run = func(ctx wfContext.Context, options *wfTypes.TaskRunOptions) (common.StepStatus, *wfTypes.Operation, error) {
|
||||
if options.GetTracer == nil {
|
||||
@@ -261,6 +276,7 @@ type executor struct {
|
||||
terminated bool
|
||||
failedAfterRetries bool
|
||||
wait bool
|
||||
skip bool
|
||||
|
||||
tracer monitorContext.Context
|
||||
}
|
||||
@@ -289,6 +305,13 @@ func (exec *executor) Wait(message string) {
|
||||
exec.wfStatus.Message = message
|
||||
}
|
||||
|
||||
func (exec *executor) Skip(message string) {
|
||||
exec.skip = true
|
||||
exec.wfStatus.Phase = common.WorkflowStepPhaseSkipped
|
||||
exec.wfStatus.Reason = StatusReasonSkip
|
||||
exec.wfStatus.Message = message
|
||||
}
|
||||
|
||||
func (exec *executor) err(ctx wfContext.Context, err error, reason string) {
|
||||
exec.wait = true
|
||||
exec.wfStatus.Phase = common.WorkflowStepPhaseFailed
|
||||
@@ -302,6 +325,7 @@ func (exec *executor) checkErrorTimes(ctx wfContext.Context) {
|
||||
if times >= MaxWorkflowStepErrorRetryTimes {
|
||||
exec.wait = false
|
||||
exec.failedAfterRetries = true
|
||||
exec.wfStatus.Reason = StatusReasonFailedAfterRetries
|
||||
}
|
||||
}
|
||||
|
||||
@@ -441,3 +465,58 @@ func NewTaskLoader(lt LoadTaskTemplate, pkgDiscover *packages.PackageDiscover, h
|
||||
logLevel: logLevel,
|
||||
}
|
||||
}
|
||||
|
||||
// SkipOptions is the options of skip task runner
|
||||
type SkipOptions struct {
|
||||
If string
|
||||
DependsOnPhase common.WorkflowStepPhase
|
||||
}
|
||||
|
||||
// SkipTaskRunner will decide whether to skip task runner.
|
||||
func SkipTaskRunner(options *SkipOptions) bool {
|
||||
switch options.If {
|
||||
case "always":
|
||||
return false
|
||||
case "":
|
||||
return options.DependsOnPhase != common.WorkflowStepPhaseSucceeded
|
||||
default:
|
||||
// TODO:(fog) support more if cases
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// CheckPending checks whether to pending task run
|
||||
func CheckPending(ctx wfContext.Context, step v1beta1.WorkflowStep, stepStatus map[string]common.StepStatus) bool {
|
||||
for _, depend := range step.DependsOn {
|
||||
if status, ok := stepStatus[depend]; ok {
|
||||
if !IsStepFinish(status.Phase, status.Reason) {
|
||||
return true
|
||||
}
|
||||
} else {
|
||||
return true
|
||||
}
|
||||
}
|
||||
for _, input := range step.Inputs {
|
||||
if _, err := ctx.GetVar(strings.Split(input.From, ".")...); err != nil {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// IsStepFinish will decide whether step is finish.
|
||||
func IsStepFinish(phase common.WorkflowStepPhase, reason string) bool {
|
||||
if feature.DefaultMutableFeatureGate.Enabled(features.EnableSuspendOnFailure) {
|
||||
return phase == common.WorkflowStepPhaseSucceeded
|
||||
}
|
||||
switch phase {
|
||||
case common.WorkflowStepPhaseFailed:
|
||||
return reason == StatusReasonTerminate || reason == StatusReasonFailedAfterRetries
|
||||
case common.WorkflowStepPhaseSkipped:
|
||||
return true
|
||||
case common.WorkflowStepPhaseSucceeded:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
@@ -36,7 +36,6 @@ import (
|
||||
"github.com/oam-dev/kubevela/pkg/cue/model/value"
|
||||
"github.com/oam-dev/kubevela/pkg/cue/process"
|
||||
wfContext "github.com/oam-dev/kubevela/pkg/workflow/context"
|
||||
"github.com/oam-dev/kubevela/pkg/workflow/hooks"
|
||||
"github.com/oam-dev/kubevela/pkg/workflow/providers"
|
||||
"github.com/oam-dev/kubevela/pkg/workflow/types"
|
||||
)
|
||||
@@ -270,6 +269,7 @@ close({
|
||||
r.Equal(operation.Waiting, false)
|
||||
r.Equal(operation.FailedAfterRetries, true)
|
||||
r.Equal(status.Phase, common.WorkflowStepPhaseFailed)
|
||||
r.Equal(status.Reason, StatusReasonFailedAfterRetries)
|
||||
default:
|
||||
r.Equal(operation.Waiting, true)
|
||||
r.Equal(status.Phase, common.WorkflowStepPhaseFailed)
|
||||
@@ -438,14 +438,14 @@ func TestPendingInputCheck(t *testing.T) {
|
||||
r.NoError(err)
|
||||
run, err := gen(step, &types.GeneratorOptions{})
|
||||
r.NoError(err)
|
||||
r.Equal(run.Pending(wfCtx), true)
|
||||
r.Equal(run.Pending(wfCtx, nil), true)
|
||||
score, err := value.NewValue(`
|
||||
100
|
||||
`, nil, "")
|
||||
r.NoError(err)
|
||||
err = wfCtx.SetVar(score, "score")
|
||||
r.NoError(err)
|
||||
r.Equal(run.Pending(wfCtx), false)
|
||||
r.Equal(run.Pending(wfCtx, nil), false)
|
||||
}
|
||||
|
||||
func TestPendingDependsOnCheck(t *testing.T) {
|
||||
@@ -473,12 +473,49 @@ func TestPendingDependsOnCheck(t *testing.T) {
|
||||
r.NoError(err)
|
||||
run, err := gen(step, &types.GeneratorOptions{})
|
||||
r.NoError(err)
|
||||
r.Equal(run.Pending(wfCtx), true)
|
||||
ready, err := value.NewValue("true", nil, "")
|
||||
r.Equal(run.Pending(wfCtx, nil), true)
|
||||
ss := map[string]common.StepStatus{
|
||||
"depend": {
|
||||
Phase: common.WorkflowStepPhaseSucceeded,
|
||||
},
|
||||
}
|
||||
r.Equal(run.Pending(wfCtx, ss), false)
|
||||
}
|
||||
|
||||
func TestSkip(t *testing.T) {
|
||||
r := require.New(t)
|
||||
discover := providers.NewProviders()
|
||||
discover.Register("test", map[string]providers.Handler{
|
||||
"ok": func(ctx wfContext.Context, v *value.Value, act types.Action) error {
|
||||
return nil
|
||||
},
|
||||
})
|
||||
step := v1beta1.WorkflowStep{
|
||||
Name: "skip",
|
||||
Type: "ok",
|
||||
}
|
||||
pCtx := process.NewContext(process.ContextData{
|
||||
AppName: "myapp",
|
||||
CompName: "mycomp",
|
||||
Namespace: "default",
|
||||
AppRevisionName: "myapp-v1",
|
||||
})
|
||||
tasksLoader := NewTaskLoader(mockLoadTemplate, nil, discover, 0, pCtx)
|
||||
gen, err := tasksLoader.GetTaskGenerator(context.Background(), step.Type)
|
||||
r.NoError(err)
|
||||
err = wfCtx.SetVar(ready, hooks.ReadyComponent, "depend")
|
||||
runner, err := gen(step, &types.GeneratorOptions{})
|
||||
r.NoError(err)
|
||||
r.Equal(run.Pending(wfCtx), false)
|
||||
status, skip := runner.Skip(common.WorkflowStepPhaseFailed, nil)
|
||||
r.Equal(skip, true)
|
||||
r.Equal(status.Phase, common.WorkflowStepPhaseSkipped)
|
||||
r.Equal(status.Reason, StatusReasonSkip)
|
||||
runner2, err := gen(v1beta1.WorkflowStep{
|
||||
If: "always",
|
||||
Name: "test",
|
||||
}, &types.GeneratorOptions{ID: "124"})
|
||||
r.NoError(err)
|
||||
_, skip = runner2.Skip(common.WorkflowStepPhaseFailed, nil)
|
||||
r.Equal(skip, false)
|
||||
}
|
||||
|
||||
func newWorkflowContextForTest(t *testing.T) wfContext.Context {
|
||||
|
||||
@@ -22,6 +22,7 @@ import (
|
||||
builtintime "time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/client-go/rest"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
@@ -29,6 +30,7 @@ import (
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
|
||||
"github.com/oam-dev/kubevela/pkg/cue/packages"
|
||||
"github.com/oam-dev/kubevela/pkg/cue/process"
|
||||
"github.com/oam-dev/kubevela/pkg/features"
|
||||
monitorContext "github.com/oam-dev/kubevela/pkg/monitor/context"
|
||||
"github.com/oam-dev/kubevela/pkg/oam/discoverymapper"
|
||||
"github.com/oam-dev/kubevela/pkg/velaql/providers/query"
|
||||
@@ -73,7 +75,7 @@ func (td *taskDiscover) GetTaskGenerator(ctx context.Context, name string) (type
|
||||
func suspend(step v1beta1.WorkflowStep, opt *types.GeneratorOptions) (types.TaskRunner, error) {
|
||||
tr := &suspendTaskRunner{
|
||||
id: opt.ID,
|
||||
name: step.Name,
|
||||
step: step,
|
||||
wait: false,
|
||||
}
|
||||
|
||||
@@ -92,6 +94,7 @@ func StepGroup(step v1beta1.WorkflowStep, opt *types.GeneratorOptions) (types.Ta
|
||||
return &stepGroupTaskRunner{
|
||||
id: opt.ID,
|
||||
name: step.Name,
|
||||
step: step,
|
||||
subTaskRunners: opt.SubTaskRunners,
|
||||
}, nil
|
||||
}
|
||||
@@ -119,40 +122,64 @@ func NewTaskDiscoverFromRevision(ctx monitorContext.Context, providerHandlers pr
|
||||
}
|
||||
|
||||
type suspendTaskRunner struct {
|
||||
id string
|
||||
name string
|
||||
wait bool
|
||||
id string
|
||||
step v1beta1.WorkflowStep
|
||||
wait bool
|
||||
phase common.WorkflowStepPhase
|
||||
}
|
||||
|
||||
// Name return suspend step name.
|
||||
func (tr *suspendTaskRunner) Name() string {
|
||||
return tr.name
|
||||
return tr.step.Name
|
||||
}
|
||||
|
||||
// Run make workflow suspend.
|
||||
func (tr *suspendTaskRunner) Run(ctx wfContext.Context, options *types.TaskRunOptions) (common.StepStatus, *types.Operation, error) {
|
||||
if tr.wait {
|
||||
tr.phase = common.WorkflowStepPhaseRunning
|
||||
} else {
|
||||
tr.phase = common.WorkflowStepPhaseSucceeded
|
||||
}
|
||||
stepStatus := common.StepStatus{
|
||||
ID: tr.id,
|
||||
Name: tr.name,
|
||||
Name: tr.step.Name,
|
||||
Type: types.WorkflowStepTypeSuspend,
|
||||
Phase: common.WorkflowStepPhaseSucceeded,
|
||||
}
|
||||
|
||||
if tr.wait {
|
||||
stepStatus.Phase = common.WorkflowStepPhaseRunning
|
||||
Phase: tr.phase,
|
||||
}
|
||||
|
||||
return stepStatus, &types.Operation{Suspend: true}, nil
|
||||
}
|
||||
|
||||
// Pending check task should be executed or not.
|
||||
func (tr *suspendTaskRunner) Pending(ctx wfContext.Context) bool {
|
||||
return false
|
||||
func (tr *suspendTaskRunner) Pending(ctx wfContext.Context, stepStatus map[string]common.StepStatus) bool {
|
||||
return custom.CheckPending(ctx, tr.step, stepStatus)
|
||||
}
|
||||
|
||||
func (tr *suspendTaskRunner) Skip(dependsOnPhase common.WorkflowStepPhase, stepStatus map[string]common.StepStatus) (common.StepStatus, bool) {
|
||||
status := common.StepStatus{
|
||||
ID: tr.id,
|
||||
Name: tr.step.Name,
|
||||
Type: types.WorkflowStepTypeSuspend,
|
||||
Phase: tr.phase,
|
||||
}
|
||||
if feature.DefaultMutableFeatureGate.Enabled(features.EnableSuspendOnFailure) {
|
||||
return status, false
|
||||
}
|
||||
skip := custom.SkipTaskRunner(&custom.SkipOptions{
|
||||
If: tr.step.If,
|
||||
DependsOnPhase: dependsOnPhase,
|
||||
})
|
||||
if skip {
|
||||
status.Phase = common.WorkflowStepPhaseSkipped
|
||||
status.Reason = custom.StatusReasonSkip
|
||||
}
|
||||
return status, skip
|
||||
}
|
||||
|
||||
type stepGroupTaskRunner struct {
|
||||
id string
|
||||
name string
|
||||
step v1beta1.WorkflowStep
|
||||
subTaskRunners []types.TaskRunner
|
||||
}
|
||||
|
||||
@@ -161,12 +188,43 @@ func (tr *stepGroupTaskRunner) Name() string {
|
||||
return tr.name
|
||||
}
|
||||
|
||||
// Pending check task should be executed or not.
|
||||
func (tr *stepGroupTaskRunner) Pending(ctx wfContext.Context, stepStatus map[string]common.StepStatus) bool {
|
||||
return custom.CheckPending(ctx, tr.step, stepStatus)
|
||||
}
|
||||
|
||||
func (tr *stepGroupTaskRunner) Skip(dependsOnPhase common.WorkflowStepPhase, stepStatus map[string]common.StepStatus) (common.StepStatus, bool) {
|
||||
status := common.StepStatus{
|
||||
ID: tr.id,
|
||||
Name: tr.step.Name,
|
||||
Type: types.WorkflowStepTypeStepGroup,
|
||||
}
|
||||
if feature.DefaultMutableFeatureGate.Enabled(features.EnableSuspendOnFailure) {
|
||||
return status, false
|
||||
}
|
||||
skip := custom.SkipTaskRunner(&custom.SkipOptions{
|
||||
If: tr.step.If,
|
||||
DependsOnPhase: dependsOnPhase,
|
||||
})
|
||||
if skip {
|
||||
status.Phase = common.WorkflowStepPhaseSkipped
|
||||
status.Reason = custom.StatusReasonSkip
|
||||
stepStatus[tr.step.Name] = common.StepStatus{
|
||||
ID: tr.id,
|
||||
Phase: status.Phase,
|
||||
}
|
||||
// return false here to set all the sub steps to skipped
|
||||
return status, false
|
||||
}
|
||||
return status, skip
|
||||
}
|
||||
|
||||
// Run make workflow step group.
|
||||
func (tr *stepGroupTaskRunner) Run(ctx wfContext.Context, options *types.TaskRunOptions) (common.StepStatus, *types.Operation, error) {
|
||||
e := options.Engine
|
||||
if len(tr.subTaskRunners) > 0 {
|
||||
// set sub steps to dag mode for now
|
||||
e.SetParentRunner(tr.name)
|
||||
// set sub steps to dag mode for now
|
||||
if err := e.Run(tr.subTaskRunners, true); err != nil {
|
||||
return common.StepStatus{
|
||||
ID: tr.id,
|
||||
@@ -178,34 +236,39 @@ func (tr *stepGroupTaskRunner) Run(ctx wfContext.Context, options *types.TaskRun
|
||||
e.SetParentRunner("")
|
||||
}
|
||||
stepStatus := e.GetStepStatus(tr.name)
|
||||
var phase common.WorkflowStepPhase
|
||||
subStepPhases := make(map[common.WorkflowStepPhase]int)
|
||||
status := common.StepStatus{
|
||||
ID: tr.id,
|
||||
Name: tr.name,
|
||||
Type: types.WorkflowStepTypeStepGroup,
|
||||
}
|
||||
|
||||
subStepCounts := make(map[string]int)
|
||||
for _, subStepsStatus := range stepStatus.SubStepsStatus {
|
||||
subStepPhases[subStepsStatus.Phase]++
|
||||
subStepCounts[string(subStepsStatus.Phase)]++
|
||||
subStepCounts[subStepsStatus.Reason]++
|
||||
}
|
||||
switch {
|
||||
case len(stepStatus.SubStepsStatus) < len(tr.subTaskRunners):
|
||||
phase = common.WorkflowStepPhaseRunning
|
||||
case subStepPhases[common.WorkflowStepPhaseRunning] > 0:
|
||||
phase = common.WorkflowStepPhaseRunning
|
||||
case subStepPhases[common.WorkflowStepPhaseStopped] > 0:
|
||||
phase = common.WorkflowStepPhaseStopped
|
||||
case subStepPhases[common.WorkflowStepPhaseFailed] > 0:
|
||||
phase = common.WorkflowStepPhaseFailed
|
||||
status.Phase = common.WorkflowStepPhaseRunning
|
||||
case subStepCounts[string(common.WorkflowStepPhaseRunning)] > 0:
|
||||
status.Phase = common.WorkflowStepPhaseRunning
|
||||
case subStepCounts[string(common.WorkflowStepPhaseStopped)] > 0:
|
||||
status.Phase = common.WorkflowStepPhaseStopped
|
||||
case subStepCounts[string(common.WorkflowStepPhaseFailed)] > 0:
|
||||
status.Phase = common.WorkflowStepPhaseFailed
|
||||
switch {
|
||||
case subStepCounts[custom.StatusReasonFailedAfterRetries] > 0:
|
||||
status.Reason = custom.StatusReasonFailedAfterRetries
|
||||
case subStepCounts[custom.StatusReasonTerminate] > 0:
|
||||
status.Reason = custom.StatusReasonTerminate
|
||||
}
|
||||
case subStepCounts[string(common.WorkflowStepPhaseSkipped)] > 0:
|
||||
status.Phase = common.WorkflowStepPhaseSkipped
|
||||
status.Reason = custom.StatusReasonSkip
|
||||
default:
|
||||
phase = common.WorkflowStepPhaseSucceeded
|
||||
status.Phase = common.WorkflowStepPhaseSucceeded
|
||||
}
|
||||
return common.StepStatus{
|
||||
ID: tr.id,
|
||||
Name: tr.name,
|
||||
Type: types.WorkflowStepTypeStepGroup,
|
||||
Phase: phase,
|
||||
}, e.GetOperation(), nil
|
||||
}
|
||||
|
||||
// Pending check task should be executed or not.
|
||||
func (tr *stepGroupTaskRunner) Pending(ctx wfContext.Context) bool {
|
||||
return false
|
||||
return status, e.GetOperation(), nil
|
||||
}
|
||||
|
||||
// NewViewTaskDiscover will create a client for load task generator.
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user