Compare commits

..

15 Commits

Author SHA1 Message Date
Tianxin Dong
bce3e158ca Backport #2541 to release 1.1 (#2709) 2021-11-15 20:34:14 +08:00
github-actions[bot]
8a9df2dd39 Fix: stop installing golangci-lint each time (#2713)
When golangci-lint isn't located in $PATH, it will be installed into
$GOBIN every single time.

(cherry picked from commit bff846b7fe)

Co-authored-by: zzxwill <zzxwill@gmail.com>
2021-11-15 16:39:54 +08:00
Jianbo Sun
9a0ea76210 Revert "Backport #2629 to release-1.1: patch status retry while conflict happens" (#2704)
* Revert "Fix: patch status retry while conflict happens (#2629) (#2638)"

This reverts commit 4e9a7fc82e.

* Style: make reviewable

Co-authored-by: Yin Da <yd219913@alibaba-inc.com>
2021-11-14 17:07:30 +08:00
Somefive
156f165b5c Fix: cluster url (#2675) (#2705) 2021-11-14 13:54:07 +08:00
Jian.Li
fce05bffc5 Backport #2683 : Feat: output log with structured tag & add step duration metrics (#2696)
* debug task

(cherry picked from commit 93378eda67)

* metrics

(cherry picked from commit 7366804014)

* trace context

(cherry picked from commit f32105f23b)

* add step_duration metrics

(cherry picked from commit f9fc065e71)

* add readme docs

(cherry picked from commit 69146b468d)
2021-11-12 23:55:00 +08:00
Zheng Xi Zhou
6c0b943dfc Fix: application status.services doesn't include Terraform typed components (#2692)
Function aggregateHealthStatus() in pkg/controller/core.oam.dev/v1alpha2/
application/apply.go, which is used to retrieve component status, was
abandoned. All unit tests of it were abandoned too. Fixed it and restored all the unit
tests.
2021-11-12 22:33:09 +08:00
github-actions[bot]
d83fa47741 Fix: fix delete a component from application not delete workload (#2690)
lint

Fix: error test

Fix: fix e2e rollout

Fix comment

(cherry picked from commit 7fb0c2ad13)

Co-authored-by: wangyike <wangyike_wyk@163.com>
2021-11-12 11:47:24 +08:00
github-actions[bot]
e8fe203265 [Backport release-1.1] Fix: minor fix for vela cli printing (#2657)
* Fix: minor fix for vela cli printing

(cherry picked from commit 45177b74e9)

* add dockerfile go mod cache

(cherry picked from commit db5fd86a53)

Co-authored-by: Jianbo Sun <jianbo.sjb@alibaba-inc.com>
2021-11-08 10:23:03 +08:00
github-actions[bot]
a075830ae1 Feat: add vela exec for multi cluster (#2654)
fix

support vela exec

(cherry picked from commit 6bdfbe2a4f)

Co-authored-by: 天元 <jianbo.sjb@alibaba-inc.com>
2021-11-07 09:08:36 +08:00
github-actions[bot]
8ab4634701 [Backport release-1.1] Fix: filter loggable workload in vela logs (#2653)
* Fix: filter loggable workload in vela logs

(cherry picked from commit d7168f2f13)

* reviewable

(cherry picked from commit 76547e2c00)

Co-authored-by: qiaozp <chivalry.pp@gmail.com>
2021-11-06 21:27:10 +08:00
Jianbo Sun
9838eff9c7 Fix: upgrade stern lib to avoid panic for vela logs (#2652)
(cherry picked from commit 026027eff9)
2021-11-06 17:37:29 +08:00
Jianbo Sun
476de5e4f1 Backport: #2653 Feat: add vela prob to test cluster (#2649) 2021-11-06 17:24:50 +08:00
github-actions[bot]
c89d1e1713 [Backport release-1.1] Fix: allow definition schema cm can be same name in different definition type (#2639)
* Fix: fix definition schema cm name

(cherry picked from commit ef899413a3)

* fix ut

(cherry picked from commit 374aa64e32)

* fix ut

(cherry picked from commit a4357f6acf)

* fix show

(cherry picked from commit b3033f9f80)

* add switch default case

(cherry picked from commit dbe8167959)

Co-authored-by: FogDong <dongtianxin.tx@alibaba-inc.com>
2021-11-06 10:50:00 +08:00
Somefive
4e9a7fc82e Fix: patch status retry while conflict happens (#2629) (#2638) 2021-11-05 15:55:09 +08:00
github-actions[bot]
3b62e44ef8 Feat: add reconcile timeout configuration for vela-core (#2633)
(cherry picked from commit 923ec1844f)

Co-authored-by: Yin Da <yd219913@alibaba-inc.com>
2021-11-04 20:49:23 +08:00
47 changed files with 1381 additions and 418 deletions

10
.github/pr-title-checker-config.json vendored Normal file
View File

@@ -0,0 +1,10 @@
{
"LABEL": {
"name": "title-needs-formatting",
"color": "EEEEEE"
},
"CHECKS": {
"prefixes": ["Fix: ", "Feat: ", "Docs: ", "Test: ", "Chore: ", "CI: ", "Perf: ", "Refactor: ", "Revert: ", "Style: ", "Test: ", "Backport: ", "Backport",
"Fix(", "Feat(", "Docs(", "Test(", "Chore(", "CI(", "Perf(", "Refactor(", "Revert(", "Style(", "Test(", "Backport("]
}
}

View File

@@ -1,13 +1,19 @@
name: Lint Commit Messages
on: [push, pull_request]
name: PR Title Checker
on:
pull_request:
types:
- opened
- edited
- synchronize
- labeled
- unlabeled
jobs:
commitlint:
check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: thehanimo/pr-title-checker@v1.3.1
with:
fetch-depth: 0
- uses: wagoid/commitlint-github-action@v4
with:
helpURL: https://github.com/oam-dev/kubevela/blob/master/contribute/create-pull-request.md#commit-message-format
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
pass_on_octokit_error: true
configuration_path: ".github/pr-title-checker-config.json"

View File

@@ -83,6 +83,9 @@ jobs:
- name: Run api e2e tests
run: make e2e-api-test
- name: Run addons e2e tests
run: make e2e-addon-test
- name: Run e2e tests
run: make e2e-test

View File

@@ -5,6 +5,10 @@ WORKDIR /workspace
# Copy the Go Modules manifests
COPY go.mod go.mod
COPY go.sum go.sum
# It's a proxy for CN developer, please unblock it if you have network issue
# RUN go env -w GOPROXY=https://goproxy.cn,direct
# cache deps before building and copying source so that we don't need to re-download as much
# and so that source changes don't invalidate our downloaded layer
RUN go mod download

View File

@@ -174,6 +174,11 @@ e2e-test:
ginkgo -v --skip="rollout related e2e-test." ./test/e2e-test
@$(OK) tests pass
e2e-addon-test:
cp bin/vela /tmp/
ginkgo -v ./test/e2e-addon-test
@$(OK) tests pass
e2e-rollout-test:
ginkgo -v --focus="rollout related e2e-test." ./test/e2e-test
@$(OK) tests pass
@@ -270,16 +275,20 @@ HOSTARCH := amd64
endif
golangci:
ifeq (, $(shell which golangci-lint))
ifneq ($(shell which golangci-lint),)
@$(OK) golangci-lint is already installed
GOLANGCILINT=$(shell which golangci-lint)
else ifeq (, $(shell which $(GOBIN)/golangci-lint))
@{ \
set -e ;\
echo 'installing golangci-lint-$(GOLANGCILINT_VERSION)' ;\
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOBIN) $(GOLANGCILINT_VERSION) ;\
echo 'Install succeed' ;\
echo 'Successfully installed' ;\
}
GOLANGCILINT=$(GOBIN)/golangci-lint
else
GOLANGCILINT=$(shell which golangci-lint)
@$(OK) golangci-lint is already installed
GOLANGCILINT=$(GOBIN)/golangci-lint
endif
.PHONY: staticchecktool

View File

@@ -44,6 +44,7 @@ import (
oamv1alpha2 "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2"
"github.com/oam-dev/kubevela/pkg/controller/utils"
"github.com/oam-dev/kubevela/pkg/cue/packages"
_ "github.com/oam-dev/kubevela/pkg/monitor/metrics"
"github.com/oam-dev/kubevela/pkg/multicluster"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/discoverymapper"
@@ -112,6 +113,8 @@ func main() {
flag.StringVar(&storageDriver, "storage-driver", "Local", "Application file save to the storage driver")
flag.DurationVar(&syncPeriod, "informer-re-sync-interval", 60*time.Minute,
"controller shared informer lister full re-sync period")
flag.DurationVar(&commonconfig.ReconcileTimeout, "reconcile-timeout", time.Minute*3,
"the timeout for controller reconcile")
flag.StringVar(&oam.SystemDefinitonNamespace, "system-definition-namespace", "vela-system", "define the namespace of the system-level definition")
flag.IntVar(&controllerArgs.ConcurrentReconciles, "concurrent-reconciles", 4, "concurrent-reconciles is the concurrent reconcile number of the controller. The default value is 4")
flag.Float64Var(&qps, "kube-api-qps", 50, "the qps for reconcile clients. Low qps may lead to low throughput. High qps may give stress to api-server. Raise this value if concurrent-reconciles is set to be high.")

View File

@@ -1,32 +0,0 @@
const Configuration = {
/*
* Resolve and load @commitlint/config-conventional from node_modules.
* Referenced packages must be installed
*/
extends: ['@commitlint/config-conventional'],
/*
* Any rules defined here will override rules from @commitlint/config-conventional
*/
rules: {
'type-enum': [
2,
'always',
[
'Build',
'Chore',
'CI',
'Docs',
'Feat',
'Fix',
'Perf',
'Refactor',
'Revert',
'Style',
'Test',
],
],
'type-case': [2, 'never', 'lower-case'],
},
};
module.exports = Configuration;

View File

@@ -22,6 +22,7 @@
| disable-caps | string | "" | To be disabled builtin capability list. |
| storage-driver | string | Local | Application file save to the storage driver |
| informer-re-sync-interval | time | 1h | Controller shared informer lister full re-sync period, the interval between two routinely reconciles for one CR (like Application) if no changes made to it. |
| reconcile-timeout | time | 3m | The timeout for controller reconcile. |
| system-definition-namespace | string | vela-system | define the namespace of the system-level definition |
| concurrent-reconciles | int | 4 | The concurrent reconcile number of the controller. You can increase the degree of concurrency if a large number of CPU cores are provided to the controller. |
| kube-api-qps | int | 50 | The qps for reconcile k8s clients. Increase it if you have high concurrency. A small number might restrict the requests to the api-server which may cause a long waiting queue when there are a large number of inflight requests. Try to avoid setting it too high since it will cause large burden on apiserver. |
@@ -39,4 +40,4 @@
| Medium | < 500 | < 5,000 | < 30,000 | 4 | 500 | 800 | 1 | 2Gi |
| Large | < 1,000 | < 12,000 | < 72,000 | 4 | 800 | 1,000 | 2 | 4Gi |
> For details, read KubeVela Performance Test Report
> For details, read KubeVela Performance Test Report

View File

@@ -49,11 +49,11 @@ var _ = ginkgo.Describe("Test Vela Application", func() {
e2e.EnvInitContext("env init", envName)
e2e.EnvSetContext("env set", envName)
e2e.JsonAppFileContext("deploy app-basic", appbasicJsonAppFile)
ApplicationExecContext("exec -- COMMAND", applicationName)
e2e.JsonAppFileContext("update app-basic, add scaler trait with replicas 2", appbasicAddTraitJsonAppFile)
e2e.ComponentListContext("ls", applicationName, workloadType, traitAlias)
ApplicationStatusContext("status", applicationName, workloadType)
ApplicationStatusDeeplyContext("status", applicationName, workloadType, envName)
ApplicationExecContext("exec -- COMMAND", applicationName)
// ApplicationPortForwardContext("port-forward", applicationName)
e2e.WorkloadDeleteContext("delete", applicationName)

9
go.mod
View File

@@ -29,10 +29,12 @@ require (
github.com/gosuri/uitable v0.0.4
github.com/hashicorp/hcl/v2 v2.9.1
github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174
github.com/huandu/xstrings v1.3.2 // indirect
github.com/imdario/mergo v0.3.12
github.com/kyokomi/emoji v2.2.4+incompatible
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/hashstructure/v2 v2.0.1
github.com/oam-dev/cluster-gateway v1.1.2
github.com/oam-dev/cluster-gateway v1.1.6
github.com/oam-dev/terraform-config-inspect v0.0.0-20210418082552-fc72d929aa28
github.com/oam-dev/terraform-controller v0.2.6
github.com/olekukonko/tablewriter v0.0.5
@@ -41,6 +43,7 @@ require (
github.com/opencontainers/runc v1.0.0-rc95 // indirect
github.com/openkruise/kruise-api v0.9.0
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.11.0
github.com/sirupsen/logrus v1.8.1
github.com/spf13/cobra v1.2.1
github.com/spf13/pflag v1.0.5
@@ -51,6 +54,7 @@ require (
go.mongodb.org/mongo-driver v1.5.1
go.uber.org/zap v1.18.1
golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
gotest.tools v2.2.0+incompatible
helm.sh/helm/v3 v3.6.1
@@ -77,5 +81,6 @@ require (
replace (
github.com/docker/docker => github.com/moby/moby v17.12.0-ce-rc1.0.20200618181300-9dc6525e6118+incompatible
github.com/wercker/stern => github.com/oam-dev/stern v1.13.0-alpha
github.com/wercker/stern => github.com/oam-dev/stern v1.13.1
sigs.k8s.io/apiserver-network-proxy/konnectivity-client => sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.24
)

42
go.sum
View File

@@ -77,6 +77,7 @@ github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+B
github.com/Azure/go-autorest/autorest v0.9.3-0.20191028180845-3492b2aff503/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest v0.10.0/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
github.com/Azure/go-autorest/autorest v0.10.2/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
github.com/Azure/go-autorest/autorest v0.11.18 h1:90Y4srNYrwOtAgVo3ndrQkTYn6kf1Eg/AjTFJ8Is2aM=
github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
@@ -84,6 +85,7 @@ github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEg
github.com/Azure/go-autorest/autorest/adal v0.8.1-0.20191028180845-3492b2aff503/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
github.com/Azure/go-autorest/autorest/adal v0.9.13 h1:Mp5hbtOePIzM8pJVRa3YLrWWmZtoxRXqUEzCfJt3+/Q=
github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
@@ -94,6 +96,7 @@ github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSY
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA=
@@ -905,8 +908,9 @@ github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174/go.mod h1:DqJ97dSdRW
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo=
github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4=
github.com/huandu/xstrings v1.3.1 h1:4jgBlKK6tLKFvO8u5pmYjG91cqytmDCDvGh7ECVFfFs=
github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw=
github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
@@ -1121,8 +1125,9 @@ github.com/minio/minio-go/v6 v6.0.49/go.mod h1:qD0lajrGW49lKZLtXKtCB4X/qkMf0a5tB
github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
github.com/mitchellh/copystructure v1.1.1 h1:Bp6x9R1Wn16SIz3OfeDr0b7RnCG2OB66Y7PQyC/cvq4=
github.com/mitchellh/copystructure v1.1.1/go.mod h1:EBArHfARyrSWO/+Wyr9zwEkc6XMFB9XyNgFNmRkZZU4=
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
github.com/mitchellh/go-homedir v0.0.0-20161203194507-b8bc1bf76747/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
@@ -1143,13 +1148,15 @@ github.com/mitchellh/mapstructure v1.2.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE=
github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/moby/moby v17.12.0-ce-rc1.0.20200618181300-9dc6525e6118+incompatible h1:NT0cwArZg/wGdvY8pzej4tPr+9WGmDdkF8Suj+mkz2g=
github.com/moby/moby v17.12.0-ce-rc1.0.20200618181300-9dc6525e6118+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc=
github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc=
github.com/moby/term v0.0.0-20210610120745-9d4ed1856297 h1:yH0SvLzcbZxcJXho2yh7CqdENGMQe73Cw3woZBpPli0=
github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A=
@@ -1182,6 +1189,7 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007/go.mod h1:m2XC9Qq0AlmmVksL6FktJCdTYyLk7V3fKyp0sl1yWQo=
github.com/mwitkow/go-proto-validators v0.2.0/go.mod h1:ZfA1hW+UH/2ZHOWvQ3HnQaU0DtnpXu850MZiy+YUgcc=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/nakabonne/nestif v0.3.0/go.mod h1:dI314BppzXjJ4HsCnbo7XzrJHPszZsjnk5wEBSYHI2c=
github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
@@ -1200,10 +1208,10 @@ github.com/nishanths/predeclared v0.2.1/go.mod h1:HvkGJcA3naj4lOwnFXFDkFxVtSqQMB
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/oam-dev/cluster-gateway v1.1.2 h1:sxC8Uyx/d3Yu8nIFSz31i+4JKhJfDAS9XVIPEWa1y+Q=
github.com/oam-dev/cluster-gateway v1.1.2/go.mod h1:EjPUZwTYBe+gFtPV/yGohLE19fDr3CUg4tfSRY72fkM=
github.com/oam-dev/stern v1.13.0-alpha h1:EVjM8Qvh6LssB6t4RZrjf9DtCq1cz+/cy6OF7fpy9wk=
github.com/oam-dev/stern v1.13.0-alpha/go.mod h1:AOkvfFUv0Arz7GBi0jz7S0Jsu4K/kdvSjNsnRt1+BIg=
github.com/oam-dev/cluster-gateway v1.1.6 h1:CY6m2Qcs6XJ/l/NY48CdHD7GAel9zZ/erUOz2zYzxkI=
github.com/oam-dev/cluster-gateway v1.1.6/go.mod h1:SF7S4Ss+VUs2OVxmvSrrFGcaNFoXy6JWxHAnUxC1QcY=
github.com/oam-dev/stern v1.13.1 h1:Gt7xMBmQjRueHVFjRo5CHDTVhiYrssjlmvPwRiZtq7c=
github.com/oam-dev/stern v1.13.1/go.mod h1:0pLjZt0amXE/ErF16Rdrgd98H2owN8Hmn3/7CX5+AeA=
github.com/oam-dev/terraform-config-inspect v0.0.0-20210418082552-fc72d929aa28 h1:tD8HiFKnt0jnwdTWjeqUnfnUYLD/+Nsmj8ZGIxqDWiU=
github.com/oam-dev/terraform-config-inspect v0.0.0-20210418082552-fc72d929aa28/go.mod h1:Mu8i0/DdplvnjwRbAYPsc8+LRR27n/mp8VWdkN10GzE=
github.com/oam-dev/terraform-controller v0.2.6 h1:aoEj4sfxDMBdTkM5uKYmjVFOgjhYeYBm0xzdRb4+Xu0=
@@ -1296,7 +1304,6 @@ github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.0.0-20180311214515-816c9085562c/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -1955,6 +1962,7 @@ golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1982,8 +1990,9 @@ golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069 h1:siQdpVirKtzPhKl3lZWozZraC
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -2002,6 +2011,7 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs=
@@ -2367,6 +2377,7 @@ k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78=
k8s.io/api v0.18.3/go.mod h1:UOaMwERbqJMfeeeHc8XJKawj4P9TgDRnViIqqBeH2QA=
k8s.io/api v0.18.6/go.mod h1:eeyxr+cwCjMdLAmr2W3RyDI0VvTawSg/3RFFBEnmZGI=
k8s.io/api v0.18.8/go.mod h1:d/CXqwWv+Z2XEG1LgceeDmHQwpUJhROPx16SlxJgERY=
k8s.io/api v0.20.10/go.mod h1:0kei3F6biGjtRQBo5dUeujq6Ji3UCh9aOSfp/THYd7I=
k8s.io/api v0.21.0/go.mod h1:+YbrhBBGgsxbF6o6Kj4KJPJnBmAKuXDeS3E18bgHNVU=
k8s.io/api v0.21.1/go.mod h1:FstGROTmsSHBarKc8bylzXih8BLNYTiS3TZcsoEDg2s=
k8s.io/api v0.21.2/go.mod h1:Lv6UGJZ1rlMI1qusN8ruAp9PUBFyBwpEHAdG24vIsiU=
@@ -2394,6 +2405,7 @@ k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftc
k8s.io/apimachinery v0.18.3/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko=
k8s.io/apimachinery v0.18.6/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko=
k8s.io/apimachinery v0.18.8/go.mod h1:6sQd+iHEqmOtALqOFjSWp2KZ9F0wlU/nWm0ZgsYWMig=
k8s.io/apimachinery v0.20.10/go.mod h1:kQa//VOAwyVwJ2+L9kOREbsnryfsGSkSM1przND4+mw=
k8s.io/apimachinery v0.21.0/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY=
k8s.io/apimachinery v0.21.1/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY=
k8s.io/apimachinery v0.21.2/go.mod h1:CdTY8fU/BlvAbJ2z/8kBwimGki5Zp8/fbVuLY8gJumM=
@@ -2423,6 +2435,7 @@ k8s.io/client-go v0.18.2/go.mod h1:Xcm5wVGXX9HAA2JJ2sSBUn3tCJ+4SVlCbl2MNNv+CIU=
k8s.io/client-go v0.18.3/go.mod h1:4a/dpQEvzAhT1BbuWW09qvIaGw6Gbu1gZYiQZIi1DMw=
k8s.io/client-go v0.18.6/go.mod h1:/fwtGLjYMS1MaM5oi+eXhKwG+1UHidUEXRh6cNsdO0Q=
k8s.io/client-go v0.18.8/go.mod h1:HqFqMllQ5NnQJNwjro9k5zMyfhZlOwpuTLVrxjkYSxU=
k8s.io/client-go v0.20.10/go.mod h1:fFg+aLoasv/R+xiVaWjxeqGFYltzgQcOQzkFaSRfnJ0=
k8s.io/client-go v0.21.0/go.mod h1:nNBytTF9qPFDEhoqgEPaarobC8QPae13bElIVHzIglA=
k8s.io/client-go v0.21.1/go.mod h1:/kEw4RgW+3xnBGzvp9IWxKSNA+lXn3A7AuH3gdOAzLs=
k8s.io/client-go v0.21.2/go.mod h1:HdJ9iknWpbl3vMGtib6T2PyI/VYxiZfq936WNVHBRrA=
@@ -2444,6 +2457,7 @@ k8s.io/component-base v0.0.0-20191122220729-2684fb322cb9/go.mod h1:NFuUusy/X4Tk2
k8s.io/component-base v0.17.0/go.mod h1:rKuRAokNMY2nn2A6LP/MiwpoaMRHpfRnrPaUJJj1Yoc=
k8s.io/component-base v0.18.2/go.mod h1:kqLlMuhJNHQ9lz8Z7V5bxUUtjFZnrypArGl58gmDfUM=
k8s.io/component-base v0.18.6/go.mod h1:knSVsibPR5K6EW2XOjEHik6sdU5nCvKMrzMt2D4In14=
k8s.io/component-base v0.20.10/go.mod h1:ZKOEin1xu68aJzxgzl5DZSp5J1IrjAOPlPN90/t6OI8=
k8s.io/component-base v0.21.0/go.mod h1:qvtjz6X0USWXbgmbfXR+Agik4RZ3jv2Bgr5QnZzdPYw=
k8s.io/component-base v0.21.2/go.mod h1:9lvmIThzdlrJj5Hp8Z/TOgIkdfsNARQ1pT+3PByuiuc=
k8s.io/component-base v0.21.3/go.mod h1:kkuhtfEHeZM6LkX0saqSK8PbdO7A0HigUngmhhrwfGQ=
@@ -2509,6 +2523,7 @@ mvdan.cc/gofumpt v0.1.1/go.mod h1:yXG1r1WqZVKWbVRtBWKWX9+CxGYfA51nSomhM0woR48=
mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc=
mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4=
mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7/go.mod h1:hBpJkZE8H/sb+VRFvw2+rBpHNsTBcvSpk61hr8mzXZE=
open-cluster-management.io/api v0.0.0-20210610125115-f57c747b84aa/go.mod h1:9qiA5h/8kvPQnJEOlAPHVjRO9a1jCmDhGzvgMBvXEaE=
open-cluster-management.io/api v0.0.0-20210804091127-340467ff6239 h1:ToDTkftv88UVZSCqTCzYZTkYoba28z+An08Yrm9aOAA=
open-cluster-management.io/api v0.0.0-20210804091127-340467ff6239/go.mod h1:9qiA5h/8kvPQnJEOlAPHVjRO9a1jCmDhGzvgMBvXEaE=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
@@ -2517,11 +2532,10 @@ rsc.io/letsencrypt v0.0.3/go.mod h1:buyQKZ6IXrRnB7TdkHP0RyEybLx18HHyOSoTyoOLqNY=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.19/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22 h1:fmRfl9WJ4ApJn7LxNuED4m0t18qivVQOxP6aAYG9J6c=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
sigs.k8s.io/apiserver-network-proxy v0.0.24 h1:yaswrAqidc2XdLK2GRacVEBb55g4dg91f/B7b0SYliY=
sigs.k8s.io/apiserver-network-proxy v0.0.24/go.mod h1:z/U9KltvRVSMttVl3cdQo8cPuXEjr+Qn3A5sUJR55XI=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.24 h1:bCO6TN9VG1bK3nCG5ghQ5httx1HpsG5MD8XtRDySHDM=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.24/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
sigs.k8s.io/apiserver-runtime v1.0.3-0.20210913073608-0663f60bfee2 h1:c6RYHA1wUg9IEsfjnxg0WsPwvDC2Qw2eryXKXgSEF1c=
sigs.k8s.io/apiserver-runtime v1.0.3-0.20210913073608-0663f60bfee2/go.mod h1:gvPfh5FX3Wi3kIRpkh7qvY0i/DQl3SDpRtvqMGZE3Vo=
sigs.k8s.io/controller-runtime v0.6.0/go.mod h1:CpYf5pdNY/B352A1TFLAS2JVSlnGQ5O2cftPHndTroo=

View File

@@ -26,11 +26,12 @@ var (
PerfEnabled = false
)
const (
reconcileTimeout = time.Minute
var (
// ReconcileTimeout timeout for controller to reconcile
ReconcileTimeout = time.Minute * 3
)
// NewReconcileContext create context with default timeout (60s)
func NewReconcileContext(ctx context.Context) (context.Context, context.CancelFunc) {
return context.WithTimeout(ctx, reconcileTimeout)
return context.WithTimeout(ctx, ReconcileTimeout)
}

View File

@@ -40,11 +40,11 @@ import (
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
velatypes "github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/appfile"
common2 "github.com/oam-dev/kubevela/pkg/controller/common"
core "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha1/envbinding"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/application/assemble"
"github.com/oam-dev/kubevela/pkg/cue/packages"
monitorContext "github.com/oam-dev/kubevela/pkg/monitor/context"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/discoverymapper"
oamutil "github.com/oam-dev/kubevela/pkg/oam/util"
@@ -87,19 +87,24 @@ type Reconciler struct {
// Reconcile process app event
// nolint:gocyclo
func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
ctx, cancel := common2.NewReconcileContext(ctx)
ctx, cancel := context.WithTimeout(ctx, time.Minute)
defer cancel()
klog.InfoS("Reconcile application", "application", klog.KRef(req.Namespace, req.Name))
logCtx := monitorContext.NewTraceContext(ctx, "").AddTag("application", req.String(), "controller", "application")
logCtx.Info("Reconcile application")
defer logCtx.Commit("Reconcile application")
app := new(v1beta1.Application)
if err := r.Get(ctx, client.ObjectKey{
Name: req.Name,
Namespace: req.Namespace,
}, app); err != nil {
logCtx.Error(err, "get application")
return ctrl.Result{}, client.IgnoreNotFound(err)
}
logCtx.AddTag("resource_version", app.ResourceVersion)
ctx = oamutil.SetNamespaceInCtx(ctx, app.Namespace)
logCtx.SetContext(ctx)
if len(app.GetAnnotations()[oam.AnnotationKubeVelaVersion]) == 0 {
oamutil.AddAnnotations(app, map[string]string{
oam.AnnotationKubeVelaVersion: version.VelaVersion,
@@ -111,76 +116,76 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
app: app,
parser: appParser,
}
endReconcile, err := r.handleFinalizers(ctx, app)
endReconcile, err := r.handleFinalizers(logCtx, app)
if err != nil {
return r.endWithNegativeCondition(ctx, app, condition.ReconcileError(err), common.ApplicationStarting)
return r.endWithNegativeCondition(logCtx, app, condition.ReconcileError(err), common.ApplicationStarting)
}
if endReconcile {
return ctrl.Result{}, nil
}
appFile, err := appParser.GenerateAppFile(ctx, app)
appFile, err := appParser.GenerateAppFile(logCtx, app)
if err != nil {
klog.ErrorS(err, "Failed to parse application", "application", klog.KObj(app))
logCtx.Error(err, "Failed to parse application")
r.Recorder.Event(app, event.Warning(velatypes.ReasonFailedParse, err))
return r.endWithNegativeCondition(ctx, app, condition.ErrorCondition("Parsed", err), common.ApplicationRendering)
return r.endWithNegativeCondition(logCtx, app, condition.ErrorCondition("Parsed", err), common.ApplicationRendering)
}
app.Status.SetConditions(condition.ReadyCondition("Parsed"))
r.Recorder.Event(app, event.Normal(velatypes.ReasonParsed, velatypes.MessageParsed))
if err := handler.PrepareCurrentAppRevision(ctx, appFile); err != nil {
klog.ErrorS(err, "Failed to prepare app revision", "application", klog.KObj(app))
if err := handler.PrepareCurrentAppRevision(logCtx, appFile); err != nil {
logCtx.Error(err, "Failed to prepare app revision")
r.Recorder.Event(app, event.Warning(velatypes.ReasonFailedRevision, err))
return r.endWithNegativeCondition(ctx, app, condition.ErrorCondition("Revision", err), common.ApplicationRendering)
return r.endWithNegativeCondition(logCtx, app, condition.ErrorCondition("Revision", err), common.ApplicationRendering)
}
if err := handler.FinalizeAndApplyAppRevision(ctx); err != nil {
klog.ErrorS(err, "Failed to apply app revision", "application", klog.KObj(app))
if err := handler.FinalizeAndApplyAppRevision(logCtx); err != nil {
logCtx.Error(err, "Failed to apply app revision")
r.Recorder.Event(app, event.Warning(velatypes.ReasonFailedRevision, err))
return r.endWithNegativeCondition(ctx, app, condition.ErrorCondition("Revision", err), common.ApplicationRendering)
return r.endWithNegativeCondition(logCtx, app, condition.ErrorCondition("Revision", err), common.ApplicationRendering)
}
klog.InfoS("Successfully prepare current app revision", "revisionName", handler.currentAppRev.Name,
logCtx.Info("Successfully prepare current app revision", "revisionName", handler.currentAppRev.Name,
"revisionHash", handler.currentRevHash, "isNewRevision", handler.isNewRevision)
app.Status.SetConditions(condition.ReadyCondition("Revision"))
r.Recorder.Event(app, event.Normal(velatypes.ReasonRevisoned, velatypes.MessageRevisioned))
if err := handler.UpdateAppLatestRevisionStatus(ctx); err != nil {
klog.ErrorS(err, "Failed to update application status", "application", klog.KObj(app))
return r.endWithNegativeCondition(ctx, app, condition.ReconcileError(err), common.ApplicationRendering)
if err := handler.UpdateAppLatestRevisionStatus(logCtx); err != nil {
logCtx.Error(err, "Failed to update application status")
return r.endWithNegativeCondition(logCtx, app, condition.ReconcileError(err), common.ApplicationRendering)
}
klog.InfoS("Successfully apply application revision", "application", klog.KObj(app))
logCtx.Info("Successfully apply application revision")
policies, err := appFile.PrepareWorkflowAndPolicy()
if err != nil {
klog.Error(err, "[Handle PrepareWorkflowAndPolicy]")
logCtx.Error(err, "[Handle PrepareWorkflowAndPolicy]")
r.Recorder.Event(app, event.Warning(velatypes.ReasonFailedRender, err))
return r.endWithNegativeCondition(ctx, app, condition.ErrorCondition("PrepareWorkflowAndPolicy", err), common.ApplicationPolicyGenerating)
return r.endWithNegativeCondition(logCtx, app, condition.ErrorCondition("PrepareWorkflowAndPolicy", err), common.ApplicationPolicyGenerating)
}
if len(policies) > 0 {
if err := handler.Dispatch(ctx, "", common.PolicyResourceCreator, policies...); err != nil {
klog.Error(err, "[Handle ApplyPolicyResources]")
logCtx.Error(err, "[Handle ApplyPolicyResources]")
r.Recorder.Event(app, event.Warning(velatypes.ReasonFailedApply, err))
return r.endWithNegativeCondition(ctx, app, condition.ErrorCondition("ApplyPolices", err), common.ApplicationPolicyGenerating)
return r.endWithNegativeCondition(logCtx, app, condition.ErrorCondition("ApplyPolices", err), common.ApplicationPolicyGenerating)
}
klog.InfoS("Successfully generated application policies", "application", klog.KObj(app))
logCtx.Info("Successfully generated application policies")
}
app.Status.SetConditions(condition.ReadyCondition("Render"))
r.Recorder.Event(app, event.Normal(velatypes.ReasonRendered, velatypes.MessageRendered))
if !appWillRollout(app) {
steps, err := handler.GenerateApplicationSteps(ctx, app, appParser, appFile, handler.currentAppRev, r.Client, r.dm, r.pd)
steps, err := handler.GenerateApplicationSteps(logCtx, app, appParser, appFile, handler.currentAppRev, r.Client, r.dm, r.pd)
if err != nil {
klog.Error(err, "[handle workflow]")
logCtx.Error(err, "[handle workflow]")
r.Recorder.Event(app, event.Warning(velatypes.ReasonFailedWorkflow, err))
return r.endWithNegativeCondition(ctx, app, condition.ErrorCondition("Workflow", err), common.ApplicationRunningWorkflow)
return r.endWithNegativeCondition(logCtx, app, condition.ErrorCondition("Workflow", err), common.ApplicationRunningWorkflow)
}
workflowState, err := workflow.NewWorkflow(app, r.Client, appFile.WorkflowMode).ExecuteSteps(ctx, handler.currentAppRev, steps)
workflowState, err := workflow.NewWorkflow(app, r.Client, appFile.WorkflowMode).ExecuteSteps(logCtx.Fork("workflow"), handler.currentAppRev, steps)
if err != nil {
klog.Error(err, "[handle workflow]")
logCtx.Error(err, "[handle workflow]")
r.Recorder.Event(app, event.Warning(velatypes.ReasonFailedWorkflow, err))
return r.endWithNegativeCondition(ctx, app, condition.ErrorCondition("Workflow", err), common.ApplicationRunningWorkflow)
return r.endWithNegativeCondition(logCtx, app, condition.ErrorCondition("Workflow", err), common.ApplicationRunningWorkflow)
}
handler.addServiceStatus(false, app.Status.Services...)
@@ -188,9 +193,10 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
app.Status.AppliedResources = handler.appliedResources
switch workflowState {
case common.WorkflowStateSuspended:
return ctrl.Result{}, r.patchStatus(ctx, app, common.ApplicationWorkflowSuspending)
logCtx.Info("Workflow return state=Suspend")
return ctrl.Result{}, r.patchStatus(logCtx, app, common.ApplicationWorkflowSuspending)
case common.WorkflowStateTerminated:
return ctrl.Result{}, r.patchStatus(ctx, app, common.ApplicationWorkflowTerminated)
return ctrl.Result{}, r.patchStatus(logCtx, app, common.ApplicationWorkflowTerminated)
case common.WorkflowStateExecuting:
return reconcile.Result{RequeueAfter: baseWorkflowBackoffWaitTime}, r.patchStatus(ctx, app, common.ApplicationRunningWorkflow)
case common.WorkflowStateFinished:
@@ -204,10 +210,9 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
})
}
if err != nil {
klog.ErrorS(err, "Failed to gc after workflow",
"application", klog.KObj(app))
logCtx.Error(err, "Failed to gc after workflow")
r.Recorder.Event(app, event.Warning(velatypes.ReasonFailedGC, err))
return r.endWithNegativeCondition(ctx, app, condition.ErrorCondition("GCAfterWorkflow", err), common.ApplicationRunningWorkflow)
return r.endWithNegativeCondition(logCtx, app, condition.ErrorCondition("GCAfterWorkflow", err), common.ApplicationRunningWorkflow)
}
app.Status.ResourceTracker = ref
}
@@ -219,33 +224,33 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
var comps []*velatypes.ComponentManifest
comps, err = appFile.GenerateComponentManifests()
if err != nil {
klog.ErrorS(err, "Failed to render components", "application", klog.KObj(app))
logCtx.Error(err, "Failed to render components")
r.Recorder.Event(app, event.Warning(velatypes.ReasonFailedRender, err))
return r.endWithNegativeCondition(ctx, app, condition.ErrorCondition("Render", err), common.ApplicationRendering)
return r.endWithNegativeCondition(logCtx, app, condition.ErrorCondition("Render", err), common.ApplicationRendering)
}
assemble.HandleCheckManageWorkloadTrait(*handler.currentAppRev, comps)
if err := handler.HandleComponentsRevision(ctx, comps); err != nil {
klog.ErrorS(err, "Failed to handle compoents revision", "application", klog.KObj(app))
if err := handler.HandleComponentsRevision(logCtx, comps); err != nil {
logCtx.Error(err, "Failed to handle components revision")
r.Recorder.Event(app, event.Warning(velatypes.ReasonFailedRevision, err))
return r.endWithNegativeCondition(ctx, app, condition.ErrorCondition("Render", err), common.ApplicationRendering)
return r.endWithNegativeCondition(logCtx, app, condition.ErrorCondition("Render", err), common.ApplicationRendering)
}
klog.Info("Application manifests has prepared and ready for appRollout to handle", "application", klog.KObj(app))
}
// if inplace is false and rolloutPlan is nil, it means the user will use an outer AppRollout object to rollout the application
if handler.app.Spec.RolloutPlan != nil {
res, err := handler.handleRollout(ctx)
res, err := handler.handleRollout(logCtx)
if err != nil {
klog.ErrorS(err, "Failed to handle rollout", "application", klog.KObj(app))
logCtx.Error(err, "Failed to handle rollout")
r.Recorder.Event(app, event.Warning(velatypes.ReasonFailedRollout, err))
return r.endWithNegativeCondition(ctx, app, condition.ErrorCondition("Rollout", err), common.ApplicationRollingOut)
return r.endWithNegativeCondition(logCtx, app, condition.ErrorCondition("Rollout", err), common.ApplicationRollingOut)
}
// skip health check and garbage collection if rollout have not finished
// start next reconcile immediately
if res.Requeue || res.RequeueAfter > 0 {
if err := r.patchStatus(ctx, app, common.ApplicationRollingOut); err != nil {
return r.endWithNegativeCondition(ctx, app, condition.ReconcileError(err), common.ApplicationRollingOut)
if err := r.patchStatus(logCtx, app, common.ApplicationRollingOut); err != nil {
return r.endWithNegativeCondition(logCtx, app, condition.ReconcileError(err), common.ApplicationRollingOut)
}
return res, nil
}
@@ -253,7 +258,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
// there is no need reconcile immediately, that means the rollout operation have finished
r.Recorder.Event(app, event.Normal(velatypes.ReasonRollout, velatypes.MessageRollout))
app.Status.SetConditions(condition.ReadyCondition("Rollout"))
klog.InfoS("Finished rollout ", "application", klog.KObj(app))
logCtx.Info("Finished rollout ")
}
var phase = common.ApplicationRunning
if !hasHealthCheckPolicy(appFile.Policies) {
@@ -264,11 +269,11 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
}
if err := garbageCollection(ctx, handler); err != nil {
klog.ErrorS(err, "Failed to run garbage collection")
logCtx.Error(err, "Failed to run garbage collection")
r.Recorder.Event(app, event.Warning(velatypes.ReasonFailedGC, err))
return r.endWithNegativeCondition(ctx, app, condition.ReconcileError(err), phase)
return r.endWithNegativeCondition(logCtx, app, condition.ReconcileError(err), phase)
}
klog.Info("Successfully garbage collect", "application", klog.KObj(app))
logCtx.Info("Successfully garbage collect")
app.Status.SetConditions(condition.Condition{
Type: condition.TypeReady,
Status: corev1.ConditionTrue,
@@ -276,17 +281,17 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
Reason: condition.ReasonReconcileSuccess,
})
r.Recorder.Event(app, event.Normal(velatypes.ReasonDeployed, velatypes.MessageDeployed))
return ctrl.Result{}, r.patchStatus(ctx, app, phase)
return ctrl.Result{}, r.patchStatus(logCtx, app, phase)
}
// NOTE Because resource tracker is cluster-scoped resources, we cannot garbage collect them
// by setting application(namespace-scoped) as their owners.
// We must delete all resource trackers related to an application through finalizer logic.
func (r *Reconciler) handleFinalizers(ctx context.Context, app *v1beta1.Application) (bool, error) {
func (r *Reconciler) handleFinalizers(ctx monitorContext.Context, app *v1beta1.Application) (bool, error) {
if app.ObjectMeta.DeletionTimestamp.IsZero() {
if !meta.FinalizerExists(app, resourceTrackerFinalizer) {
meta.AddFinalizer(app, resourceTrackerFinalizer)
klog.InfoS("Register new finalizer for application", "application", klog.KObj(app), "finalizer", resourceTrackerFinalizer)
ctx.Info("Register new finalizer for application", "finalizer", resourceTrackerFinalizer)
return true, errors.Wrap(r.Client.Update(ctx, app), errUpdateApplicationFinalizer)
}
} else {
@@ -296,7 +301,7 @@ func (r *Reconciler) handleFinalizers(ctx context.Context, app *v1beta1.Applicat
rt := &v1beta1.ResourceTracker{}
rt.SetName(fmt.Sprintf("%s-%s", app.Namespace, app.Name))
if err := r.Client.Delete(ctx, rt); err != nil && !kerrors.IsNotFound(err) {
klog.ErrorS(err, "Failed to delete legacy resource tracker", "name", rt.Name)
ctx.Error(err, "Failed to delete legacy resource tracker", "name", rt.Name)
return true, errors.WithMessage(err, "cannot remove finalizer")
}
meta.RemoveFinalizer(app, legacyResourceTrackerFinalizer)
@@ -310,12 +315,12 @@ func (r *Reconciler) handleFinalizers(ctx context.Context, app *v1beta1.Applicat
}}
rtList := &v1beta1.ResourceTrackerList{}
if err := r.Client.List(ctx, rtList, listOpts...); err != nil {
klog.ErrorS(err, "Failed to list resource tracker of app", "name", app.Name)
ctx.Error(err, "Failed to list resource tracker of app", "name", app.Name)
return true, errors.WithMessage(err, "cannot remove finalizer")
}
for _, rt := range rtList.Items {
if err := r.Client.Delete(ctx, rt.DeepCopy()); err != nil && !kerrors.IsNotFound(err) {
klog.ErrorS(err, "Failed to delete resource tracker", "name", rt.Name)
ctx.Error(err, "Failed to delete resource tracker", "name", rt.Name)
return true, errors.WithMessage(err, "cannot remove finalizer")
}
}

View File

@@ -36,7 +36,6 @@ import (
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/application/dispatch"
"github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/applicationrollout"
"github.com/oam-dev/kubevela/pkg/controller/utils"
"github.com/oam-dev/kubevela/pkg/cue/process"
oamutil "github.com/oam-dev/kubevela/pkg/oam/util"
)
@@ -174,24 +173,34 @@ func (h *AppHandler) collectHealthStatus(wl *appfile.Workload, appRev *v1beta1.A
}
appName = appRev.Spec.Application.Name
isHealth = true
err error
)
if wl.CapabilityCategory == types.TerraformCategory {
return nil, true, nil
ctx := context.Background()
var configuration terraformapi.Configuration
if err := h.r.Client.Get(ctx, client.ObjectKey{Name: wl.Name, Namespace: h.app.Namespace}, &configuration); err != nil {
return nil, false, errors.WithMessagef(err, "app=%s, comp=%s, check health error", appName, wl.Name)
}
if configuration.Status.Apply.State != terraformtypes.Available {
status.Healthy = false
} else {
status.Healthy = true
}
status.Message = configuration.Status.Apply.Message
} else {
if ok, err := wl.EvalHealth(wl.Ctx, h.r.Client, h.app.Namespace); !ok || err != nil {
isHealth = false
status.Healthy = false
}
status.Message, err = wl.EvalStatus(wl.Ctx, h.r.Client, h.app.Namespace)
if err != nil {
return nil, false, errors.WithMessagef(err, "app=%s, comp=%s, evaluate workload status message error", appName, wl.Name)
}
}
if ok, err := wl.EvalHealth(wl.Ctx, h.r.Client, h.app.Namespace); !ok || err != nil {
isHealth = false
status.Healthy = false
}
var traitStatusList []common.ApplicationTraitStatus
var err error
status.Message, err = wl.EvalStatus(wl.Ctx, h.r.Client, h.app.Namespace)
if err != nil {
return nil, false, errors.WithMessagef(err, "app=%s, comp=%s, evaluate workload status message error", appName, wl.Name)
}
for _, tr := range wl.Traits {
var traitStatus = common.ApplicationTraitStatus{
Type: tr.Name,
@@ -214,95 +223,6 @@ func (h *AppHandler) collectHealthStatus(wl *appfile.Workload, appRev *v1beta1.A
return &status, isHealth, nil
}
func (h *AppHandler) aggregateHealthStatus(appFile *appfile.Appfile) ([]common.ApplicationComponentStatus, bool, error) {
var appStatus []common.ApplicationComponentStatus
var healthy = true
for _, wl := range appFile.Workloads {
var status = common.ApplicationComponentStatus{
Name: wl.Name,
WorkloadDefinition: wl.FullTemplate.Reference.Definition,
Healthy: true,
}
var pCtx process.Context
switch wl.CapabilityCategory {
case types.TerraformCategory:
pCtx = appfile.NewBasicContext(wl, appFile.Name, appFile.AppRevisionName, appFile.Namespace)
ctx := context.Background()
var configuration terraformapi.Configuration
if err := h.r.Client.Get(ctx, client.ObjectKey{Name: wl.Name, Namespace: h.app.Namespace}, &configuration); err != nil {
return nil, false, errors.WithMessagef(err, "app=%s, comp=%s, check health error", appFile.Name, wl.Name)
}
if configuration.Status.Apply.State != terraformtypes.Available {
healthy = false
status.Healthy = false
} else {
status.Healthy = true
}
status.Message = configuration.Status.Apply.Message
default:
pCtx = process.NewContext(h.app.Namespace, wl.Name, appFile.Name, appFile.AppRevisionName)
if !h.isNewRevision && wl.CapabilityCategory != types.CUECategory {
templateStr, err := appfile.GenerateCUETemplate(wl)
if err != nil {
return nil, false, err
}
wl.FullTemplate.TemplateStr = templateStr
}
if err := wl.EvalContext(pCtx); err != nil {
return nil, false, errors.WithMessagef(err, "app=%s, comp=%s, evaluate context error", appFile.Name, wl.Name)
}
workloadHealth, err := wl.EvalHealth(pCtx, h.r.Client, h.app.Namespace)
if err != nil {
return nil, false, errors.WithMessagef(err, "app=%s, comp=%s, check health error", appFile.Name, wl.Name)
}
if !workloadHealth {
// TODO(wonderflow): we should add a custom way to let the template say why it's unhealthy, only a bool flag is not enough
status.Healthy = false
healthy = false
}
status.Message, err = wl.EvalStatus(pCtx, h.r.Client, h.app.Namespace)
if err != nil {
return nil, false, errors.WithMessagef(err, "app=%s, comp=%s, evaluate workload status message error", appFile.Name, wl.Name)
}
}
var traitStatusList []common.ApplicationTraitStatus
for _, tr := range wl.Traits {
if err := tr.EvalContext(pCtx); err != nil {
return nil, false, errors.WithMessagef(err, "app=%s, comp=%s, trait=%s, evaluate context error", appFile.Name, wl.Name, tr.Name)
}
var traitStatus = common.ApplicationTraitStatus{
Type: tr.Name,
Healthy: true,
}
traitHealth, err := tr.EvalHealth(pCtx, h.r.Client, h.app.Namespace)
if err != nil {
return nil, false, errors.WithMessagef(err, "app=%s, comp=%s, trait=%s, check health error", appFile.Name, wl.Name, tr.Name)
}
if !traitHealth {
// TODO(wonderflow): we should add a custom way to let the template say why it's unhealthy, only a bool flag is not enough
traitStatus.Healthy = false
healthy = false
}
traitStatus.Message, err = tr.EvalStatus(pCtx, h.r.Client, h.app.Namespace)
if err != nil {
return nil, false, errors.WithMessagef(err, "app=%s, comp=%s, trait=%s, evaluate status message error", appFile.Name, wl.Name, tr.Name)
}
traitStatusList = append(traitStatusList, traitStatus)
}
status.Traits = traitStatusList
status.Scopes = generateScopeReference(wl.Scopes)
appStatus = append(appStatus, status)
}
return appStatus, healthy, nil
}
func generateScopeReference(scopes []appfile.Scope) []corev1.ObjectReference {
var references []corev1.ObjectReference
for _, scope := range scopes {

View File

@@ -24,22 +24,17 @@ import (
"github.com/oam-dev/kubevela/pkg/oam/testutil"
terraformtypes "github.com/oam-dev/terraform-controller/api/types"
terraformapi "github.com/oam-dev/terraform-controller/api/v1beta1"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/yaml"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
velatypes "github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/appfile"
)
const workloadDefinition = `
@@ -157,61 +152,3 @@ var _ = Describe("Test Application apply", func() {
Expect(strings.Compare(applabel, app.Name) == 0).Should(BeTrue())
})
})
var _ = Describe("Test statusAggregate", func() {
It("the component is Terraform type", func() {
var (
ctx = context.TODO()
componentName = "sample-oss"
ns = "default"
h = &AppHandler{r: reconciler, app: &v1beta1.Application{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{Namespace: ns},
}}
appFile = &appfile.Appfile{
Workloads: []*appfile.Workload{
{
Name: componentName,
FullTemplate: &appfile.Template{
Reference: common.WorkloadTypeDescriptor{
Definition: common.WorkloadGVK{APIVersion: "v1", Kind: "A1"},
},
},
CapabilityCategory: velatypes.TerraformCategory,
},
},
}
)
By("aggregate status")
statuses, healthy, err := h.aggregateHealthStatus(appFile)
Expect(statuses).Should(BeNil())
Expect(healthy).Should(Equal(false))
Expect(err).Should(HaveOccurred())
By("create Terraform configuration")
configuration := terraformapi.Configuration{
TypeMeta: metav1.TypeMeta{APIVersion: "terraform.core.oam.dev/v1beta1", Kind: "Configuration"},
ObjectMeta: metav1.ObjectMeta{Name: componentName, Namespace: ns},
}
k8sClient.Create(ctx, &configuration)
By("aggregate status again")
statuses, healthy, err = h.aggregateHealthStatus(appFile)
Expect(len(statuses)).Should(Equal(1))
Expect(healthy).Should(Equal(false))
Expect(err).Should(BeNil())
By("set status for Terraform configuration")
var gotConfiguration terraformapi.Configuration
k8sClient.Get(ctx, client.ObjectKey{Namespace: ns, Name: componentName}, &gotConfiguration)
gotConfiguration.Status.Apply.State = terraformtypes.Available
k8sClient.Status().Update(ctx, &gotConfiguration)
By("aggregate status one more time")
statuses, healthy, err = h.aggregateHealthStatus(appFile)
Expect(len(statuses)).Should(Equal(1))
Expect(healthy).Should(Equal(true))
Expect(err).Should(BeNil())
})
})

View File

@@ -43,7 +43,7 @@ func NewAppManifestsDispatcher(c client.Client, appRev *v1beta1.ApplicationRevis
c: c,
applicator: apply.NewAPIApplicator(c),
appRev: appRev,
gcHandler: NewGCHandler(c, appRev.Namespace),
gcHandler: NewGCHandler(c, appRev.Namespace, *appRev),
}
}

View File

@@ -26,6 +26,8 @@ import (
"testing"
"time"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/crossplane/crossplane-runtime/pkg/test"
. "github.com/onsi/ginkgo"
@@ -419,7 +421,9 @@ var _ = Describe("Test handleSkipGC func", func() {
})
It("Test GC skip func ", func() {
handler := GCHandler{c: k8sClient}
handler := GCHandler{c: k8sClient, appRev: v1beta1.ApplicationRevision{Spec: v1beta1.ApplicationRevisionSpec{
Application: v1beta1.Application{Spec: v1beta1.ApplicationSpec{Components: []common.ApplicationComponent{{Name: "mywebservice"}}}},
}}}
wlName := "test-workload"
resourceTracker := v1beta1.ResourceTracker{
ObjectMeta: metav1.ObjectMeta{
@@ -430,6 +434,7 @@ var _ = Describe("Test handleSkipGC func", func() {
skipWorkload := &appsv1.Deployment{TypeMeta: metav1.TypeMeta{APIVersion: "apps/v1", Kind: "Deployment"}}
skipWorkload.SetNamespace(namespaceName)
skipWorkload.SetName(wlName)
skipWorkload.SetLabels(map[string]string{oam.LabelAppComponent: "mywebservice"})
skipWorkload.SetOwnerReferences([]metav1.OwnerReference{*metav1.NewControllerRef(
&resourceTracker, v1beta1.ResourceTrackerKindVersionKind),
metav1.OwnerReference{UID: "app-uid", Name: "test-app", APIVersion: v1beta1.SchemeGroupVersion.String(), Kind: v1beta1.ApplicationKind}})

View File

@@ -22,6 +22,9 @@ import (
"github.com/stretchr/testify/assert"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/pkg/oam"
)
func TestSetOAMOwner(t *testing.T) {
@@ -107,3 +110,31 @@ func TestSetOAMOwner(t *testing.T) {
assert.Equal(t, ti.ExpOwner, ti.OO.GetOwnerReferences(), name)
}
}
func TestCheckComponentDeleted(t *testing.T) {
wl_1 := unstructured.Unstructured{}
wl_1.SetLabels(map[string]string{oam.LabelAppComponent: "comp-1"})
wl_2 := unstructured.Unstructured{}
wl_3 := unstructured.Unstructured{}
wl_3.SetLabels(map[string]string{oam.LabelAppComponent: "comp-3"})
components := []common.ApplicationComponent{{Name: "comp-1"}}
testCase := map[string]struct {
u unstructured.Unstructured
res bool
}{
"exsit comp": {wl_1, false},
"no label deleted": {wl_2, true},
"not exsit comp": {wl_3, true},
}
for caseName, s := range testCase {
b := checkResourceRelatedCompDeleted(s.u, components)
if b != s.res {
t.Errorf("check comp deleted func meet error: %s want %v got %v", caseName, s.res, b)
}
}
}

View File

@@ -28,6 +28,7 @@ import (
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/oam"
)
@@ -38,8 +39,8 @@ type GarbageCollector interface {
}
// NewGCHandler create a GCHandler
func NewGCHandler(c client.Client, ns string) *GCHandler {
return &GCHandler{c, ns, nil, nil}
func NewGCHandler(c client.Client, ns string, appRev v1beta1.ApplicationRevision) *GCHandler {
return &GCHandler{c, ns, nil, nil, appRev}
}
// GCHandler implement GarbageCollector interface
@@ -49,6 +50,8 @@ type GCHandler struct {
oldRT *v1beta1.ResourceTracker
newRT *v1beta1.ResourceTracker
appRev v1beta1.ApplicationRevision
}
// GarbageCollect delete the old resources that are no longer in the new resource tracker
@@ -137,6 +140,10 @@ func (h *GCHandler) handleResourceSkipGC(ctx context.Context, u *unstructured.Un
if _, exist := res.GetAnnotations()[oam.AnnotationSkipGC]; !exist {
return false, nil
}
// if the component have been deleted don't skipGC
if checkResourceRelatedCompDeleted(*res, h.appRev.Spec.Application.Spec.Components) {
return false, nil
}
var owners []metav1.OwnerReference
for _, ownerReference := range res.GetOwnerReferences() {
if ownerReference.UID == oldRt.GetUID() {
@@ -152,3 +159,14 @@ func (h *GCHandler) handleResourceSkipGC(ctx context.Context, u *unstructured.Un
klog.InfoS("succeed to handle a skipGC res kind ", res.GetKind(), "namespace", res.GetNamespace(), "name", res.GetName())
return true, nil
}
func checkResourceRelatedCompDeleted(res unstructured.Unstructured, comps []common.ApplicationComponent) bool {
compName := res.GetLabels()[oam.LabelAppComponent]
deleted := true
for _, comp := range comps {
if compName == comp.Name {
deleted = false
}
}
return deleted
}

View File

@@ -173,7 +173,7 @@ spec:
By("Check whether ConfigMap is created")
var cm corev1.ConfigMap
name := fmt.Sprintf("%s%s", types.CapabilityConfigMapNamePrefix, componentDefinitionName)
name := fmt.Sprintf("component-%s%s", types.CapabilityConfigMapNamePrefix, componentDefinitionName)
Eventually(func() bool {
err := k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, &cm)
return err == nil
@@ -264,7 +264,7 @@ spec:
By("Check whether ConfigMap is created")
var cm corev1.ConfigMap
name := fmt.Sprintf("%s%s", types.CapabilityConfigMapNamePrefix, componentDefinitionName)
name := fmt.Sprintf("component-%s%s", types.CapabilityConfigMapNamePrefix, componentDefinitionName)
Eventually(func() bool {
err := k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, &cm)
return err == nil
@@ -306,8 +306,9 @@ spec:
cd.SetNamespace(namespace)
Expect(k8sClient.Create(ctx, &cd)).Should(Succeed())
req := reconcile.Request{NamespacedName: client.ObjectKey{Name: cd.Name, Namespace: cd.Namespace}}
By("Check whether ConfigMap is created")
var cm corev1.ConfigMap
name := fmt.Sprintf("%s%s", types.CapabilityConfigMapNamePrefix, cd.Name)
name := fmt.Sprintf("component-%s%s", types.CapabilityConfigMapNamePrefix, cd.Name)
Eventually(func() bool {
testutil.ReconcileRetry(&r, req)
err := k8sClient.Get(ctx, client.ObjectKey{Namespace: cd.Namespace, Name: name}, &cm)
@@ -347,7 +348,7 @@ spec:
By("Check whether ConfigMap is created")
var cm corev1.ConfigMap
name := fmt.Sprintf("%s%s", types.CapabilityConfigMapNamePrefix, componentDefinitionName)
name := fmt.Sprintf("component-%s%s", types.CapabilityConfigMapNamePrefix, componentDefinitionName)
Eventually(func() bool {
err := k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, &cm)
return err == nil
@@ -392,7 +393,7 @@ spec:
By("Check whether ConfigMap is created")
var cm corev1.ConfigMap
name := fmt.Sprintf("%s%s", types.CapabilityConfigMapNamePrefix, componentDefinitionName)
name := fmt.Sprintf("component-%s%s", types.CapabilityConfigMapNamePrefix, componentDefinitionName)
Eventually(func() bool {
err := k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, &cm)
return err == nil
@@ -501,7 +502,7 @@ spec:
By("Check whether ConfigMap is created")
var cm corev1.ConfigMap
name := fmt.Sprintf("%s%s", types.CapabilityConfigMapNamePrefix, componentDefinitionName)
name := fmt.Sprintf("component-%s%s", types.CapabilityConfigMapNamePrefix, componentDefinitionName)
Eventually(func() bool {
err := k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, &cm)
return err == nil
@@ -725,7 +726,7 @@ spec:
By("Check whether ConfigMap is created")
var cm corev1.ConfigMap
name := fmt.Sprintf("%s%s", types.CapabilityConfigMapNamePrefix, componentDefinitionName)
name := fmt.Sprintf("component-%s%s", types.CapabilityConfigMapNamePrefix, componentDefinitionName)
Eventually(func() bool {
testutil.ReconcileRetry(&r, req)
err := k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, &cm)

View File

@@ -137,7 +137,7 @@ spec:
By("Check whether ConfigMap is created")
var cm corev1.ConfigMap
name := fmt.Sprintf("%s%s", types.CapabilityConfigMapNamePrefix, traitDefinitionName)
name := fmt.Sprintf("trait-%s%s", types.CapabilityConfigMapNamePrefix, traitDefinitionName)
Eventually(func() bool {
err := k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, &cm)
return err == nil
@@ -288,7 +288,7 @@ spec:
By("Check whether ConfigMap is created")
var cm corev1.ConfigMap
name := fmt.Sprintf("%s%s", types.CapabilityConfigMapNamePrefix, traitDefinitionName)
name := fmt.Sprintf("trait-%s%s", types.CapabilityConfigMapNamePrefix, traitDefinitionName)
Eventually(func() bool {
testutil.ReconcileRetry(&r, req)
err := k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, &cm)

View File

@@ -0,0 +1,197 @@
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package workflowstepdefinition
import (
"context"
"fmt"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/yaml"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/oam/testutil"
"github.com/oam-dev/kubevela/pkg/oam/util"
)
var _ = Describe("Apply WorkflowStepDefinition to store its schema to ConfigMap Test", func() {
ctx := context.Background()
var ns corev1.Namespace
Context("When the WorkflowStepDefinition is valid, but the namespace doesn't exist, should occur errors", func() {
It("Apply WorkflowStepDefinition", func() {
By("Apply WorkflowStepDefinition")
var validWorkflowStepDefinition = `
apiVersion: core.oam.dev/v1beta1
apiVersion: core.oam.dev/v1beta1
kind: WorkflowStepDefinition
metadata:
annotations:
definition.oam.dev/description: Apply raw kubernetes objects for your workflow steps
name: apply-object
namespace: not-exist
spec:
schematic:
cue:
template: |
import (
"vela/op"
)
apply: op.#Apply & {
value: parameter.value
cluster: parameter.cluster
}
parameter: {
// +usage=Specify the value of the object
value: {...}
// +usage=Specify the cluster of the object
cluster: *"" | string
}
`
var def v1beta1.WorkflowStepDefinition
Expect(yaml.Unmarshal([]byte(validWorkflowStepDefinition), &def)).Should(BeNil())
Expect(k8sClient.Create(ctx, &def)).Should(Not(Succeed()))
})
})
Context("When the WorkflowStepDefinition is valid, should create a ConfigMap", func() {
var WorkflowStepDefinitionName = "apply-object"
var namespace = "ns-wfs-def-1"
req := reconcile.Request{NamespacedName: client.ObjectKey{Name: WorkflowStepDefinitionName, Namespace: namespace}}
It("Apply WorkflowStepDefinition", func() {
ns = corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: namespace,
},
}
By("Create a namespace")
Expect(k8sClient.Create(ctx, &ns)).Should(SatisfyAny(Succeed(), &util.AlreadyExistMatcher{}))
By("Apply WorkflowStepDefinition")
var validWorkflowStepDefinition = `
apiVersion: core.oam.dev/v1beta1
kind: WorkflowStepDefinition
metadata:
annotations:
definition.oam.dev/description: Apply raw kubernetes objects for your workflow steps
name: apply-object
namespace: ns-wfs-def-1
spec:
schematic:
cue:
template: |
import (
"vela/op"
)
apply: op.#Apply & {
value: parameter.value
cluster: parameter.cluster
}
parameter: {
// +usage=Specify the value of the object
value: {...}
// +usage=Specify the cluster of the object
cluster: *"" | string
}
`
var def v1beta1.WorkflowStepDefinition
Expect(yaml.Unmarshal([]byte(validWorkflowStepDefinition), &def)).Should(BeNil())
Expect(k8sClient.Create(ctx, &def)).Should(Succeed())
testutil.ReconcileRetry(&r, req)
By("Check whether ConfigMap is created")
var cm corev1.ConfigMap
name := fmt.Sprintf("workflowstep-%s%s", types.CapabilityConfigMapNamePrefix, WorkflowStepDefinitionName)
Eventually(func() bool {
err := k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, &cm)
return err == nil
}, 30*time.Second, time.Second).Should(BeTrue())
Expect(cm.Data[types.OpenapiV3JSONSchema]).Should(Not(Equal("")))
Expect(cm.Labels["definition.oam.dev/name"]).Should(Equal(WorkflowStepDefinitionName))
By("Check whether ConfigMapRef refer to right")
Eventually(func() string {
_ = k8sClient.Get(ctx, client.ObjectKey{Namespace: def.Namespace, Name: def.Name}, &def)
return def.Status.ConfigMapRef
}, 30*time.Second, time.Second).Should(Equal(name))
By("Delete the workflowstep")
Expect(k8sClient.Delete(ctx, &def)).Should(Succeed())
testutil.ReconcileRetry(&r, req)
})
})
Context("When the WorkflowStepDefinition is invalid, should report issues", func() {
var invalidWorkflowStepDefinitionName = "invalid-wf1"
var namespace = "ns-wfs-def2"
BeforeEach(func() {
ns = corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: namespace,
},
}
By("Create a namespace")
Expect(k8sClient.Create(ctx, &ns)).Should(SatisfyAny(Succeed(), &util.AlreadyExistMatcher{}))
})
It("Applying invalid WorkflowStepDefinition", func() {
By("Apply the WorkflowStepDefinition")
var invalidWorkflowStepDefinition = `
apiVersion: core.oam.dev/v1beta1
kind: WorkflowStepDefinition
metadata:
annotations:
definition.oam.dev/description: Apply raw kubernetes objects for your workflow steps
name: invalid-wf1
namespace: ns-wfs-def2
spec:
schematic:
cue:
template: |
import (
"vela/op"
)
apply: op.#Apply & {
value: parameter.value
cluster: parameter.cluster
}
`
var invalidDef v1beta1.WorkflowStepDefinition
Expect(yaml.Unmarshal([]byte(invalidWorkflowStepDefinition), &invalidDef)).Should(BeNil())
Expect(k8sClient.Create(ctx, &invalidDef)).Should(Succeed())
gotWorkflowStepDefinition := &v1beta1.WorkflowStepDefinition{}
Expect(k8sClient.Get(ctx, client.ObjectKey{Name: invalidWorkflowStepDefinitionName, Namespace: namespace}, gotWorkflowStepDefinition)).Should(BeNil())
})
})
})

View File

@@ -61,6 +61,10 @@ const (
TerraformTupleTypePrefix string = "tuple("
TerraformMapTypePrefix string = "map("
TerraformObjectTypePrefix string = "object("
typeTraitDefinition = "trait"
typeComponentDefinition = "component"
typeWorkflowStepDefinition = "workflowstep"
)
// ErrNoSectionParameterInCue means there is not parameter section in Cue template of a workload
@@ -245,7 +249,7 @@ func (def *CapabilityComponentDefinition) StoreOpenAPISchema(ctx context.Context
Controller: pointer.BoolPtr(true),
BlockOwnerDeletion: pointer.BoolPtr(true),
}}
cmName, err := def.CreateOrUpdateConfigMap(ctx, k8sClient, namespace, componentDefinition.Name, jsonSchema, ownerReference)
cmName, err := def.CreateOrUpdateConfigMap(ctx, k8sClient, namespace, componentDefinition.Name, typeComponentDefinition, jsonSchema, ownerReference)
if err != nil {
return cmName, err
}
@@ -263,7 +267,7 @@ func (def *CapabilityComponentDefinition) StoreOpenAPISchema(ctx context.Context
Controller: pointer.BoolPtr(true),
BlockOwnerDeletion: pointer.BoolPtr(true),
}}
_, err = def.CreateOrUpdateConfigMap(ctx, k8sClient, namespace, revName, jsonSchema, ownerReference)
_, err = def.CreateOrUpdateConfigMap(ctx, k8sClient, namespace, revName, typeComponentDefinition, jsonSchema, ownerReference)
if err != nil {
return cmName, err
}
@@ -326,7 +330,7 @@ func (def *CapabilityTraitDefinition) StoreOpenAPISchema(ctx context.Context, k8
Controller: pointer.BoolPtr(true),
BlockOwnerDeletion: pointer.BoolPtr(true),
}}
cmName, err := def.CreateOrUpdateConfigMap(ctx, k8sClient, namespace, traitDefinition.Name, jsonSchema, ownerReference)
cmName, err := def.CreateOrUpdateConfigMap(ctx, k8sClient, namespace, traitDefinition.Name, typeTraitDefinition, jsonSchema, ownerReference)
if err != nil {
return cmName, err
}
@@ -344,7 +348,7 @@ func (def *CapabilityTraitDefinition) StoreOpenAPISchema(ctx context.Context, k8
Controller: pointer.BoolPtr(true),
BlockOwnerDeletion: pointer.BoolPtr(true),
}}
_, err = def.CreateOrUpdateConfigMap(ctx, k8sClient, namespace, revName, jsonSchema, ownerReference)
_, err = def.CreateOrUpdateConfigMap(ctx, k8sClient, namespace, revName, typeTraitDefinition, jsonSchema, ownerReference)
if err != nil {
return cmName, err
}
@@ -395,7 +399,7 @@ func (def *CapabilityStepDefinition) StoreOpenAPISchema(ctx context.Context, k8s
Controller: pointer.BoolPtr(true),
BlockOwnerDeletion: pointer.BoolPtr(true),
}}
cmName, err := def.CreateOrUpdateConfigMap(ctx, k8sClient, namespace, stepDefinition.Name, jsonSchema, ownerReference)
cmName, err := def.CreateOrUpdateConfigMap(ctx, k8sClient, namespace, stepDefinition.Name, typeWorkflowStepDefinition, jsonSchema, ownerReference)
if err != nil {
return cmName, err
}
@@ -413,7 +417,7 @@ func (def *CapabilityStepDefinition) StoreOpenAPISchema(ctx context.Context, k8s
Controller: pointer.BoolPtr(true),
BlockOwnerDeletion: pointer.BoolPtr(true),
}}
_, err = def.CreateOrUpdateConfigMap(ctx, k8sClient, namespace, revName, jsonSchema, ownerReference)
_, err = def.CreateOrUpdateConfigMap(ctx, k8sClient, namespace, revName, typeWorkflowStepDefinition, jsonSchema, ownerReference)
if err != nil {
return cmName, err
}
@@ -426,8 +430,8 @@ type CapabilityBaseDefinition struct {
// CreateOrUpdateConfigMap creates ConfigMap to store OpenAPI v3 schema or or updates data in ConfigMap
func (def *CapabilityBaseDefinition) CreateOrUpdateConfigMap(ctx context.Context, k8sClient client.Client, namespace,
definitionName string, jsonSchema []byte, ownerReferences []metav1.OwnerReference) (string, error) {
cmName := fmt.Sprintf("%s%s", types.CapabilityConfigMapNamePrefix, definitionName)
definitionName, definitionType string, jsonSchema []byte, ownerReferences []metav1.OwnerReference) (string, error) {
cmName := fmt.Sprintf("%s-%s%s", definitionType, types.CapabilityConfigMapNamePrefix, definitionName)
var cm v1.ConfigMap
var data = map[string]string{
types.OpenapiV3JSONSchema: string(jsonSchema),

View File

@@ -181,7 +181,7 @@ spec:
Controller: pointer.BoolPtr(true),
BlockOwnerDeletion: pointer.BoolPtr(true),
}}
_, err := def.CreateOrUpdateConfigMap(ctx, k8sClient, namespace, definitionName, []byte(""), ownerReference)
_, err := def.CreateOrUpdateConfigMap(ctx, k8sClient, namespace, definitionName, typeTraitDefinition, []byte(""), ownerReference)
Expect(err).Should(BeNil())
})
})

46
pkg/monitor/README.md Normal file
View File

@@ -0,0 +1,46 @@
# Package Usage
## Context
First, this context is compatible with built-in context interface.
Also it supports fork and commit like trace span.
### Fork
`Fork` generates a sub-context that inherits the parent's tags. When new tags are added to the `sub-context`, the `parent-context` is not affected.
### Commit
`Commit` will log the context duration, and export metrics or other execution information.
### usage
```
tracerCtx:=context.NewTraceContext(stdCtx,"$id")
defer tracerCtx.Commit("success")
// Execute sub-code logic
subCtx:=tracerCtx.Fork("sub-id")
...
subCtx.Commit("step is executed")
```
## Metrics
First, you need register `metricVec` in package `pkg/monitor/metrics`, like below:
```
StepDurationSummary = prometheus.NewSummaryVec(prometheus.SummaryOpts{
Name: "step_duration_ms",
Help: "step latency distributions.",
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
ConstLabels: prometheus.Labels{},
}, []string{"application", "workflow_revision", "step_name", "step_type"})
```
Now you can export metrics through the context, for example:
```
subCtx:=tracerCtx.Fork("sub-id",DurationMetric(func(v float64) {
metrics.StepDurationSummary.WithLabelValues(e.app.Name, e.status.AppRevision, stepStatus.Name, stepStatus.Type).Observe(v)
})
subCtx.Commit("export") // At this time, it will export the StepDurationSummary metrics.
```
Context currently only supports the `DurationMetric` exporter; you can submit a PR to support more exporters.
If a metric has nothing to do with the context, there is no need to export it through a context exporter.

View File

@@ -0,0 +1,170 @@
/*
Copyright 2021. The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package context
import (
stdctx "context"
"fmt"
"time"
"github.com/oam-dev/kubevela/pkg/utils"
"k8s.io/klog/v2"
)
const (
	// spanTagID is the tag name of span ID.
	spanTagID = "spanID"
)

// Context keep the trace info. It is a standard context.Context augmented
// with structured logging (Logger), key/value tags, and span-style
// Fork/Commit for measuring the duration of sub-operations.
type Context interface {
	stdctx.Context
	Logger
	// GetContext returns the wrapped raw context.
	GetContext() stdctx.Context
	// SetContext replaces the wrapped raw context.
	SetContext(ctx stdctx.Context)
	// AddTag appends key/value pairs that are attached to every log line.
	AddTag(keysAndValues ...interface{}) Context
	// Fork creates a child span that inherits this context's tags; the
	// given exporters fire when the child is committed.
	Fork(name string, exporters ...Exporter) Context
	// Commit ends the span, runs its exporters and logs its duration.
	Commit(msg string)
}

// Logger represents the ability to log messages, both errors and not.
type Logger interface {
	InfoDepth(depth int, msg string, keysAndValues ...interface{})
	Info(msg string, keysAndValues ...interface{})
	Error(err error, msg string, keysAndValues ...interface{})
	ErrorDepth(depth int, err error, msg string, keysAndValues ...interface{})
	Printf(format string, args ...interface{})
	V(level int)
}

// traceContext is the concrete Context implementation.
type traceContext struct {
	stdctx.Context

	id             string        // span ID; children join the parent ID with "."
	beginTimestamp time.Time     // span start; Commit measures duration from here
	logLevel       int           // set via V; copied to children by Fork
	tags           []interface{} // key/value pairs added to every log line
	exporters      []Exporter    // run by Commit with the span duration
	parent         *traceContext // enclosing span; nil for the root
}
// Fork creates a child Context extending the parent span. The child's ID is
// the parent's ID joined with "."+id (or the parent's ID as-is when id is
// empty), its tags are an independent copy of the parent's, and the supplied
// exporters fire when the child is committed.
func (t *traceContext) Fork(id string, exporters ...Exporter) Context {
	childID := t.id
	if id != "" {
		childID = t.id + "." + id
	}
	child := &traceContext{
		Context:        t.Context,
		id:             childID,
		tags:           copySlice(t.tags),
		logLevel:       t.logLevel,
		parent:         t,
		beginTimestamp: time.Now(),
		exporters:      exporters,
	}
	return child
}
// Commit finish the span record: it runs every registered exporter with the
// elapsed time and logs a "[Finished]" line carrying the span's tags and its
// human-readable duration.
func (t *traceContext) Commit(msg string) {
	msg = fmt.Sprintf("[Finished]: %s(%s)", t.id, msg)
	duration := time.Since(t.beginTimestamp)
	for _, export := range t.exporters {
		// Exporters receive the span duration in microseconds.
		export(t, duration.Microseconds())
	}
	klog.InfoSDepth(1, msg, t.getTagsWith("duration", duration.String())...)
}
// getTagsWith returns the context's tags followed by the extra
// keysAndValues and the trailing spanID pair, in a freshly allocated slice.
//
// A new slice is always allocated: appending directly to t.tags could
// alias its backing array once AddTag has left spare capacity, letting two
// log calls overwrite each other's appended key/value pairs.
func (t *traceContext) getTagsWith(keysAndValues ...interface{}) []interface{} {
	tags := make([]interface{}, 0, len(t.tags)+len(keysAndValues)+2)
	tags = append(tags, t.tags...)
	tags = append(tags, keysAndValues...)
	return append(tags, spanTagID, t.id)
}
// Info logs a non-error message with the given key/value pairs as context.
func (t *traceContext) Info(msg string, keysAndValues ...interface{}) {
	klog.InfoSDepth(1, msg, t.getTagsWith(keysAndValues...)...)
}

// GetContext get raw context.
func (t *traceContext) GetContext() stdctx.Context {
	return t.Context
}

// SetContext set raw context.
func (t *traceContext) SetContext(ctx stdctx.Context) {
	t.Context = ctx
}

// InfoDepth acts as Info but uses depth to determine which call frame to log.
func (t *traceContext) InfoDepth(depth int, msg string, keysAndValues ...interface{}) {
	// depth+1 skips this wrapper frame itself.
	klog.InfoSDepth(depth+1, msg, t.getTagsWith(keysAndValues...)...)
}

// Error logs an error, with the given message and key/value pairs as context.
func (t *traceContext) Error(err error, msg string, keysAndValues ...interface{}) {
	klog.ErrorSDepth(1, err, msg, t.getTagsWith(keysAndValues...)...)
}

// ErrorDepth acts as Error but uses depth to determine which call frame to log.
func (t *traceContext) ErrorDepth(depth int, err error, msg string, keysAndValues ...interface{}) {
	klog.ErrorSDepth(depth+1, err, msg, t.getTagsWith(keysAndValues...)...)
}

// Printf formats according to a format specifier and logs.
func (t *traceContext) Printf(format string, args ...interface{}) {
	klog.InfoSDepth(1, fmt.Sprintf(format, args...), t.getTagsWith()...)
}

// V reports whether verbosity at the call site is at least the requested level.
// NOTE(review): this only stores the level (children inherit it via Fork);
// nothing in this file reads logLevel to gate output, unlike klog.V — confirm intent.
func (t *traceContext) V(level int) {
	t.logLevel = level
}

// AddTag adds some key-value pairs of context to a logger.
func (t *traceContext) AddTag(keysAndValues ...interface{}) Context {
	t.tags = append(t.tags, keysAndValues...)
	return t
}
// NewTraceContext builds a root trace Context wrapping ctx. When id is
// empty, a random "i-XXXXXXXX" identifier is generated.
func NewTraceContext(ctx stdctx.Context, id string) Context {
	spanID := id
	if spanID == "" {
		spanID = "i-" + utils.RandomString(8)
	}
	root := &traceContext{
		Context:        ctx,
		id:             spanID,
		beginTimestamp: time.Now(),
	}
	return root
}
// copySlice returns an independent copy of in; mutating the input afterwards
// does not affect the returned slice.
func copySlice(in []interface{}) []interface{} {
	out := make([]interface{}, 0, len(in))
	return append(out, in...)
}
// Exporter exports execution information (e.g. metrics) for a span when its
// context is committed. duration is the span's elapsed time in microseconds
// (see Commit).
type Exporter func(t *traceContext, duration int64)

// DurationMetric adapts a float64 observer (such as a prometheus summary's
// Observe) into an Exporter that reports the span duration in milliseconds.
func DurationMetric(h func(v float64)) Exporter {
	return func(t *traceContext, duration int64) {
		// Convert to float64 before dividing so sub-millisecond precision
		// is kept; the previous integer division truncated any duration
		// below 1ms to 0.
		h(float64(duration) / 1000)
	}
}

View File

@@ -0,0 +1,45 @@
/*
Copyright 2021. The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package context
import (
"context"
"fmt"
"testing"
"time"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/types"
)
// TestLog smoke-tests the trace context: tag handling, the Info/Error
// logging helpers at several call depths, and a forked child span whose
// DurationMetric exporter fires on Commit.
func TestLog(t *testing.T) {
	// Root span identified by a namespaced-name string.
	ctx := NewTraceContext(context.Background(), types.NamespacedName{
		Namespace: "default",
		Name:      "test-app",
	}.String())
	ctx.AddTag("controller", "application")
	ctx.Info("init")
	ctx.InfoDepth(1, "init")
	// Root span is committed when the test returns.
	defer ctx.Commit("close")
	// Child span exporting its duration (in ms) through DurationMetric.
	spanCtx := ctx.Fork("child1", DurationMetric(func(v float64) {
		fmt.Println(v)
	}))
	// Sleep so the child span records a non-trivial duration.
	time.Sleep(time.Millisecond * 30)
	err := errors.New("mock error")
	ctx.Error(err, "test case", "generated", "test_log")
	ctx.ErrorDepth(1, err, "test case", "generated", "test_log")
	spanCtx.Commit("finished")
}

View File

@@ -0,0 +1,36 @@
/*
Copyright 2021. The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"github.com/prometheus/client_golang/prometheus"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/metrics"
)
var (
	// StepDurationSummary report the step execution duration summary.
	// Labels: application, workflow_revision, step_name, step_type.
	// Observed values are milliseconds (per the metric name).
	StepDurationSummary = prometheus.NewSummaryVec(prometheus.SummaryOpts{
		Name: "step_duration_ms",
		Help: "step latency distributions.",
		// Tracked quantiles: p50 (±5%), p90 (±1%), p99 (±0.1%).
		Objectives:  map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
		ConstLabels: prometheus.Labels{},
	}, []string{"application", "workflow_revision", "step_name", "step_type"})
)

// init registers the collectors with the controller-runtime metrics registry.
// A registration failure (e.g. duplicate registration) is only logged, not fatal.
func init() {
	if err := metrics.Registry.Register(StepDurationSummary); err != nil {
		klog.Error(err)
	}
}

View File

@@ -34,6 +34,7 @@ import (
"cuelang.org/go/encoding/openapi"
"github.com/AlecAivazis/survey/v2"
"github.com/hashicorp/hcl/v2/hclparse"
clustergatewayapi "github.com/oam-dev/cluster-gateway/pkg/apis/cluster/v1alpha1"
"github.com/oam-dev/terraform-config-inspect/tfconfig"
terraformv1beta1 "github.com/oam-dev/terraform-controller/api/v1beta1"
kruise "github.com/openkruise/kruise-api/apps/v1alpha1"
@@ -54,6 +55,7 @@ import (
oamcore "github.com/oam-dev/kubevela/apis/core.oam.dev"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
oamstandard "github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
velacue "github.com/oam-dev/kubevela/pkg/cue"
"github.com/oam-dev/kubevela/pkg/cue/model"
@@ -76,6 +78,7 @@ func init() {
_ = terraformv1beta1.AddToScheme(Scheme)
_ = ocmclusterv1alpha1.Install(Scheme)
_ = ocmworkv1.Install(Scheme)
_ = clustergatewayapi.AddToScheme(Scheme)
// +kubebuilder:scaffold:scheme
}
@@ -242,23 +245,51 @@ func RealtimePrintCommandOutput(cmd *exec.Cmd, logFile string) error {
// ClusterObject2Map convert ClusterObjectReference to a readable map
func ClusterObject2Map(refs []common.ClusterObjectReference) map[string]string {
clusterResourceRefTmpl := "Cluster: %s | Namespace: %s | GVK: %s/%s | Name: %s"
clusterResourceRefTmpl := "Cluster: %s | Namespace: %s | Component: %s | Kind: %s"
objs := make(map[string]string, len(refs))
for _, r := range refs {
if r.Cluster == "" {
r.Cluster = "local"
}
objs[r.Name] = fmt.Sprintf(clusterResourceRefTmpl, r.Cluster, r.Namespace, r.APIVersion, r.ResourceVersion, r.Name)
objs[r.Cluster+"/"+r.Namespace+"/"+r.Name] = fmt.Sprintf(clusterResourceRefTmpl, r.Cluster, r.Namespace, r.Name, r.Kind)
}
return objs
}
// AskToChooseOneAppliedResource will ask users to select one applied resource of the application if more than one
// ResourceLocation indicates the resource location
type ResourceLocation struct {
Cluster string
Namespace string
}
// filterWorkload keeps only the applied resources whose Kind is a workload
// type whose logs can be followed: Deployment, StatefulSet, CloneSet or Job.
func filterWorkload(resources []common.ClusterObjectReference) []common.ClusterObjectReference {
	var loggable []common.ClusterObjectReference
	for _, ref := range resources {
		switch ref.Kind {
		case "Deployment", "StatefulSet", "CloneSet", "Job":
			loggable = append(loggable, ref)
		}
	}
	return loggable
}
// AskToChooseOneEnvResource will ask users to select one applied resource of the application if more than one
// resources is a map for component to applied resources
// return the selected ClusterObjectReference
func AskToChooseOneAppliedResource(resources []common.ClusterObjectReference) (*common.ClusterObjectReference, error) {
func AskToChooseOneEnvResource(app *v1beta1.Application) (*common.ClusterObjectReference, error) {
resources := app.Status.AppliedResources
if len(resources) == 0 {
return nil, fmt.Errorf("no applied resources exist in the application")
return nil, fmt.Errorf("no resources in the application deployed yet")
}
resources = filterWorkload(resources)
// filter locations
if len(resources) == 0 {
return nil, fmt.Errorf("no supported workload resources detected in deployed resources")
}
if len(resources) == 1 {
return &resources[0], nil
@@ -269,7 +300,7 @@ func AskToChooseOneAppliedResource(resources []common.ClusterObjectReference) (*
ops = append(ops, r)
}
prompt := &survey.Select{
Message: "You have multiple applied resources in your app. Please choose one:",
Message: fmt.Sprintf("You have %d deployed resources in your app. Please choose one:", len(ops)),
Options: ops,
}
var selectedRsc string
@@ -285,7 +316,7 @@ func AskToChooseOneAppliedResource(resources []common.ClusterObjectReference) (*
return nil, fmt.Errorf("choosing resource err %w", err)
}
// AskToChooseOneService will ask users to select one service of the application if more than one exidi
// AskToChooseOneService will ask users to select one service of the application if more than one
func AskToChooseOneService(svcNames []string) (string, error) {
if len(svcNames) == 0 {
return "", fmt.Errorf("no service exist in the application")
@@ -305,6 +336,26 @@ func AskToChooseOneService(svcNames []string) (string, error) {
return svcName, nil
}
// AskToChooseOnePods will ask users to select one pod of the resource if
// more than one exists. With exactly one pod it is returned directly; with
// none an error is reported.
func AskToChooseOnePods(podNames []string) (string, error) {
	if len(podNames) == 0 {
		// Fixed copy-paste from AskToChooseOneService: this helper is
		// about pods, not services.
		return "", fmt.Errorf("no pods exist in the specified resource")
	}
	if len(podNames) == 1 {
		return podNames[0], nil
	}
	prompt := &survey.Select{
		Message: "You have multiple pods in the specified resource. Please choose one: ",
		Options: podNames,
	}
	var podName string
	// survey.AskOne blocks on interactive terminal input.
	if err := survey.AskOne(prompt, &podName); err != nil {
		return "", fmt.Errorf("choosing pod err %w", err)
	}
	return podName, nil
}
// ReadYamlToObject will read a yaml K8s object to runtime.Object
func ReadYamlToObject(path string, object k8sruntime.Object) error {
data, err := os.ReadFile(filepath.Clean(path))

58
pkg/utils/url.go Normal file
View File

@@ -0,0 +1,58 @@
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"fmt"
"regexp"
)
// apiServerEndpointRegexp matches an optional http/https scheme, a host
// (any run of characters without ':' or whitespace) and an optional numeric
// port. Compiled once at package scope instead of on every call.
var apiServerEndpointRegexp = regexp.MustCompile(`^((?P<scheme>http|https)://)?(?P<host>[^:\s]+)(:(?P<port>[0-9]+))?$`)

// ParseAPIServerEndpoint automatically construct the full url of APIServer.
// It will patch port and scheme if not exists:
//   - missing scheme: https, unless the explicit port is 80 (then http)
//   - missing port:   443 for https, 80 for http
//
// NOTE: hosts containing ':' (e.g. bare IPv6 addresses) are rejected by the
// pattern above.
func ParseAPIServerEndpoint(server string) (string, error) {
	r := apiServerEndpointRegexp
	if !r.MatchString(server) {
		return "", fmt.Errorf("invalid endpoint url: %s", server)
	}
	var scheme, port, host string
	results := r.FindStringSubmatch(server)
	for i, name := range r.SubexpNames() {
		switch name {
		case "scheme":
			scheme = results[i]
		case "host":
			host = results[i]
		case "port":
			port = results[i]
		}
	}
	if scheme == "" {
		if port == "80" {
			scheme = "http"
		} else {
			scheme = "https"
		}
	}
	if port == "" {
		if scheme == "http" {
			port = "80"
		} else {
			port = "443"
		}
	}
	return fmt.Sprintf("%s://%s:%s", scheme, host, port), nil
}

62
pkg/utils/url_test.go Normal file
View File

@@ -0,0 +1,62 @@
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"testing"
"github.com/stretchr/testify/require"
)
// TestParseEndpoint verifies scheme and port completion for API server
// endpoint strings, plus rejection of malformed input.
func TestParseEndpoint(t *testing.T) {
	r := require.New(t)
	cases := []struct {
		in      string
		want    string
		wantErr bool
	}{
		{in: "127.0.0.1", want: "https://127.0.0.1:443"},
		{in: "http://127.0.0.1", want: "http://127.0.0.1:80"},
		{in: "127.0.0.1:6443", want: "https://127.0.0.1:6443"},
		{in: "127.0.0.1:80", want: "http://127.0.0.1:80"},
		{in: "localhost", want: "https://localhost:443"},
		{in: "https://worker-control-plane:6443", want: "https://worker-control-plane:6443"},
		{in: "invalid url", wantErr: true},
	}
	for _, tc := range cases {
		got, err := ParseAPIServerEndpoint(tc.in)
		if tc.wantErr {
			r.Error(err)
			continue
		}
		r.NoError(err)
		r.Equal(tc.want, got)
	}
}

View File

@@ -16,10 +16,9 @@ limitations under the License.
package workflow
import (
"context"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/monitor/context"
"github.com/oam-dev/kubevela/pkg/workflow/types"
)

View File

@@ -22,6 +22,8 @@ import (
"fmt"
"strings"
monitorContext "github.com/oam-dev/kubevela/pkg/monitor/context"
"github.com/oam-dev/kubevela/pkg/workflow/hooks"
"cuelang.org/go/cue"
@@ -148,16 +150,28 @@ func (t *TaskLoader) makeTaskGenerator(templ string) (wfTypes.TaskGenerator, err
return false
}
tRunner.run = func(ctx wfContext.Context, options *wfTypes.TaskRunOptions) (common.WorkflowStepStatus, *wfTypes.Operation, error) {
if options.GetTracer == nil {
options.GetTracer = func(id string, step v1beta1.WorkflowStep) monitorContext.Context {
return monitorContext.NewTraceContext(context.Background(), "")
}
}
tracer := options.GetTracer(exec.wfStatus.ID, wfStep).AddTag("step_name", wfStep.Name, "step_type", wfStep.Type)
defer func() {
tracer.Commit(string(exec.status().Phase))
}()
if t.runOptionsProcess != nil {
t.runOptionsProcess(options)
}
paramsValue, err := ctx.MakeParameter(params)
if err != nil {
tracer.Error(err, "make parameter")
return common.WorkflowStepStatus{}, nil, errors.WithMessage(err, "make parameter")
}
for _, hook := range options.PreStartHooks {
if err := hook(ctx, paramsValue, wfStep); err != nil {
tracer.Error(err, "do preStartHook")
return common.WorkflowStepStatus{}, nil, errors.WithMessage(err, "do preStartHook")
}
}
@@ -176,13 +190,19 @@ func (t *TaskLoader) makeTaskGenerator(templ string) (wfTypes.TaskGenerator, err
paramFile = fmt.Sprintf(model.ParameterFieldName+": {%s}\n", ps)
}
taskv, err := t.makeValue(ctx, strings.Join([]string{templ, paramFile}, "\n"), genOpt.ID)
taskv, err := t.makeValue(ctx, strings.Join([]string{templ, paramFile}, "\n"), exec.wfStatus.ID)
if err != nil {
exec.err(err, StatusReasonRendering)
return exec.status(), exec.operation(), nil
}
exec.tracer = tracer
if isDebugMode(taskv) {
exec.printStep("workflowStepStart", "workflow", "", taskv)
defer exec.printStep("workflowStepEnd", "workflow", "", taskv)
}
if err := exec.doSteps(ctx, taskv); err != nil {
tracer.Error(err, "do steps")
exec.err(err, StatusReasonExecute)
return exec.status(), exec.operation(), nil
}
@@ -221,6 +241,8 @@ type executor struct {
suspend bool
terminated bool
wait bool
tracer monitorContext.Context
}
// Suspend let workflow pause.
@@ -264,8 +286,17 @@ func (exec *executor) status() common.WorkflowStepStatus {
return exec.wfStatus
}
func (exec *executor) printStep(phase string, provider string, do string, v *value.Value) {
msg, _ := v.String()
exec.tracer.Info("cue eval: "+msg, "phase", phase, "provider", provider, "do", do)
}
// Handle process task-step value by provider and do.
func (exec *executor) Handle(ctx wfContext.Context, provider string, do string, v *value.Value) error {
if isDebugMode(v) {
exec.printStep("stepStart", provider, do, v)
defer exec.printStep("stepEnd", provider, do, v)
}
h, exist := exec.handlers.GetHandler(provider, do)
if !exist {
return errors.Errorf("handler not found")
@@ -336,6 +367,11 @@ func isStepList(fieldName string) bool {
return strings.HasPrefix(fieldName, "#up_")
}
func isDebugMode(v *value.Value) bool {
debug, _ := v.CueValue().LookupDef("#debug").Bool()
return debug
}
func opTpy(v *value.Value) string {
return getLabel(v, "#do")
}

View File

@@ -22,6 +22,7 @@ import (
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/cue/model/value"
monitorCtx "github.com/oam-dev/kubevela/pkg/monitor/context"
wfContext "github.com/oam-dev/kubevela/pkg/workflow/context"
)
@@ -42,6 +43,7 @@ type TaskRunOptions struct {
Data *value.Value
PreStartHooks []TaskPreStartHook
PostStopHooks []TaskPostStopHook
GetTracer func(id string, step v1beta1.WorkflowStep) monitorCtx.Context
RunSteps func(isDag bool, runners ...TaskRunner) (*common.WorkflowStatus, error)
}

View File

@@ -17,7 +17,6 @@ limitations under the License.
package workflow
import (
"context"
"fmt"
"github.com/pkg/errors"
@@ -27,6 +26,8 @@ import (
oamcore "github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/controller/utils"
"github.com/oam-dev/kubevela/pkg/cue/model/value"
monitorContext "github.com/oam-dev/kubevela/pkg/monitor/context"
"github.com/oam-dev/kubevela/pkg/monitor/metrics"
"github.com/oam-dev/kubevela/pkg/oam/util"
wfContext "github.com/oam-dev/kubevela/pkg/workflow/context"
wfTypes "github.com/oam-dev/kubevela/pkg/workflow/types"
@@ -52,16 +53,18 @@ func NewWorkflow(app *oamcore.Application, cli client.Client, mode common.Workfl
}
// ExecuteSteps process workflow step in order.
func (w *workflow) ExecuteSteps(ctx context.Context, appRev *oamcore.ApplicationRevision, taskRunners []wfTypes.TaskRunner) (common.WorkflowState, error) {
func (w *workflow) ExecuteSteps(ctx monitorContext.Context, appRev *oamcore.ApplicationRevision, taskRunners []wfTypes.TaskRunner) (common.WorkflowState, error) {
revAndSpecHash, err := computeAppRevisionHash(appRev.Name, w.app)
if err != nil {
return common.WorkflowStateExecuting, err
}
ctx.AddTag("workflow_version", revAndSpecHash)
if len(taskRunners) == 0 {
return common.WorkflowStateFinished, nil
}
if w.app.Status.Workflow == nil || w.app.Status.Workflow.AppRevision != revAndSpecHash {
ctx.Info("Restart Workflow")
w.app.Status.Workflow = &common.WorkflowStatus{
AppRevision: revAndSpecHash,
Mode: common.WorkflowModeStep,
@@ -87,22 +90,22 @@ func (w *workflow) ExecuteSteps(ctx context.Context, appRev *oamcore.Application
return common.WorkflowStateFinished, nil
}
var (
wfCtx wfContext.Context
)
wfCtx, err = w.makeContext(w.app.Name)
wfCtx, err := w.makeContext(w.app.Name)
if err != nil {
ctx.Error(err, "make context")
return common.WorkflowStateExecuting, err
}
e := &engine{
status: wfStatus,
dagMode: w.dagMode,
status: wfStatus,
dagMode: w.dagMode,
monitorCtx: ctx,
app: w.app,
}
err = e.run(wfCtx, taskRunners)
if err != nil {
ctx.Error(err, "run steps")
return common.WorkflowStateExecuting, err
}
if wfStatus.Terminated {
@@ -242,7 +245,13 @@ func (e *engine) todoByIndex(taskRunners []wfTypes.TaskRunner) []wfTypes.TaskRun
func (e *engine) steps(wfCtx wfContext.Context, taskRunners []wfTypes.TaskRunner) error {
for _, runner := range taskRunners {
status, operation, err := runner.Run(wfCtx, &wfTypes.TaskRunOptions{})
status, operation, err := runner.Run(wfCtx, &wfTypes.TaskRunOptions{
GetTracer: func(id string, stepStatus oamcore.WorkflowStep) monitorContext.Context {
return e.monitorCtx.Fork(id, monitorContext.DurationMetric(func(v float64) {
metrics.StepDurationSummary.WithLabelValues(e.app.Namespace+"/"+e.app.Name, e.status.AppRevision, stepStatus.Name, stepStatus.Type).Observe(v)
}))
},
})
if err != nil {
return err
}
@@ -269,8 +278,10 @@ func (e *engine) steps(wfCtx wfContext.Context, taskRunners []wfTypes.TaskRunner
}
type engine struct {
dagMode bool
status *common.WorkflowStatus
dagMode bool
status *common.WorkflowStatus
monitorCtx monitorContext.Context
app *oamcore.Application
}
func (e *engine) isDag() bool {

View File

@@ -20,6 +20,8 @@ import (
"context"
"encoding/json"
monitorContext "github.com/oam-dev/kubevela/pkg/monitor/context"
"github.com/oam-dev/kubevela/pkg/cue/model/value"
. "github.com/onsi/ginkgo"
@@ -67,8 +69,9 @@ var _ = Describe("Test Workflow", func() {
Type: "success",
},
})
ctx := monitorContext.NewTraceContext(context.Background(), "test-app")
wf := NewWorkflow(app, k8sClient, common.WorkflowModeStep)
state, err := wf.ExecuteSteps(context.Background(), revision, runners)
state, err := wf.ExecuteSteps(ctx, revision, runners)
Expect(err).ToNot(HaveOccurred())
Expect(state).Should(BeEquivalentTo(common.WorkflowStateExecuting))
workflowStatus := app.Status.Workflow
@@ -105,7 +108,7 @@ var _ = Describe("Test Workflow", func() {
app.Status.Workflow = workflowStatus
wf = NewWorkflow(app, k8sClient, common.WorkflowModeStep)
state, err = wf.ExecuteSteps(context.Background(), revision, runners)
state, err = wf.ExecuteSteps(ctx, revision, runners)
Expect(err).ToNot(HaveOccurred())
Expect(state).Should(BeEquivalentTo(common.WorkflowStateFinished))
app.Status.Workflow.ContextBackend = nil
@@ -144,8 +147,9 @@ var _ = Describe("Test Workflow", func() {
Type: "success",
},
})
ctx := monitorContext.NewTraceContext(context.Background(), "test-app")
wf := NewWorkflow(app, k8sClient, common.WorkflowModeStep)
state, err := wf.ExecuteSteps(context.Background(), revision, runners)
state, err := wf.ExecuteSteps(ctx, revision, runners)
Expect(err).ToNot(HaveOccurred())
Expect(state).Should(BeEquivalentTo(common.WorkflowStateSuspended))
wfStatus := *app.Status.Workflow
@@ -166,7 +170,7 @@ var _ = Describe("Test Workflow", func() {
})).Should(BeEquivalentTo(""))
// check suspend...
state, err = wf.ExecuteSteps(context.Background(), revision, runners)
state, err = wf.ExecuteSteps(ctx, revision, runners)
Expect(err).ToNot(HaveOccurred())
Expect(state).Should(BeEquivalentTo(common.WorkflowStateSuspended))
@@ -174,7 +178,7 @@ var _ = Describe("Test Workflow", func() {
app.Status.Workflow.Suspend = false
// check app meta changed
app.Labels = map[string]string{"for-test": "changed"}
state, err = wf.ExecuteSteps(context.Background(), revision, runners)
state, err = wf.ExecuteSteps(ctx, revision, runners)
Expect(err).ToNot(HaveOccurred())
Expect(state).Should(BeEquivalentTo(common.WorkflowStateFinished))
app.Status.Workflow.ContextBackend = nil
@@ -196,7 +200,7 @@ var _ = Describe("Test Workflow", func() {
}},
})).Should(BeEquivalentTo(""))
state, err = wf.ExecuteSteps(context.Background(), revision, runners)
state, err = wf.ExecuteSteps(ctx, revision, runners)
Expect(err).ToNot(HaveOccurred())
Expect(state).Should(BeEquivalentTo(common.WorkflowStateFinished))
})
@@ -212,8 +216,9 @@ var _ = Describe("Test Workflow", func() {
Type: "terminate",
},
})
ctx := monitorContext.NewTraceContext(context.Background(), "test-app")
wf := NewWorkflow(app, k8sClient, common.WorkflowModeStep)
state, err := wf.ExecuteSteps(context.Background(), revision, runners)
state, err := wf.ExecuteSteps(ctx, revision, runners)
Expect(err).ToNot(HaveOccurred())
Expect(state).Should(BeEquivalentTo(common.WorkflowStateTerminated))
app.Status.Workflow.ContextBackend = nil
@@ -232,7 +237,7 @@ var _ = Describe("Test Workflow", func() {
}},
})).Should(BeEquivalentTo(""))
state, err = wf.ExecuteSteps(context.Background(), revision, runners)
state, err = wf.ExecuteSteps(ctx, revision, runners)
Expect(err).ToNot(HaveOccurred())
Expect(state).Should(BeEquivalentTo(common.WorkflowStateTerminated))
})
@@ -248,8 +253,9 @@ var _ = Describe("Test Workflow", func() {
Type: "error",
},
})
ctx := monitorContext.NewTraceContext(context.Background(), "test-app")
wf := NewWorkflow(app, k8sClient, common.WorkflowModeStep)
state, err := wf.ExecuteSteps(context.Background(), revision, runners)
state, err := wf.ExecuteSteps(ctx, revision, runners)
Expect(err).To(HaveOccurred())
Expect(state).Should(BeEquivalentTo(common.WorkflowStateExecuting))
app.Status.Workflow.ContextBackend = nil
@@ -266,8 +272,9 @@ var _ = Describe("Test Workflow", func() {
It("skip workflow", func() {
app, runners := makeTestCase([]oamcore.WorkflowStep{})
ctx := monitorContext.NewTraceContext(context.Background(), "test-app")
wf := NewWorkflow(app, k8sClient, common.WorkflowModeStep)
state, err := wf.ExecuteSteps(context.Background(), revision, runners)
state, err := wf.ExecuteSteps(ctx, revision, runners)
Expect(err).ToNot(HaveOccurred())
Expect(state).Should(BeEquivalentTo(common.WorkflowStateFinished))
})
@@ -289,7 +296,8 @@ var _ = Describe("Test Workflow", func() {
})
pending = true
wf := NewWorkflow(app, k8sClient, common.WorkflowModeDAG)
state, err := wf.ExecuteSteps(context.Background(), revision, runners)
ctx := monitorContext.NewTraceContext(context.Background(), "test-app")
state, err := wf.ExecuteSteps(ctx, revision, runners)
Expect(err).ToNot(HaveOccurred())
Expect(state).Should(BeEquivalentTo(common.WorkflowStateExecuting))
app.Status.Workflow.ContextBackend = nil
@@ -307,12 +315,12 @@ var _ = Describe("Test Workflow", func() {
}},
})).Should(BeEquivalentTo(""))
state, err = wf.ExecuteSteps(context.Background(), revision, runners)
state, err = wf.ExecuteSteps(ctx, revision, runners)
Expect(err).ToNot(HaveOccurred())
Expect(state).Should(BeEquivalentTo(common.WorkflowStateExecuting))
pending = false
state, err = wf.ExecuteSteps(context.Background(), revision, runners)
state, err = wf.ExecuteSteps(ctx, revision, runners)
Expect(err).ToNot(HaveOccurred())
Expect(state).Should(BeEquivalentTo(common.WorkflowStateFinished))
app.Status.Workflow.ContextBackend = nil
@@ -346,8 +354,9 @@ var _ = Describe("Test Workflow", func() {
Type: "success",
},
})
ctx := monitorContext.NewTraceContext(context.Background(), "test-app")
wf := NewWorkflow(app, k8sClient, common.WorkflowModeStep)
state, err := wf.ExecuteSteps(context.Background(), revision, runners)
state, err := wf.ExecuteSteps(ctx, revision, runners)
Expect(err).ToNot(HaveOccurred())
Expect(state).Should(BeEquivalentTo(common.WorkflowStateExecuting))
Expect(app.Status.Workflow.Steps[0].Phase).Should(BeEquivalentTo(common.WorkflowStepPhaseRunning))

View File

@@ -20,7 +20,6 @@ import (
"context"
"fmt"
v1alpha12 "github.com/oam-dev/cluster-gateway/pkg/apis/cluster/v1alpha1"
"github.com/pkg/errors"
"github.com/spf13/cobra"
v1 "k8s.io/api/core/v1"
@@ -31,11 +30,15 @@ import (
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/controller-runtime/pkg/client"
v1alpha12 "github.com/oam-dev/cluster-gateway/pkg/apis/cluster/v1alpha1"
"github.com/oam-dev/cluster-gateway/pkg/generated/clientset/versioned"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/multicluster"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/utils"
"github.com/oam-dev/kubevela/pkg/utils/common"
errors3 "github.com/oam-dev/kubevela/pkg/utils/errors"
"github.com/oam-dev/kubevela/references/a/preimport"
@@ -83,6 +86,7 @@ func ClusterCommandGroup(c common.Args) *cobra.Command {
NewClusterJoinCommand(&c),
NewClusterRenameCommand(&c),
NewClusterDetachCommand(&c),
NewClusterProbeCommand(&c),
)
return cmd
}
@@ -197,8 +201,12 @@ func NewClusterJoinCommand(c *common.Args) *cobra.Command {
return errors.Wrapf(err, "cannot use cluster name %s", clusterName)
}
var credentialType v1alpha12.CredentialType
endpoint, err := utils.ParseAPIServerEndpoint(cluster.Server)
if err != nil {
return errors.Wrapf(err, "failed to parse apiserver endpoint")
}
data := map[string][]byte{
"endpoint": []byte(cluster.Server),
"endpoint": []byte(endpoint),
"ca.crt": cluster.CertificateAuthorityData,
}
if len(authInfo.Token) > 0 {
@@ -227,7 +235,7 @@ func NewClusterJoinCommand(c *common.Args) *cobra.Command {
_ = c.Client.Delete(context.Background(), secret)
return errors.Wrapf(err, "failed to ensure resourcetracker crd installed in cluster %s", clusterName)
}
cmd.Printf("Successfully add cluster %s, endpoint: %s.\n", clusterName, cluster.Server)
cmd.Printf("Successfully add cluster %s, endpoint: %s.\n", clusterName, endpoint)
return nil
},
}
@@ -324,3 +332,25 @@ func NewClusterDetachCommand(c *common.Args) *cobra.Command {
}
return cmd
}
// NewClusterProbeCommand create command to help user try health probe for existing cluster
func NewClusterProbeCommand(c *common.Args) *cobra.Command {
	cmd := &cobra.Command{
		Use:   "probe [CLUSTER_NAME]",
		Short: "probe managed cluster",
		// Exactly one positional argument: the name of the cluster to probe.
		Args: cobra.ExactValidArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			clusterName := args[0]
			// The local cluster is not reached through the cluster-gateway,
			// so a gateway-proxied health probe makes no sense for it.
			if clusterName == multicluster.ClusterLocalName {
				return errors.New("you must specify a remote cluster name")
			}
			// Issue a raw GET against the managed cluster's healthz endpoint,
			// proxied through the cluster-gateway REST client.
			// NOTE(review): NewForConfigOrDie panics on an invalid config —
			// presumably c.Config is always populated by CLI bootstrap; confirm.
			content, err := versioned.NewForConfigOrDie(c.Config).ClusterV1alpha1().ClusterGateways().RESTClient(clusterName).Get().AbsPath("healthz").DoRaw(context.TODO())
			if err != nil {
				return errors.Wrapf(err, "failed connect cluster %s", clusterName)
			}
			// Echo the raw healthz payload so the user sees what the cluster reported.
			cmd.Printf("Connect to cluster %s successfully.\n%s\n", clusterName, string(content))
			return nil
		},
	}
	return cmd
}

View File

@@ -22,9 +22,9 @@ import (
"strings"
"time"
"github.com/pkg/errors"
"github.com/spf13/cobra"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/client-go/kubernetes"
cmdexec "k8s.io/kubectl/pkg/cmd/exec"
@@ -32,7 +32,7 @@ import (
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/multicluster"
"github.com/oam-dev/kubevela/pkg/utils/common"
"github.com/oam-dev/kubevela/pkg/utils/util"
"github.com/oam-dev/kubevela/references/appfile"
@@ -47,20 +47,21 @@ const (
// VelaExecOptions creates options for `exec` command
type VelaExecOptions struct {
Cmd *cobra.Command
Args []string
Stdin bool
TTY bool
ServiceName string
Cmd *cobra.Command
Args []string
Stdin bool
TTY bool
context.Context
Ctx context.Context
VelaC common.Args
Env *types.EnvMeta
App *v1beta1.Application
f k8scmdutil.Factory
kcExecOptions *cmdexec.ExecOptions
ClientSet kubernetes.Interface
resourceName string
resourceNamespace string
f k8scmdutil.Factory
kcExecOptions *cmdexec.ExecOptions
ClientSet kubernetes.Interface
}
// NewExecCommand creates `exec` command
@@ -82,8 +83,10 @@ func NewExecCommand(c common.Args, ioStreams util.IOStreams) *cobra.Command {
Short: "Execute command in a container",
Long: "Execute command in a container",
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
if err := c.SetConfig(); err != nil {
return err
if c.Config == nil {
if err := c.SetConfig(); err != nil {
return errors.Wrapf(err, "failed to set config for k8s client")
}
}
o.VelaC = c
return nil
@@ -117,19 +120,26 @@ func NewExecCommand(c common.Args, ioStreams util.IOStreams) *cobra.Command {
Annotations: map[string]string{
types.TagCommandType: types.TypeApp,
},
Example: `
# Get output from running 'date' command from app pod, using the first container by default
vela exec my-app -- date
# Switch to raw terminal mode, sends stdin to 'bash' in containers of application my-app
# and sends stdout/stderr from 'bash' back to the client
kubectl exec my-app -i -t -- bash -il
`,
}
cmd.Flags().BoolVarP(&o.Stdin, "stdin", "i", defaultStdin, "Pass stdin to the container")
cmd.Flags().BoolVarP(&o.TTY, "tty", "t", defaultTTY, "Stdin is a TTY")
cmd.Flags().Duration(podRunningTimeoutFlag, defaultPodExecTimeout,
"The length of time (like 5s, 2m, or 3h, higher than zero) to wait until at least one pod is running",
)
cmd.Flags().StringVarP(&o.ServiceName, "svc", "s", "", "service name")
return cmd
}
// Init prepares the arguments accepted by the Exec command
func (o *VelaExecOptions) Init(ctx context.Context, c *cobra.Command, argsIn []string) error {
o.Context = ctx
o.Cmd = c
o.Args = argsIn
@@ -144,27 +154,28 @@ func (o *VelaExecOptions) Init(ctx context.Context, c *cobra.Command, argsIn []s
}
o.App = app
cf := genericclioptions.NewConfigFlags(true)
cf.Namespace = &o.Env.Namespace
o.f = k8scmdutil.NewFactory(k8scmdutil.NewMatchVersionFlags(cf))
if o.ClientSet == nil {
c, err := kubernetes.NewForConfig(o.VelaC.Config)
if err != nil {
return err
}
o.ClientSet = c
targetResource, err := common.AskToChooseOneEnvResource(o.App)
if err != nil {
return err
}
cf := genericclioptions.NewConfigFlags(true)
cf.Namespace = &targetResource.Namespace
o.f = k8scmdutil.NewFactory(k8scmdutil.NewMatchVersionFlags(cf))
o.resourceName = targetResource.Name
o.Ctx = multicluster.ContextWithClusterName(ctx, targetResource.Cluster)
o.resourceNamespace = targetResource.Namespace
k8sClient, err := kubernetes.NewForConfig(o.VelaC.Config)
if err != nil {
return err
}
o.ClientSet = k8sClient
return nil
}
// Complete loads data from the command environment
func (o *VelaExecOptions) Complete() error {
compName, err := o.getComponentName()
if err != nil {
return err
}
podName, err := o.getPodName(compName)
podName, err := o.getPodName(o.resourceName)
if err != nil {
return err
}
@@ -173,53 +184,27 @@ func (o *VelaExecOptions) Complete() error {
args := make([]string, len(o.Args))
copy(args, o.Args)
// args for kcExecOptions MUST be in such formart:
// args for kcExecOptions MUST be in such format:
// [podName, COMMAND...]
args[0] = podName
return o.kcExecOptions.Complete(o.f, o.Cmd, args, 1)
}
func (o *VelaExecOptions) getComponentName() (string, error) {
svcName := o.ServiceName
if svcName != "" {
for _, cc := range o.App.Spec.Components {
if cc.Name == svcName {
return svcName, nil
}
}
o.Cmd.Printf("The service name '%s' is not valid\n", svcName)
}
compName, err := common.AskToChooseOneService(appfile.GetComponents(o.App))
func (o *VelaExecOptions) getPodName(resourceName string) (string, error) {
podList, err := o.ClientSet.CoreV1().Pods(o.resourceNamespace).List(o.Ctx, v1.ListOptions{})
if err != nil {
return "", err
}
return compName, nil
}
func (o *VelaExecOptions) getPodName(compName string) (string, error) {
podList, err := o.ClientSet.CoreV1().Pods(o.Env.Namespace).List(o.Context, v1.ListOptions{
LabelSelector: labels.Set(map[string]string{
// TODO(roywang) except core workloads, not any workloads will pass these label to pod
// find a rigorous way to get pod by compname
oam.LabelAppComponent: compName,
}).String(),
})
if err != nil {
return "", err
}
if podList != nil && len(podList.Items) == 0 {
return "", fmt.Errorf("cannot get pods")
}
var pods []string
for _, p := range podList.Items {
if strings.HasPrefix(p.Name, compName+"-") {
return p.Name, nil
if strings.HasPrefix(p.Name, resourceName) {
pods = append(pods, p.Name)
}
}
// if no pod with name matched prefix as component name
// just return the first one
return podList.Items[0].Name, nil
if len(pods) < 1 {
return "", fmt.Errorf("no pods found created by resource %s", resourceName)
}
return common.AskToChooseOnePods(pods)
}
// Run executes a validated remote execution against a pod

View File

@@ -93,9 +93,8 @@ func (l *Args) Run(ctx context.Context, ioStreams util.IOStreams) error {
if err != nil {
return err
}
appliedResources := l.App.Status.AppliedResources
selectedRes, err := common.AskToChooseOneAppliedResource(appliedResources)
selectedRes, err := common.AskToChooseOneEnvResource(l.App)
if err != nil {
return err
}

View File

@@ -708,6 +708,13 @@ type CommonSchema struct {
// GenerateHelmAndKubeProperties get all properties of a Helm/Kube Category type capability
func (ref *ParseReference) GenerateHelmAndKubeProperties(ctx context.Context, capability *types.Capability) ([]CommonReference, []ConsoleReference, error) {
cmName := fmt.Sprintf("%s%s", types.CapabilityConfigMapNamePrefix, capability.Name)
switch capability.Type {
case types.TypeComponentDefinition:
cmName = fmt.Sprintf("component-%s", cmName)
case types.TypeTrait:
cmName = fmt.Sprintf("trait-%s", cmName)
default:
}
var cm v1.ConfigMap
commonRefs = make([]CommonReference, 0)
if err := ref.Client.Get(ctx, client.ObjectKey{Namespace: capability.Namespace, Name: cmName}, &cm); err != nil {

View File

@@ -0,0 +1,117 @@
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers_test
import (
"context"
"errors"
"fmt"
"math/rand"
"os/exec"
"strconv"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/oam/util"
"github.com/oam-dev/kubevela/pkg/utils/common"
)
// This suite enables the Terraform addon through the vela CLI binary and then
// verifies that an application using a Terraform-typed component reports that
// component in status.services.
var _ = Describe("Addon tests", func() {
	ctx := context.Background()
	var namespaceName string
	var ns corev1.Namespace
	var app v1beta1.Application

	// createNamespace force-deletes any leftover namespace with the same name,
	// waits until it is fully gone, then recreates it so each spec starts clean.
	createNamespace := func() {
		ns = corev1.Namespace{
			ObjectMeta: metav1.ObjectMeta{
				Name: namespaceName,
			},
		}
		// delete the namespaceName with all its resources
		Eventually(
			func() error {
				return k8sClient.Delete(ctx, &ns, client.PropagationPolicy(metav1.DeletePropagationForeground))
			},
			time.Second*120, time.Millisecond*500).Should(SatisfyAny(BeNil(), &util.NotFoundMatcher{}))
		By("make sure all the resources are removed")
		objectKey := client.ObjectKey{
			Name: namespaceName,
		}
		res := &corev1.Namespace{}
		Eventually(
			func() error {
				return k8sClient.Get(ctx, objectKey, res)
			},
			time.Second*120, time.Millisecond*500).Should(&util.NotFoundMatcher{})
		Eventually(
			func() error {
				return k8sClient.Create(ctx, &ns)
			},
			time.Second*3, time.Millisecond*300).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
	}

	BeforeEach(func() {
		By("Start to run a test, clean up previous resources")
		// Random hex suffix keeps repeated or parallel runs from colliding.
		namespaceName = "app-terraform" + "-" + strconv.FormatInt(rand.Int63(), 16)
		createNamespace()
	})

	AfterEach(func() {
		By("Clean up resources after a test")
		// Best-effort delete; the namespace deletion below sweeps up leftovers.
		k8sClient.Delete(ctx, &app)
		By(fmt.Sprintf("Delete the entire namespaceName %s", ns.Name))
		// delete the namespaceName with all its resources
		Expect(k8sClient.Delete(ctx, &ns, client.PropagationPolicy(metav1.DeletePropagationBackground))).Should(BeNil())
	})

	It("Addons Terraform is successfully enables and Terraform application works", func() {
		By("Install Addon Terraform")
		output, err := exec.Command("bash", "-c", "/tmp/vela addon enable terraform").Output()
		// Surface the CLI's stderr on failure to make CI logs actionable.
		var ee *exec.ExitError
		if errors.As(err, &ee) {
			fmt.Println("exit code error:", string(ee.Stderr))
		}
		Expect(err).Should(BeNil())
		Expect(string(output)).Should(ContainSubstring("Successfully enable addon:"))

		By("Apply an application with Terraform Component")
		var terraformApp v1beta1.Application
		Expect(common.ReadYamlToObject("testdata/app/app_terraform_oss.yaml", &terraformApp)).Should(BeNil())
		terraformApp.Namespace = namespaceName
		Eventually(func() error {
			return k8sClient.Create(ctx, terraformApp.DeepCopy())
		}, 10*time.Second, 500*time.Millisecond).Should(Succeed())

		By("Check status.services of the application")
		Eventually(
			func() error {
				// Get error intentionally ignored: a transient read failure
				// leaves app stale and the poll simply retries.
				k8sClient.Get(ctx, client.ObjectKey{Namespace: terraformApp.Namespace, Name: terraformApp.Name}, &app)
				if len(app.Status.Services) == 1 {
					return nil
				}
				return errors.New("expect 1 service")
			},
			// BUG FIX: this previously asserted ShouldNot(BeNil()), which is
			// satisfied precisely when the poll function returns an error —
			// i.e. when the expected service is ABSENT — so the check could
			// never catch a regression. Should(BeNil()) waits for success.
			time.Second*30, time.Millisecond*500).Should(BeNil())
	})
})

View File

@@ -0,0 +1,79 @@
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers_test
import (
"math/rand"
"testing"
"time"
"github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
crdv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/apimachinery/pkg/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
core "github.com/oam-dev/kubevela/apis/core.oam.dev"
// +kubebuilder:scaffold:imports
)
// k8sClient is the shared controller-runtime client used by every spec in
// this suite; it is initialized in BeforeSuite.
var k8sClient client.Client

// scheme aggregates all API groups registered for the test environment.
var scheme = runtime.NewScheme()
// TestAPIs is the go-test entry point: it hands control to Ginkgo and runs
// the whole "Addons Controller Suite" with the default plus newline reporters.
func TestAPIs(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecsWithDefaultAndCustomReporters(t,
		"Addons Controller Suite",
		[]Reporter{printer.NewlineReporter{}})
}
// BeforeSuite bootstraps the test environment: seeds math/rand, wires the
// Ginkgo writer into the controller-runtime logger, registers every API group
// the specs need, and builds the shared kubernetes client. The 300 argument
// is the Ginkgo setup timeout in seconds.
var _ = BeforeSuite(func(done Done) {
	By("Bootstrapping test environment")
	rand.Seed(time.Now().UnixNano())
	logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(GinkgoWriter)))

	// Register each API group into the shared scheme; any failure aborts setup.
	for _, addToScheme := range []func(*runtime.Scheme) error{
		clientgoscheme.AddToScheme,
		core.AddToScheme,
		crdv1.AddToScheme,
		v1alpha1.AddToScheme,
	} {
		Expect(addToScheme(scheme)).Should(BeNil())
	}

	By("Setting up kubernetes client")
	var err error
	k8sClient, err = client.New(config.GetConfigOrDie(), client.Options{Scheme: scheme})
	if err != nil {
		logf.Log.Error(err, "failed to create k8sClient")
		Fail("setup failed")
	}
	By("Finished setting up test environment")
	close(done)
}, 300)
// AfterSuite is currently a stub: real teardown (TearDownSuite) is commented
// out, so only the progress messages are emitted.
var _ = AfterSuite(func() {
	By("Tearing down test environment")
	// TearDownSuite() — intentionally disabled; see suite history before re-enabling.
	By("Finished tearing down test environment")
})

View File

@@ -0,0 +1,13 @@
# Test fixture for the Terraform addon e2e test: an application whose single
# component provisions an Alibaba Cloud OSS bucket and writes the resulting
# connection information into the "oss-conn" secret.
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
  name: provision-cloud-resource-sample
spec:
  components:
    - name: sample-oss
      # component type installed by enabling the Terraform addon
      type: alibaba-oss
      properties:
        bucket: vela-website-0911
        acl: private
        writeConnectionSecretToRef:
          name: oss-conn

View File

@@ -372,7 +372,7 @@ var _ = Describe("Test application containing helm module", func() {
It("Test store JSON schema of Helm Chart in ConfigMap", func() {
By("Get the ConfigMap")
cmName := fmt.Sprintf("schema-%s", cdName)
cmName := fmt.Sprintf("component-schema-%s", cdName)
Eventually(func() error {
cm := &corev1.ConfigMap{}
if err := k8sClient.Get(ctx, client.ObjectKey{Name: cmName, Namespace: namespace}, cm); err != nil {

View File

@@ -341,7 +341,7 @@ spec:
It("Test store JSON schema of Kube parameter in ConfigMap", func() {
By("Get the ConfigMap")
cmName := fmt.Sprintf("schema-%s", cdName)
cmName := fmt.Sprintf("component-schema-%s", cdName)
Eventually(func() error {
cm := &corev1.ConfigMap{}
if err := k8sClient.Get(ctx, client.ObjectKey{Name: cmName, Namespace: namespace}, cm); err != nil {

View File

@@ -30,6 +30,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"
common2 "github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/util"
@@ -105,6 +106,7 @@ var _ = Describe("rollout related e2e-test,rollout trait test", func() {
By("check rollout status have succeed")
Eventually(func() error {
rolloutKey := types.NamespacedName{Namespace: namespaceName, Name: componentName}
rollout = v1alpha1.Rollout{}
if err := k8sClient.Get(ctx, rolloutKey, &rollout); err != nil {
return err
}
@@ -150,7 +152,7 @@ var _ = Describe("rollout related e2e-test,rollout trait test", func() {
}
deployKey = types.NamespacedName{Namespace: namespaceName, Name: rollout.Status.LastSourceRevision}
if err := k8sClient.Get(ctx, deployKey, &sourceDeploy); err == nil || !apierrors.IsNotFound(err) {
return fmt.Errorf("source deploy still exist")
return fmt.Errorf("source deploy still exist namespace %s deployName %s", namespaceName, rollout.Status.LastSourceRevision)
}
return nil
}, time.Second*60, 300*time.Millisecond).Should(BeNil())
@@ -321,7 +323,7 @@ var _ = Describe("rollout related e2e-test,rollout trait test", func() {
}, 30*time.Second, 300*time.Millisecond).Should(BeNil())
})
It("rollout scale up adnd down without rollout batches", func() {
It("rollout scale up and down without rollout batches", func() {
By("first scale operation")
Expect(common.ReadYamlToObject("testdata/rollout/deployment/application.yaml", &app)).Should(BeNil())
app.Namespace = namespaceName
@@ -409,6 +411,43 @@ var _ = Describe("rollout related e2e-test,rollout trait test", func() {
}, 30*time.Second, 300*time.Millisecond).Should(BeNil())
verifySuccess("express-server-v2")
})
It("Delete a component with rollout trait from an application should delete this workload", func() {
By("first scale operation")
Expect(common.ReadYamlToObject("testdata/rollout/deployment/multi_comp_app.yaml", &app)).Should(BeNil())
app.Namespace = namespaceName
Expect(k8sClient.Create(ctx, &app)).Should(BeNil())
verifySuccess("express-server-v1")
componentName = "express-server-another"
verifySuccess("express-server-another-v1")
By("delete a component")
Eventually(func() error {
checkApp := &v1beta1.Application{}
if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespaceName, Name: app.Name}, checkApp); err != nil {
return err
}
checkApp.Spec.Components = []common2.ApplicationComponent{checkApp.Spec.Components[0]}
if err := k8sClient.Update(ctx, checkApp); err != nil {
return err
}
return nil
}, 30*time.Second, 300*time.Millisecond).Should(BeNil())
By("check deployment have been gc")
Eventually(func() error {
checkApp := &v1beta1.Application{}
if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespaceName, Name: app.Name}, checkApp); err != nil {
return err
}
if len(checkApp.Spec.Components) != 1 || checkApp.Spec.Components[0].Name != "express-server" {
return fmt.Errorf("app hasn't update yet")
}
deploy := v1.Deployment{}
if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespaceName, Name: "express-server-another-v1"}, &deploy); err == nil || !apierrors.IsNotFound(err) {
return fmt.Errorf("another deployment haven't been delete")
}
return nil
}, 30*time.Second, 300*time.Millisecond).Should(BeNil())
})
})
const (

View File

@@ -0,0 +1,27 @@
# Test fixture: an application with two identical webservice components, each
# carrying a rollout trait. Used by the rollout e2e test to verify that
# removing one component from the application also deletes its workload.
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
  name: rollout-trait-test
spec:
  components:
    - name: express-server
      type: webservice
      properties:
        image: stefanprodan/podinfo:4.0.3
      traits:
        - type: rollout
          properties:
            # roll out to 2 replicas in two batches of 1
            targetSize: 2
            firstBatchReplicas: 1
            secondBatchReplicas: 1
    - name: express-server-another
      type: webservice
      properties:
        image: stefanprodan/podinfo:4.0.3
      traits:
        - type: rollout
          properties:
            targetSize: 2
            firstBatchReplicas: 1
            secondBatchReplicas: 1