Compare commits

..

12 Commits

Author SHA1 Message Date
wyike
89d8e37c7c disable rollout and deploy docs (#1860) 2021-07-01 13:38:05 +08:00
yangsoon
446f682f6c fix invalid metadata.labels error when specify the version of the trait in the app (#1855) (#1857)
* fix trait name
* add test
2021-06-29 19:36:53 +08:00
yangsoon
44e8352d1e [FIX]incorrect pod-disruptive defalut configuration in label of traits (#1837) (#1844)
[FIX]incorrect pod-disruptive defalut configuration in sidecar of traits (#1839)

Fix incorrect pod disruptive annotation traits (#1838)

* [FIX]incorrect pod-disruptive defalut configuration in label of traits

* [FIX]incorrect pod-disruptive defalut configuration in annotation of traits

* [FIX]incorrect pod-disruptive defalut configuration in annotation of traits

Co-authored-by: Scaat Feng <scaat.feng@gmail.com>
2021-06-25 10:24:46 +08:00
Jianbo Sun
5d9b3fbce6 Merge pull request #1776 from yangsoon/rfc-log-system
Cherry pick the feature(refactor the log system) to release-1.0 branch
2021-06-08 19:35:13 +08:00
yangsoon
6c0364453f add options (#1775)
1. add ConcurrentReconciles for setting the concurrent reconcile number of the controller
2. add DependCheckWait for setting the time to wait for ApplicationConfiguration's dependent-resource ready
2021-06-08 15:20:55 +08:00
Kinso
2aa5b47e45 fix(log): debug level flag (#1768)
Co-authored-by: kinsolee <lijingzhao@forchange.tech>
2021-06-08 14:40:31 +08:00
yangsoon
efa60a67a1 Add Logging Convention in CONTRIBUTING.md (#1762)
* add logging convention in contributing

* fix log
2021-06-08 14:32:26 +08:00
yangsoon
89b479e1bc improve log system in appconfig (#1758) 2021-06-08 14:32:17 +08:00
yangsoon
d591d6ad64 Improve the logging system (#1735)
* change to klog/v2

* add logfilepath opt in helm chart

* replace klog.InfoF with klog.InfoS and improve messages

Remove string formatting from log message, improve messages in klog.InfoS and use lowerCamelCase to fmt Name arguments in klog.InfoS

* fix klog.Error

for expected errors (errors that can happen during routine operations) use klog.InfoS and pass error in err key instead.

* use klog.KObj and klog.KRef for Kubernetes objects

ensure that kubernetes objects references are consistent within the codebase

* enable set logdebug level

* add log-file-max-size
2021-06-08 14:31:53 +08:00
Jianbo Sun
916b3b52de remove restful API from website docs (#1772) 2021-06-08 10:38:43 +08:00
Zheng Xi Zhou
df0eaaa74a Generate OpenAPI JSON schema for Terraform Component (#1738) (#1753)
Fix #1736
2021-06-04 13:12:44 +08:00
yangsoon
2740067eb5 fix bug: When the Component contains multiple traits of the same type, the status of the trait in the Application is reported incorrectly (#1731) (#1743)
* fix status

* add test

* fix webhook

* add paramter context for customStatus
2021-06-02 10:31:06 +08:00
76 changed files with 1244 additions and 1056 deletions

View File

@@ -109,8 +109,45 @@ Start to test.
```
make e2e-test
```
## Logging Conventions
### Contribute Docs
### Structured logging
We recommend using `klog.InfoS` to structure the log. The `msg` argument needs to start with a capital letter,
and name arguments should always use lowerCamelCase.
```golang
// func InfoS(msg string, keysAndValues ...interface{})
klog.InfoS("Reconcile traitDefinition", "traitDefinition", klog.KRef(req.Namespace, req.Name))
// output:
// I0605 10:10:57.308074 22276 traitdefinition_controller.go:59] "Reconcile traitDefinition" traitDefinition="vela-system/expose"
```
### Use `klog.KObj` and `klog.KRef` for Kubernetes objects
`klog.KObj` and `klog.KRef` can unify the output of Kubernetes objects.
```golang
// KObj is used to create ObjectRef when logging information about Kubernetes objects
klog.InfoS("Start to reconcile", "appDeployment", klog.KObj(appDeployment))
// KRef is used to create ObjectRef when logging information about Kubernetes objects without access to metav1.Object
klog.InfoS("Reconcile application", "application", klog.KRef(req.Namespace, req.Name))
```
### Logging Level
[This file](https://github.com/oam-dev/kubevela/blob/master/pkg/controller/common/logs.go) contains KubeVela's log levels,
and you can set the log level by `klog.V(level)`.
```golang
// you can use klog.V(common.LogDebug) to print debug log
klog.V(common.LogDebug).InfoS("Successfully applied components", "workloads", len(workloads))
```
More details can be found in the [Structured Logging Guide](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#structured-logging-in-kubernetes).
## Contribute Docs
Please read [the documentation](https://github.com/oam-dev/kubevela/tree/master/docs/README.md) before contributing to the docs.

View File

@@ -156,7 +156,7 @@ e2e-setup:
helm install --create-namespace -n flux-system helm-flux http://oam.dev/catalog/helm-flux2-0.1.0.tgz
helm install kruise https://github.com/openkruise/kruise/releases/download/v0.7.0/kruise-chart.tgz
sh ./hack/e2e/modify_charts.sh
helm upgrade --install --create-namespace --namespace vela-system --set image.pullPolicy=IfNotPresent --set image.repository=vela-core-test --set applicationRevisionLimit=5 --set image.tag=$(GIT_COMMIT) --wait kubevela ./charts/vela-core
helm upgrade --install --create-namespace --namespace vela-system --set image.pullPolicy=IfNotPresent --set image.repository=vela-core-test --set applicationRevisionLimit=5 --set dependCheckWait=10s --set image.tag=$(GIT_COMMIT) --wait kubevela ./charts/vela-core
ginkgo version
ginkgo -v -r e2e/setup
kubectl wait --for=condition=Ready pod -l app.kubernetes.io/name=vela-core,app.kubernetes.io/instance=kubevela -n vela-system --timeout=600s

View File

@@ -8,8 +8,8 @@ metadata:
namespace: {{.Values.systemDefinitionNamespace}}
spec:
appliesToWorkloads:
- webservice
- worker
- deployments.apps
podDisruptive: true
schematic:
cue:
template: |-

View File

@@ -8,8 +8,8 @@ metadata:
namespace: {{.Values.systemDefinitionNamespace}}
spec:
appliesToWorkloads:
- webservice
- worker
- deployments.apps
podDisruptive: true
schematic:
cue:
template: |-

View File

@@ -8,8 +8,8 @@ metadata:
namespace: {{.Values.systemDefinitionNamespace}}
spec:
appliesToWorkloads:
- webservice
- worker
- deployments.apps
podDisruptive: true
schematic:
cue:
template: |-

View File

@@ -107,6 +107,13 @@ spec:
args:
- "--metrics-addr=:8080"
- "--enable-leader-election"
{{ if ne .Values.logFilePath "" }}
- "--log-file-path={{ .Values.logFilePath }}"
- "--log-file-max-size={{ .Values.logFileMaxSize }}"
{{ end }}
{{ if .Values.logDebug }}
- "--log-debug=true"
{{ end }}
{{ if .Values.admissionWebhooks.enabled }}
- "--use-webhook=true"
- "--webhook-port={{ .Values.webhookService.port }}"

View File

@@ -79,8 +79,21 @@ admissionWebhooks:
enabled: false
#If non-empty, write log files in this path
logFilePath: ""
#Defines the maximum size a log file can grow to. Unit is megabytes.
#If the value is 0, the maximum file size is unlimited.
logFileMaxSize: 1024
systemDefinitionNamespace: vela-system
applicationRevisionLimit: 10
definitionRevisionLimit: 20
# concurrentReconciles is the concurrent reconcile number of the controller
concurrentReconciles: 4
# dependCheckWait is the time to wait for ApplicationConfiguration's dependent-resource ready
dependCheckWait: 30s

View File

@@ -27,14 +27,12 @@ import (
"strings"
"time"
"github.com/crossplane/crossplane-runtime/pkg/logging"
"go.uber.org/zap/zapcore"
"gopkg.in/natefinch/lumberjack.v2"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/healthz"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
standardcontroller "github.com/oam-dev/kubevela/pkg/controller"
commonconfig "github.com/oam-dev/kubevela/pkg/controller/common"
oamcontroller "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev"
oamv1alpha2 "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2"
"github.com/oam-dev/kubevela/pkg/controller/utils"
@@ -53,7 +51,6 @@ const (
)
var (
setupLog = ctrl.Log.WithName(kubevelaName)
scheme = common.Scheme
waitSecretTimeout = 90 * time.Second
waitSecretInterval = 2 * time.Second
@@ -61,8 +58,8 @@ var (
func main() {
var metricsAddr, logFilePath, leaderElectionNamespace string
var enableLeaderElection, logCompress, logDebug bool
var logRetainDate int
var enableLeaderElection, logDebug bool
var logFileMaxSize uint64
var certDir string
var webhookPort int
var useWebhook bool
@@ -82,8 +79,7 @@ func main() {
flag.StringVar(&leaderElectionNamespace, "leader-election-namespace", "",
"Determines the namespace in which the leader election configmap will be created.")
flag.StringVar(&logFilePath, "log-file-path", "", "The file to write logs to.")
flag.IntVar(&logRetainDate, "log-retain-date", 7, "The number of days of logs history to retain.")
flag.BoolVar(&logCompress, "log-compress", true, "Enable compression on the rotated logs.")
flag.Uint64Var(&logFileMaxSize, "log-file-max-size", 1024, "Defines the maximum size a log file can grow to, Unit is megabytes.")
flag.BoolVar(&logDebug, "log-debug", false, "Enable debug logs for development purpose")
flag.IntVar(&controllerArgs.RevisionLimit, "revision-limit", 50,
"RevisionLimit is the maximum number of revisions that will be maintained. The default value is 50.")
@@ -103,29 +99,26 @@ func main() {
flag.DurationVar(&syncPeriod, "informer-re-sync-interval", 60*time.Minute,
"controller shared informer lister full re-sync period")
flag.StringVar(&oam.SystemDefinitonNamespace, "system-definition-namespace", "vela-system", "define the namespace of the system-level definition")
flag.Parse()
flag.IntVar(&controllerArgs.ConcurrentReconciles, "concurrent-reconciles", 4, "concurrent-reconciles is the concurrent reconcile number of the controller. The default value is 4")
flag.DurationVar(&controllerArgs.DependCheckWait, "depend-check-wait", 30*time.Second, "depend-check-wait is the time to wait for ApplicationConfiguration's dependent-resource ready."+
"The default value is 30s, which means if dependent resources were not prepared, the ApplicationConfiguration would be reconciled after 30s.")
flag.Parse()
// setup logging
var w io.Writer
if len(logFilePath) > 0 {
w = zapcore.AddSync(&lumberjack.Logger{
Filename: logFilePath,
MaxAge: logRetainDate, // days
Compress: logCompress,
})
} else {
w = os.Stdout
klog.InitFlags(nil)
if logDebug {
_ = flag.Set("v", strconv.Itoa(int(commonconfig.LogDebug)))
}
logger := zap.New(func(o *zap.Options) {
o.Development = logDebug
o.DestWritter = w
})
ctrl.SetLogger(logger)
if logFilePath != "" {
_ = flag.Set("logtostderr", "false")
_ = flag.Set("log_file", logFilePath)
_ = flag.Set("log_file_max_size", strconv.FormatUint(logFileMaxSize, 10))
}
setupLog.Info(fmt.Sprintf("KubeVela Version: %s, GIT Revision: %s.", version.VelaVersion, version.GitRevision))
setupLog.Info(fmt.Sprintf("Disable Capabilities: %s.", disableCaps))
setupLog.Info(fmt.Sprintf("core init with definition namespace %s", oam.SystemDefinitonNamespace))
klog.InfoS("KubeVela information", "version", version.VelaVersion, "revision", version.GitRevision)
klog.InfoS("Disable capabilities", "name", disableCaps)
klog.InfoS("Vela-Core init", "definition namespace", oam.SystemDefinitonNamespace)
restConfig := ctrl.GetConfigOrDie()
restConfig.UserAgent = kubevelaName + "/" + version.GitRevision
@@ -142,46 +135,46 @@ func main() {
SyncPeriod: &syncPeriod,
})
if err != nil {
setupLog.Error(err, "unable to create a controller manager")
klog.ErrorS(err, "Unable to create a controller manager")
os.Exit(1)
}
if err := registerHealthChecks(mgr); err != nil {
setupLog.Error(err, "unable to register ready/health checks")
klog.ErrorS(err, "Unable to register ready/health checks")
os.Exit(1)
}
if err := utils.CheckDisabledCapabilities(disableCaps); err != nil {
setupLog.Error(err, "unable to get enabled capabilities")
klog.ErrorS(err, "Unable to get enabled capabilities")
os.Exit(1)
}
switch strings.ToLower(applyOnceOnly) {
case "", "false", string(oamcontroller.ApplyOnceOnlyOff):
controllerArgs.ApplyMode = oamcontroller.ApplyOnceOnlyOff
setupLog.Info("ApplyOnceOnly is disabled")
klog.Info("ApplyOnceOnly is disabled")
case "true", string(oamcontroller.ApplyOnceOnlyOn):
controllerArgs.ApplyMode = oamcontroller.ApplyOnceOnlyOn
setupLog.Info("ApplyOnceOnly is enabled, that means workload or trait only apply once if no spec change even they are changed by others")
klog.Info("ApplyOnceOnly is enabled, that means workload or trait only apply once if no spec change even they are changed by others")
case string(oamcontroller.ApplyOnceOnlyForce):
controllerArgs.ApplyMode = oamcontroller.ApplyOnceOnlyForce
setupLog.Info("ApplyOnceOnlyForce is enabled, that means workload or trait only apply once if no spec change even they are changed or deleted by others")
klog.Info("ApplyOnceOnlyForce is enabled, that means workload or trait only apply once if no spec change even they are changed or deleted by others")
default:
setupLog.Error(fmt.Errorf("invalid apply-once-only value: %s", applyOnceOnly),
"unable to setup the vela core controller",
"valid apply-once-only value:", "on/off/force, by default it's off")
klog.ErrorS(fmt.Errorf("invalid apply-once-only value: %s", applyOnceOnly),
"Unable to setup the vela core controller",
"apply-once-only", "on/off/force, by default it's off")
os.Exit(1)
}
dm, err := discoverymapper.New(mgr.GetConfig())
if err != nil {
setupLog.Error(err, "failed to create CRD discovery client")
klog.ErrorS(err, "Failed to create CRD discovery client")
os.Exit(1)
}
controllerArgs.DiscoveryMapper = dm
pd, err := definition.NewPackageDiscover(mgr.GetConfig())
if err != nil {
setupLog.Error(err, "failed to create CRD discovery for CUE package client")
klog.Error(err, "Failed to create CRD discovery for CUE package client")
if !definition.IsCUEParseErr(err) {
os.Exit(1)
}
@@ -189,46 +182,49 @@ func main() {
controllerArgs.PackageDiscover = pd
if useWebhook {
setupLog.Info("vela webhook enabled, will serving at :" + strconv.Itoa(webhookPort))
klog.InfoS("Enable webhook", "server port", strconv.Itoa(webhookPort))
oamwebhook.Register(mgr, controllerArgs)
velawebhook.Register(mgr, disableCaps)
if err := waitWebhookSecretVolume(certDir, waitSecretTimeout, waitSecretInterval); err != nil {
setupLog.Error(err, "unable to get webhook secret")
klog.ErrorS(err, "Unable to get webhook secret")
os.Exit(1)
}
}
if err = oamv1alpha2.Setup(mgr, controllerArgs, logging.NewLogrLogger(setupLog)); err != nil {
setupLog.Error(err, "unable to setup the oam core controller")
if err = oamv1alpha2.Setup(mgr, controllerArgs); err != nil {
klog.ErrorS(err, "Unable to setup the oam core controller")
os.Exit(1)
}
if err = standardcontroller.Setup(mgr, disableCaps); err != nil {
setupLog.Error(err, "unable to setup the vela core controller")
klog.ErrorS(err, "Unable to setup the vela core controller")
os.Exit(1)
}
if driver := os.Getenv(system.StorageDriverEnv); len(driver) == 0 {
// first use system environment,
err := os.Setenv(system.StorageDriverEnv, storageDriver)
if err != nil {
setupLog.Error(err, "unable to setup the vela core controller")
klog.ErrorS(err, "Unable to setup the vela core controller")
os.Exit(1)
}
}
setupLog.Info("use storage driver", "storageDriver", os.Getenv(system.StorageDriverEnv))
klog.InfoS("Use storage driver", "storageDriver", os.Getenv(system.StorageDriverEnv))
setupLog.Info("starting the vela controller manager")
klog.Info("Start the vela controller manager")
if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
setupLog.Error(err, "problem running manager")
klog.ErrorS(err, "Failed to run manager")
os.Exit(1)
}
setupLog.Info("program safely stops...")
if logFilePath != "" {
klog.Flush()
}
klog.Info("Safely stops Program...")
}
// registerHealthChecks is used to create readiness&liveness probes
func registerHealthChecks(mgr ctrl.Manager) error {
setupLog.Info("creating readiness/health check")
klog.Info("Create readiness/health check")
if err := mgr.AddReadyzCheck("ping", healthz.Ping); err != nil {
return err
}
@@ -247,8 +243,8 @@ func waitWebhookSecretVolume(certDir string, timeout, interval time.Duration) er
if time.Since(start) > timeout {
return fmt.Errorf("getting webhook secret timeout after %s", timeout.String())
}
setupLog.Info(fmt.Sprintf("waiting webhook secret, time consumed: %d/%d seconds ...",
int64(time.Since(start).Seconds()), int64(timeout.Seconds())))
klog.InfoS("Wait webhook secret", "time consumed(second)", int64(time.Since(start).Seconds()),
"timeout(second)", int64(timeout.Seconds()))
if _, err := os.Stat(certDir); !os.IsNotExist(err) {
ready := func() bool {
f, err := os.Open(filepath.Clean(certDir))
@@ -270,8 +266,8 @@ func waitWebhookSecretVolume(certDir string, timeout, interval time.Duration) er
return nil
})
if err == nil {
setupLog.Info(fmt.Sprintf("webhook secret is ready (time consumed: %d seconds)",
int64(time.Since(start).Seconds())))
klog.InfoS("Webhook secret is ready", "time consumed(second)",
int64(time.Since(start).Seconds()))
return true
}
return false

View File

@@ -1,238 +0,0 @@
---
title: Advanced Rollout Plan
---
The rollout plan feature in KubeVela is essentially provided by `AppRollout` API.
## AppRollout
Below is an example for rolling update an application from v1 to v2 in three batches. The
first batch contains only 1 pod while the rest of the batches split the rest.
```yaml
apiVersion: core.oam.dev/v1beta1
kind: AppRollout
metadata:
name: rolling-example
spec:
sourceAppRevisionName: test-rolling-v1
targetAppRevisionName: test-rolling-v2
componentList:
- metrics-provider
rolloutPlan:
rolloutStrategy: "IncreaseFirst"
rolloutBatches:
- replicas: 1
- replicas: 50%
- replicas: 50%
batchPartition: 1
```
## Basic Usage
1. Deploy application
```yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: test-rolling
annotations:
"app.oam.dev/rolling-components": "metrics-provider"
"app.oam.dev/rollout-template": "true"
spec:
components:
- name: metrics-provider
type: worker
properties:
cmd:
- ./podinfo
- stress-cpu=1
image: stefanprodan/podinfo:4.0.6
port: 8080
replicas: 5
```
Verify that AppRevision `test-rolling-v1` has been generated
```shell
$ kubectl get apprev test-rolling-v1
NAME AGE
test-rolling-v1 9s
```
2. Attach the following rollout plan to upgrade the application to v1
```yaml
apiVersion: core.oam.dev/v1beta1
kind: AppRollout
metadata:
name: rolling-example
spec:
# application (revision) reference
targetAppRevisionName: test-rolling-v1
componentList:
- metrics-provider
rolloutPlan:
rolloutStrategy: "IncreaseFirst"
rolloutBatches:
- replicas: 10%
- replicas: 40%
- replicas: 50%
targetSize: 5
```
User can check the status of the AppRollout and wait for the rollout to complete.
3. User can continue to modify the application image tag and apply it. This will generate a new AppRevision `test-rolling-v2`
```yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: test-rolling
annotations:
"app.oam.dev/rolling-components": "metrics-provider"
"app.oam.dev/rollout-template": "true"
spec:
components:
- name: metrics-provider
type: worker
properties:
cmd:
- ./podinfo
- stress-cpu=1
image: stefanprodan/podinfo:5.0.2
port: 8080
replicas: 5
```
Verify that AppRevision `test-rolling-v2` has been generated
```shell
$ kubectl get apprev test-rolling-v2
NAME AGE
test-rolling-v2 7s
```
4. Apply the application rollout that upgrade the application from v1 to v2
```yaml
apiVersion: core.oam.dev/v1beta1
kind: AppRollout
metadata:
name: rolling-example
spec:
# application (revision) reference
sourceAppRevisionName: test-rolling-v1
targetAppRevisionName: test-rolling-v2
componentList:
- metrics-provider
rolloutPlan:
rolloutStrategy: "IncreaseFirst"
rolloutBatches:
- replicas: 1
- replicas: 2
- replicas: 2
```
User can check the status of the AppRollout and see that the rollout completes, and the
AppRollout's "Rolling State" becomes `rolloutSucceed`
## Advanced Usage
Using `AppRollout` separately can enable some advanced use case.
### Revert
5. Apply the application rollout that revert the application from v2 to v1
```yaml
apiVersion: core.oam.dev/v1beta1
kind: AppRollout
metadata:
name: rolling-example
spec:
# application (revision) reference
sourceAppRevisionName: test-rolling-v2
targetAppRevisionName: test-rolling-v1
componentList:
- metrics-provider
rolloutPlan:
rolloutStrategy: "IncreaseFirst"
rolloutBatches:
- replicas: 1
- replicas: 2
- replicas: 2
```
### Skip Revision Rollout
6. User can apply this YAML to continue modifying the application image tag. This will generate a new AppRevision `test-rolling-v3`
```yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: test-rolling
annotations:
"app.oam.dev/rolling-components": "metrics-provider"
"app.oam.dev/rollout-template": "true"
spec:
components:
- name: metrics-provider
type: worker
properties:
cmd:
- ./podinfo
- stress-cpu=1
image: stefanprodan/podinfo:5.2.0
port: 8080
replicas: 5
```
Verify that AppRevision `test-rolling-v3` has been generated
```shell
$ kubectl get apprev test-rolling-v3
NAME AGE
test-rolling-v3 7s
```
7. Apply the application rollout that rollout the application from v1 to v3
```yaml
apiVersion: core.oam.dev/v1beta1
kind: AppRollout
metadata:
name: rolling-example
spec:
# application (revision) reference
sourceAppRevisionName: test-rolling-v1
targetAppRevisionName: test-rolling-v3
componentList:
- metrics-provider
rolloutPlan:
rolloutStrategy: "IncreaseFirst"
rolloutBatches:
- replicas: 1
- replicas: 2
- replicas: 2
```
## More Details About `AppRollout`
### Design Principles and Goals
There are several attempts at solving rollout problem in the cloud native community. However, none
of them provide a true rolling style upgrade. For example, flagger supports Blue/Green, Canary
and A/B testing. Therefore, we decide to add support for batch based rolling upgrade as
our first style to support in KubeVela.
We design KubeVela rollout solutions with the following principles in mind
- First, we want all flavors of rollout controllers share the same core rollout
related logic. The trait and application related logic can be easily encapsulated into its own
package.
- Second, the core rollout related logic is easily extensible to support different type of
workloads, i.e. Deployment, CloneSet, Statefulset, DaemonSet or even customized workloads.
- Thirdly, the core rollout related logic has a well documented state machine that
does state transition explicitly.
- Finally, the controllers can support all the rollout/upgrade needs of an application running
in a production environment including Blue/Green, Canary and A/B testing.
### State Transition
Here is the high level state transition graph
![](../../resources/approllout-status-transition.jpg)
### Roadmap
Our recent roadmap for rollout plan is [here](./roadmap).

View File

@@ -1,230 +0,0 @@
---
title: Placement
---
## Introduction
In this section, we will introduce how to use KubeVela to place application across multiple clusters with traffic management enabled. For traffic management, KubeVela currently allows you to split the traffic onto both the old and new revisions during rolling update and verify the new version while preserving service availability.
### AppDeployment
The `AppDeployment` API in KubeVela is provided to satisfy such requirements. Here's an overview of the API:
```yaml
apiVersion: core.oam.dev/v1beta1
kind: AppDeployment
metadata:
name: sample-appdeploy
spec:
traffic:
hosts:
- example.com
http:
- match:
# match any requests to 'example.com/example-app'
- uri:
prefix: "/example-app"
# split traffic 50/50 on v1/v2 versions of the app
weightedTargets:
- revisionName: example-app-v1
componentName: testsvc
port: 80
weight: 50
- revisionName: example-app-v2
componentName: testsvc
port: 80
weight: 50
appRevisions:
- # Name of the AppRevision.
# Each modification to Application would generate a new AppRevision.
revisionName: example-app-v1
# Cluster specific workload placement config
placement:
- clusterSelector:
# You can select Clusters by name or labels.
# If multiple clusters are selected, one will be picked via a unique hashing algorithm.
labels:
tier: production
name: prod-cluster-1
distribution:
replicas: 5
- # If no clusterSelector is given, it will use the host cluster in which this CR exists
distribution:
replicas: 5
- revisionName: example-app-v2
placement:
- clusterSelector:
labels:
tier: production
name: prod-cluster-1
distribution:
replicas: 5
- distribution:
replicas: 5
```
### Cluster
The clusters selected in the `placement` part from above is defined in Cluster CRD. Here's what it looks like:
```yaml
apiVersion: core.oam.dev/v1beta1
kind: Cluster
metadata:
name: prod-cluster-1
labels:
tier: production
spec:
kubeconfigSecretRef:
name: kubeconfig-cluster-1 # the secret name
```
The secret must contain the kubeconfig credentials in `config` field:
```yaml
apiVersion: v1
kind: Secret
metadata:
name: kubeconfig-cluster-1
data:
config: ... # kubeconfig data
```
## Quickstart
Here's a step-by-step tutorial for you to try out. All of the yaml files are from [`docs/examples/appdeployment/`](https://github.com/oam-dev/kubevela/tree/master/docs/examples/appdeployment).
You must run all commands in that directory.
1. Create an Application
```bash
$ cat <<EOF | kubectl apply -f -
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: example-app
annotations:
app.oam.dev/revision-only: "true"
spec:
components:
- name: testsvc
type: webservice
properties:
addRevisionLabel: true
image: crccheck/hello-world
port: 8000
EOF
```
This will create `example-app-v1` AppRevision. Check it:
```bash
$ kubectl get applicationrevisions.core.oam.dev
NAME AGE
example-app-v1 116s
```
> Note: with `app.oam.dev/revision-only: "true"` annotation, above `Application` resource won't create any pod instances and leave the real deployment process to `AppDeployment`.
1. Then use the above AppRevision to create an AppDeployment.
```bash
$ kubectl apply -f appdeployment-1.yaml
```
> Note: in order for AppDeployment to work, your workload object must have a `spec.replicas` field for scaling.
1. Now you can check that there will be 1 deployment and 2 pod instances deployed
```bash
$ kubectl get deploy
NAME READY UP-TO-DATE AVAILABLE AGE
testsvc-v1 2/2 2 0 27s
```
1. Update Application properties:
```bash
$ cat <<EOF | kubectl apply -f -
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: example-app
annotations:
app.oam.dev/revision-only: "true"
spec:
components:
- name: testsvc
type: webservice
properties:
addRevisionLabel: true
image: nginx
port: 80
EOF
```
This will create a new `example-app-v2` AppRevision. Check it:
```bash
$ kubectl get applicationrevisions.core.oam.dev
NAME
example-app-v1
example-app-v2
```
1. Then use the two AppRevisions to update the AppDeployment:
```bash
$ kubectl apply -f appdeployment-2.yaml
```
(Optional) If you have Istio installed, you can apply the AppDeployment with traffic split:
```bash
# set up gateway if not yet
$ kubectl apply -f gateway.yaml
$ kubectl apply -f appdeployment-2-traffic.yaml
```
Note that for traffic split to work, you must set the following pod labels in workload cue templates (see [webservice.cue](https://github.com/oam-dev/kubevela/blob/master/hack/vela-templates/cue/webservice.cue)):
```shell
"app.oam.dev/component": context.name
"app.oam.dev/appRevision": context.appRevision
```
1. Now you can check that there will be 1 deployment and 1 pod per revision.
```bash
$ kubectl get deploy
NAME READY UP-TO-DATE AVAILABLE AGE
testsvc-v1 1/1 1 1 2m14s
testsvc-v2 1/1 1 1 8s
```
(Optional) To verify traffic split:
```bash
# run this in another terminal
$ kubectl -n istio-system port-forward service/istio-ingressgateway 8080:80
Forwarding from 127.0.0.1:8080 -> 8080
Forwarding from [::1]:8080 -> 8080
# The command should return pages of either docker whale or nginx in 50/50
$ curl -H "Host: example-app.example.com" http://localhost:8080/
```
1. Cleanup:
```bash
kubectl delete appdeployments.core.oam.dev --all
kubectl delete applications.core.oam.dev --all
```

View File

@@ -62,10 +62,5 @@ spec:
User can check the status of the application and see that the rollout completes, and the
application's `status.rollout.rollingState` becomes `rolloutSucceed`.
## Advanced Usage
If you want to control and rollout the specific application revisions, or do revert, please refer to [Advanced Usage](advanced-rollout) to learn more details.

View File

@@ -62,4 +62,4 @@ Here are some recommended next steps:
- Learn KubeVela's [core concepts](./concepts)
- Learn more details about [`Application`](end-user/application) and what it can do for you.
- Learn how to attach [rollout plan](end-user/scopes/rollout-plan) to this application, or [place it to multiple runtime clusters](end-user/scopes/appdeploy).
- Learn how to attach [rollout plan](end-user/scopes/rollout-plan) to this application.

View File

@@ -44,7 +44,6 @@ module.exports = {
'end-user/traits/more',
]
},
'end-user/scopes/appdeploy',
'end-user/scopes/rollout-plan',
{
'Observability': [
@@ -169,7 +168,6 @@ module.exports = {
'cli/vela_cap',
],
},
'developers/references/restful-api/rest',
],
},
{

View File

@@ -7,8 +7,8 @@ metadata:
namespace: {{.Values.systemDefinitionNamespace}}
spec:
appliesToWorkloads:
- webservice
- worker
- deployments.apps
podDisruptive: true
schematic:
cue:
template: |-

View File

@@ -7,8 +7,8 @@ metadata:
namespace: {{.Values.systemDefinitionNamespace}}
spec:
appliesToWorkloads:
- webservice
- worker
- deployments.apps
podDisruptive: true
schematic:
cue:
template: |-

View File

@@ -7,8 +7,8 @@ metadata:
namespace: {{.Values.systemDefinitionNamespace}}
spec:
appliesToWorkloads:
- webservice
- worker
- deployments.apps
podDisruptive: true
schematic:
cue:
template: |-

View File

@@ -96,7 +96,7 @@ func (wl *Workload) EvalContext(ctx process.Context) error {
// EvalStatus eval workload status
func (wl *Workload) EvalStatus(ctx process.Context, cli client.Client, ns string) (string, error) {
return wl.engine.Status(ctx, cli, ns, wl.FullTemplate.CustomStatus)
return wl.engine.Status(ctx, cli, ns, wl.FullTemplate.CustomStatus, wl.Params)
}
// EvalHealth eval workload health check
@@ -149,7 +149,7 @@ func (trait *Trait) EvalContext(ctx process.Context) error {
// EvalStatus eval trait status
func (trait *Trait) EvalStatus(ctx process.Context, cli client.Client, ns string) (string, error) {
return trait.engine.Status(ctx, cli, ns, trait.CustomStatusFormat)
return trait.engine.Status(ctx, cli, ns, trait.CustomStatusFormat, trait.Params)
}
// EvalHealth eval trait health check

View File

@@ -180,15 +180,20 @@ func (p *Parser) parseTrait(ctx context.Context, name string, properties map[str
if err != nil {
return nil, err
}
traitName, err := util.ConvertDefinitionRevName(name)
if err != nil {
traitName = name
}
return &Trait{
Name: name,
Name: traitName,
CapabilityCategory: templ.CapabilityCategory,
Params: properties,
Template: templ.TemplateStr,
HealthCheckPolicy: templ.Health,
CustomStatusFormat: templ.CustomStatus,
FullTemplate: templ,
engine: definition.NewTraitAbstractEngine(name, p.pd),
engine: definition.NewTraitAbstractEngine(traitName, p.pd),
}, nil
}

View File

@@ -242,7 +242,7 @@ func loadSchematicToTemplate(tmpl *Template, status *common.Status, schematic *c
return nil
}
// ConvertTemplateJSON2Object convert spec.extension to object
// ConvertTemplateJSON2Object convert spec.extension or spec.schematic to object
func ConvertTemplateJSON2Object(capabilityName string, in *runtime.RawExtension, schematic *common.Schematic) (types.Capability, error) {
var t types.Capability
t.Name = capabilityName

View File

@@ -18,6 +18,12 @@ package common
import "k8s.io/klog/v2"
// klog has multiple levels, you can set the log levels by klog.V()
// Basic examples:
//
// klog.V(1).Info("Prepare to repel boarders")
//
// klog.V(2).ErrorS(err, "Initialization failed")
const (
// LogInfo level is for most info logs, this is the default
// One should just call Info directly.

View File

@@ -29,7 +29,6 @@ import (
"k8s.io/klog/v2"
"github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/pkg/controller/common"
)
// issue an http call to the an end ponit
@@ -125,7 +124,7 @@ func callWebhook(ctx context.Context, resource klog.KMetadata, phase string, rw
}
if !accepted {
err := fmt.Errorf("http request to the webhook not accepeted, http status = %d", status)
klog.V(common.LogDebug).InfoS("the status is not expected", "expected status", rw.ExpectedStatus)
klog.ErrorS(err, "The status is not expected", "expected status", rw.ExpectedStatus)
return err
}
return nil

View File

@@ -17,6 +17,8 @@ limitations under the License.
package core_oam_dev
import (
"time"
"github.com/oam-dev/kubevela/pkg/dsl/definition"
"github.com/oam-dev/kubevela/pkg/oam/discoverymapper"
)
@@ -68,4 +70,10 @@ type Args struct {
DiscoveryMapper discoverymapper.DiscoveryMapper
// PackageDiscover used for CRD discovery in CUE packages, a K8s client is contained in it.
PackageDiscover *definition.PackageDiscover
// ConcurrentReconciles is the concurrent reconcile number of the controller
ConcurrentReconciles int
// DependCheckWait is the time to wait for ApplicationConfiguration's dependent-resource ready
DependCheckWait time.Duration
}

View File

@@ -21,7 +21,6 @@ import (
"encoding/json"
"time"
"github.com/crossplane/crossplane-runtime/pkg/logging"
"github.com/pkg/errors"
istioapiv1beta1 "istio.io/api/networking/v1beta1"
istioclientv1beta1 "istio.io/client-go/pkg/apis/networking/v1beta1"
@@ -34,12 +33,13 @@ import (
"k8s.io/kubectl/pkg/util/slice"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
oamcorealpha "github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
oamcore "github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/clustermanager"
controller "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev"
oamctrl "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev"
"github.com/oam-dev/kubevela/pkg/controller/utils"
"github.com/oam-dev/kubevela/pkg/oam/discoverymapper"
"github.com/oam-dev/kubevela/pkg/utils/apply"
@@ -57,20 +57,11 @@ var (
// Reconciler reconciles an AppDeployment object
type Reconciler struct {
Client client.Client
dm discoverymapper.DiscoveryMapper
wr WorkloadRenderer
Scheme *runtime.Scheme
}
// NewReconciler returns a new instance of Reconciler
func NewReconciler(cli client.Client, sch *runtime.Scheme, dm discoverymapper.DiscoveryMapper) *Reconciler {
return &Reconciler{
dm: dm,
Client: cli,
Scheme: sch,
wr: NewWorkloadRenderer(cli),
}
Client client.Client
dm discoverymapper.DiscoveryMapper
wr WorkloadRenderer
Scheme *runtime.Scheme
concurrentReconciles int
}
// +kubebuilder:rbac:groups=core.oam.dev,resources=appdeployments,verbs=get;list;watch;create;update;patch;delete
@@ -454,6 +445,9 @@ func (r *Reconciler) updateStatus(ctx context.Context, appd *oamcore.AppDeployme
// SetupWithManager setup the controller with manager
func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
WithOptions(controller.Options{
MaxConcurrentReconciles: r.concurrentReconciles,
}).
For(&oamcore.AppDeployment{}).
Complete(r)
}
@@ -553,7 +547,12 @@ func removeString(slice []string, s string) (result []string) {
}
// Setup adds a controller that reconciles AppDeployment.
func Setup(mgr ctrl.Manager, args controller.Args, _ logging.Logger) error {
r := NewReconciler(mgr.GetClient(), mgr.GetScheme(), args.DiscoveryMapper)
func Setup(mgr ctrl.Manager, args oamctrl.Args) error {
r := &Reconciler{
dm: args.DiscoveryMapper,
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
wr: NewWorkloadRenderer(mgr.GetClient()),
}
return r.SetupWithManager(mgr)
}

View File

@@ -25,6 +25,7 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/rest"
"k8s.io/utils/pointer"
ctrl "sigs.k8s.io/controller-runtime"
@@ -56,6 +57,16 @@ func TestAPIs(t *testing.T) {
[]Reporter{printer.NewlineReporter{}})
}
// NewReconciler returns a new instance of Reconciler
func NewReconciler(cli client.Client, sch *runtime.Scheme, dm discoverymapper.DiscoveryMapper) *Reconciler {
return &Reconciler{
dm: dm,
Client: cli,
Scheme: sch,
wr: NewWorkloadRenderer(cli),
}
}
var _ = BeforeSuite(func(done Done) {
logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(GinkgoWriter)))
rand.Seed(time.Now().UnixNano())

View File

@@ -22,17 +22,16 @@ import (
"github.com/crossplane/crossplane-runtime/apis/core/v1alpha1"
"github.com/crossplane/crossplane-runtime/pkg/event"
"github.com/crossplane/crossplane-runtime/pkg/logging"
"github.com/crossplane/crossplane-runtime/pkg/meta"
"github.com/go-logr/logr"
"github.com/pkg/errors"
kerrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/retry"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
@@ -57,13 +56,13 @@ const (
// Reconciler reconciles a Application object
type Reconciler struct {
client.Client
dm discoverymapper.DiscoveryMapper
pd *definition.PackageDiscover
Log logr.Logger
Scheme *runtime.Scheme
Recorder event.Recorder
applicator apply.Applicator
appRevisionLimit int
dm discoverymapper.DiscoveryMapper
pd *definition.PackageDiscover
Scheme *runtime.Scheme
Recorder event.Recorder
applicator apply.Applicator
appRevisionLimit int
concurrentReconciles int
}
// +kubebuilder:rbac:groups=core.oam.dev,resources=applications,verbs=get;list;watch;create;update;patch;delete
@@ -72,7 +71,8 @@ type Reconciler struct {
// Reconcile process app event
func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
ctx := context.Background()
applog := r.Log.WithValues("application", req.NamespacedName)
klog.InfoS("Reconcile application", "application", klog.KRef(req.Namespace, req.Name))
app := new(v1beta1.Application)
if err := r.Get(ctx, client.ObjectKey{
Name: req.Name,
@@ -85,43 +85,42 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
}
handler := &appHandler{
r: r,
app: app,
logger: applog,
r: r,
app: app,
}
if app.ObjectMeta.DeletionTimestamp.IsZero() {
if registerFinalizers(app) {
applog.Info("Register new finalizer", "application", app.Namespace+"/"+app.Name, "finalizers", app.ObjectMeta.Finalizers)
klog.InfoS("Register new finalizer for application", "application", klog.KObj(app), "finalizers", app.ObjectMeta.Finalizers)
return reconcile.Result{}, errors.Wrap(r.Client.Update(ctx, app), errUpdateApplicationFinalizer)
}
} else {
needUpdate, err := handler.removeResourceTracker(ctx)
if err != nil {
applog.Error(err, "Failed to remove application resourceTracker")
klog.InfoS("Failed to remove application resourceTracker", "err", err)
app.Status.SetConditions(v1alpha1.ReconcileError(errors.Wrap(err, "error to remove finalizer")))
return reconcile.Result{}, errors.Wrap(r.UpdateStatus(ctx, app), errUpdateApplicationStatus)
}
if needUpdate {
applog.Info("remove finalizer of application", "application", app.Namespace+"/"+app.Name, "finalizers", app.ObjectMeta.Finalizers)
klog.InfoS("Remove finalizer of application", "application", app.Namespace+"/"+app.Name, "finalizers", app.ObjectMeta.Finalizers)
return ctrl.Result{}, errors.Wrap(r.Update(ctx, app), errUpdateApplicationFinalizer)
}
// deleting and no need to handle finalizer
return reconcile.Result{}, nil
}
applog.Info("Start Rendering")
klog.Info("Start Rendering")
app.Status.Phase = common.ApplicationRendering
applog.Info("parse template")
klog.Info("Parse template")
// parse template
appParser := appfile.NewApplicationParser(r.Client, r.dm, r.pd)
ctx = oamutil.SetNamespaceInCtx(ctx, app.Namespace)
generatedAppfile, err := appParser.GenerateAppFile(ctx, app)
if err != nil {
applog.Error(err, "[Handle Parse]")
klog.InfoS("Failed to parse application", "err", err)
app.Status.SetConditions(errorCondition("Parsed", err))
r.Recorder.Event(app, event.Warning(velatypes.ReasonFailedParse, err))
return handler.handleErr(err)
@@ -132,7 +131,7 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
appRev, err := handler.GenerateAppRevision(ctx)
if err != nil {
applog.Error(err, "[Handle Calculate Revision]")
klog.InfoS("Failed to calculate appRevision", "err", err)
app.Status.SetConditions(errorCondition("Parsed", err))
r.Recorder.Event(app, event.Warning(velatypes.ReasonFailedParse, err))
return handler.handleErr(err)
@@ -141,11 +140,11 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
// Record the revision so it can be used to render data in context.appRevision
generatedAppfile.RevisionName = appRev.Name
applog.Info("build template")
klog.Info("Build template")
// build template to applicationconfig & component
ac, comps, err := generatedAppfile.GenerateApplicationConfiguration()
if err != nil {
applog.Error(err, "[Handle GenerateApplicationConfiguration]")
klog.InfoS("Failed to generate applicationConfiguration", "err", err)
app.Status.SetConditions(errorCondition("Built", err))
r.Recorder.Event(app, event.Warning(velatypes.ReasonFailedRender, err))
return handler.handleErr(err)
@@ -153,7 +152,7 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
err = handler.handleResourceTracker(ctx, comps, ac)
if err != nil {
applog.Error(err, "[Handle resourceTracker]")
klog.InfoS("Failed to handle resourceTracker", "err", err)
app.Status.SetConditions(errorCondition("Handle resourceTracker", err))
r.Recorder.Event(app, event.Warning(velatypes.ReasonFailedRender, err))
return handler.handleErr(err)
@@ -164,10 +163,10 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
app.Status.SetConditions(readyCondition("Built"))
r.Recorder.Event(app, event.Normal(velatypes.ReasonRendered, velatypes.MessageRendered))
applog.Info("apply application revision & component to the cluster")
klog.Info("Apply application revision & component to the cluster")
// apply application revision & component to the cluster
if err := handler.apply(ctx, appRev, ac, comps); err != nil {
applog.Error(err, "[Handle apply]")
klog.InfoS("Failed to apply application revision & component to the cluster", "err", err)
app.Status.SetConditions(errorCondition("Applied", err))
r.Recorder.Event(app, event.Warning(velatypes.ReasonFailedApply, err))
return handler.handleErr(err)
@@ -177,7 +176,7 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
if handler.app.Spec.RolloutPlan != nil {
res, err := handler.handleRollout(ctx)
if err != nil {
applog.Error(err, "[handle rollout]")
klog.InfoS("Failed to handle rollout", "err", err)
app.Status.SetConditions(errorCondition("Rollout", err))
r.Recorder.Event(app, event.Warning(velatypes.ReasonFailedRollout, err))
return handler.handleErr(err)
@@ -192,18 +191,18 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
// there is no need reconcile immediately, that means the rollout operation have finished
r.Recorder.Event(app, event.Normal(velatypes.ReasonRollout, velatypes.MessageRollout))
app.Status.SetConditions(readyCondition("Rollout"))
applog.Info("rollout finished")
klog.Info("Finished rollout ")
}
// The following logic will be skipped if rollout have not finished
app.Status.SetConditions(readyCondition("Applied"))
r.Recorder.Event(app, event.Normal(velatypes.ReasonFailedApply, velatypes.MessageApplied))
app.Status.Phase = common.ApplicationHealthChecking
applog.Info("check application health status")
klog.Info("Check application health status")
// check application health status
appCompStatus, healthy, err := handler.statusAggregate(generatedAppfile)
if err != nil {
applog.Error(err, "[status aggregate]")
klog.InfoS("Failed to aggregate status", "err", err)
app.Status.SetConditions(errorCondition("HealthCheck", err))
r.Recorder.Event(app, event.Warning(velatypes.ReasonFailedHealthCheck, err))
return handler.handleErr(err)
@@ -222,7 +221,7 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
err = garbageCollection(ctx, handler)
if err != nil {
applog.Error(err, "[Garbage collection]")
klog.InfoS("Failed to run Garbage collection", "err", err)
r.Recorder.Event(app, event.Warning(velatypes.ReasonFailedGC, err))
}
@@ -254,6 +253,9 @@ func registerFinalizers(app *v1beta1.Application) bool {
func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
// If Application Own these two child objects, AC status change will notify application controller and recursively update AC again, and trigger application event again...
return ctrl.NewControllerManagedBy(mgr).
WithOptions(controller.Options{
MaxConcurrentReconciles: r.concurrentReconciles,
}).
For(&v1beta1.Application{}).
Complete(r)
}
@@ -271,16 +273,16 @@ func (r *Reconciler) UpdateStatus(ctx context.Context, app *v1beta1.Application,
}
// Setup adds a controller that reconciles AppRollout.
func Setup(mgr ctrl.Manager, args core.Args, _ logging.Logger) error {
func Setup(mgr ctrl.Manager, args core.Args) error {
reconciler := Reconciler{
Client: mgr.GetClient(),
Log: ctrl.Log.WithName("Application"),
Scheme: mgr.GetScheme(),
Recorder: event.NewAPIRecorder(mgr.GetEventRecorderFor("Application")),
dm: args.DiscoveryMapper,
pd: args.PackageDiscover,
applicator: apply.NewAPIApplicator(mgr.GetClient()),
appRevisionLimit: args.AppRevisionLimit,
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Recorder: event.NewAPIRecorder(mgr.GetEventRecorderFor("Application")),
dm: args.DiscoveryMapper,
pd: args.PackageDiscover,
applicator: apply.NewAPIApplicator(mgr.GetClient()),
appRevisionLimit: args.AppRevisionLimit,
concurrentReconciles: args.ConcurrentReconciles,
}
return reconciler.SetupWithManager(mgr)
}

View File

@@ -283,9 +283,8 @@ var _ = Describe("Test finalizer related func", func() {
UID: rt.UID}
meta.AddFinalizer(&app.ObjectMeta, resourceTrackerFinalizer)
handler = appHandler{
r: reconciler,
app: app,
logger: reconciler.Log.WithValues("application", "finalizer-func-test"),
r: reconciler,
app: app,
}
need, err := handler.removeResourceTracker(ctx)
Expect(err).Should(BeNil())
@@ -304,9 +303,8 @@ var _ = Describe("Test finalizer related func", func() {
It("Test finalizeResourceTracker func without need ", func() {
app := getApp("app-4", namespace, "worker")
handler = appHandler{
r: reconciler,
app: app,
logger: reconciler.Log.WithValues("application", "finalizer-func-test"),
r: reconciler,
app: app,
}
need, err := handler.removeResourceTracker(ctx)
Expect(err).Should(BeNil())

View File

@@ -24,9 +24,7 @@ import (
"time"
runtimev1alpha1 "github.com/crossplane/crossplane-runtime/apis/core/v1alpha1"
"github.com/crossplane/crossplane-runtime/pkg/logging"
"github.com/crossplane/crossplane-runtime/pkg/meta"
"github.com/go-logr/logr"
terraformtypes "github.com/oam-dev/terraform-controller/api/types"
terraformapi "github.com/oam-dev/terraform-controller/api/v1beta1"
"github.com/pkg/errors"
@@ -80,7 +78,6 @@ type appHandler struct {
r *Reconciler
app *v1beta1.Application
appfile *appfile.Appfile
logger logr.Logger
inplace bool
isNewRevision bool
revisionHash string
@@ -99,7 +96,7 @@ func (h *appHandler) handleErr(err error) (ctrl.Result, error) {
return ctrl.Result{}, nil
}
if nerr != nil {
h.logger.Error(nerr, "[Update] application status")
klog.InfoS("Failed to update application status", "err", nerr)
}
return ctrl.Result{
RequeueAfter: time.Second * 10,
@@ -265,33 +262,32 @@ func (h *appHandler) statusAggregate(appFile *appfile.Appfile) ([]common.Applica
}
}
var traitStatusList []common.ApplicationTraitStatus
for _, tr := range wl.Traits {
if err := tr.EvalContext(pCtx); err != nil {
return nil, false, errors.WithMessagef(err, "app=%s, comp=%s, trait=%s, evaluate context error", appFile.Name, wl.Name, tr.Name)
}
}
var traitStatusList []common.ApplicationTraitStatus
for _, trait := range wl.Traits {
var traitStatus = common.ApplicationTraitStatus{
Type: trait.Name,
Type: tr.Name,
Healthy: true,
}
traitHealth, err := trait.EvalHealth(pCtx, h.r, h.app.Namespace)
traitHealth, err := tr.EvalHealth(pCtx, h.r, h.app.Namespace)
if err != nil {
return nil, false, errors.WithMessagef(err, "app=%s, comp=%s, trait=%s, check health error", appFile.Name, wl.Name, trait.Name)
return nil, false, errors.WithMessagef(err, "app=%s, comp=%s, trait=%s, check health error", appFile.Name, wl.Name, tr.Name)
}
if !traitHealth {
// TODO(wonderflow): we should add a custom way to let the template say why it's unhealthy, only a bool flag is not enough
traitStatus.Healthy = false
healthy = false
}
traitStatus.Message, err = trait.EvalStatus(pCtx, h.r, h.app.Namespace)
traitStatus.Message, err = tr.EvalStatus(pCtx, h.r, h.app.Namespace)
if err != nil {
return nil, false, errors.WithMessagef(err, "app=%s, comp=%s, trait=%s, evaluate status message error", appFile.Name, wl.Name, trait.Name)
return nil, false, errors.WithMessagef(err, "app=%s, comp=%s, trait=%s, evaluate status message error", appFile.Name, wl.Name, tr.Name)
}
traitStatusList = append(traitStatusList, traitStatus)
}
status.Traits = traitStatusList
status.Scopes = generateScopeReference(wl.Scopes)
appStatus = append(appStatus, status)
@@ -316,7 +312,7 @@ func (h *appHandler) createOrUpdateComponent(ctx context.Context, comp *v1alpha2
if err = h.r.Create(ctx, comp); err != nil {
return "", err
}
h.logger.Info("Created a new component", "component name", comp.GetName())
klog.InfoS("Created a new component", "component", klog.KObj(comp))
} else {
// remember the revision if there is a previous component
if curComp.Status.LatestRevision != nil {
@@ -326,7 +322,7 @@ func (h *appHandler) createOrUpdateComponent(ctx context.Context, comp *v1alpha2
if err := h.r.Update(ctx, comp); err != nil {
return "", err
}
h.logger.Info("Updated a component", "component name", comp.GetName())
klog.InfoS("Updated a component", "component", klog.KObj(comp))
}
// remove the object from the raw extension before we can compare with the existing componentRevision whose
// object is persisted as Raw data after going through api server
@@ -337,19 +333,18 @@ func (h *appHandler) createOrUpdateComponent(ctx context.Context, comp *v1alpha2
updatedComp.Spec.Helm.Repository.Object = nil
}
if len(preRevisionName) != 0 {
needNewRevision, err := utils.CompareWithRevision(ctx, h.r,
logging.NewLogrLogger(h.logger), compName, compNameSpace, preRevisionName, &updatedComp.Spec)
needNewRevision, err := utils.CompareWithRevision(ctx, h.r, compName, compNameSpace, preRevisionName, &updatedComp.Spec)
if err != nil {
return "", errors.Wrap(err, fmt.Sprintf("compare with existing controllerRevision %s failed",
preRevisionName))
}
if !needNewRevision {
h.logger.Info("no need to wait for a new component revision", "component name", updatedComp.GetName(),
klog.InfoS("No need to wait for a new component revision", "component", klog.KObj(updatedComp),
"revision", preRevisionName)
return preRevisionName, nil
}
}
h.logger.Info("wait for a new component revision", "component name", compName,
klog.InfoS("Wait for a new component revision", "component name", compName,
"previous revision", preRevisionName)
// get the new component revision that contains the component with retry
checkForRevision := func() (bool, error) {
@@ -361,7 +356,7 @@ func (h *appHandler) createOrUpdateComponent(ctx context.Context, comp *v1alpha2
if curComp.Status.LatestRevision == nil || curComp.Status.LatestRevision.Name == preRevisionName {
return false, nil
}
needNewRevision, err := utils.CompareWithRevision(ctx, h.r, logging.NewLogrLogger(h.logger), compName,
needNewRevision, err := utils.CompareWithRevision(ctx, h.r, compName,
compNameSpace, curComp.Status.LatestRevision.Name, &updatedComp.Spec)
if err != nil {
// retry no matter what
@@ -371,7 +366,7 @@ func (h *appHandler) createOrUpdateComponent(ctx context.Context, comp *v1alpha2
// end the loop if we find the revision
if !needNewRevision {
curRevisionName = curComp.Status.LatestRevision.Name
h.logger.Info("get a matching component revision", "component name", compName,
klog.InfoS("Get a matching component revision", "component name", compName,
"current revision", curRevisionName)
}
return !needNewRevision, nil
@@ -419,13 +414,13 @@ func (h *appHandler) createOrUpdateAppContext(ctx context.Context, owners []meta
if !apierrors.IsNotFound(err) {
return err
}
klog.InfoS("create a new appContext", "application name",
klog.InfoS("Create a new appContext", "application name",
appContext.GetName(), "revision it points to", appContext.Spec.ApplicationRevisionName)
return h.r.Create(ctx, &appContext)
}
// we don't need to create another appConfig
klog.InfoS("replace the existing appContext", "application name", appContext.GetName(),
klog.InfoS("Replace the existing appContext", "appContext", klog.KObj(&appContext),
"revision it points to", appContext.Spec.ApplicationRevisionName)
appContext.ResourceVersion = curAppContext.ResourceVersion
return h.r.Update(ctx, &appContext)
@@ -539,7 +534,7 @@ func (h *appHandler) removeResourceTracker(ctx context.Context) (bool, error) {
if err != nil {
return false, err
}
h.logger.Info("delete application resourceTracker")
klog.Info("Delete application resourceTracker")
meta.RemoveFinalizer(h.app, resourceTrackerFinalizer)
h.app.Status.ResourceTracker = nil
return true, nil

View File

@@ -22,7 +22,6 @@ import (
"strings"
"time"
"github.com/crossplane/crossplane-runtime/pkg/logging"
"github.com/ghodss/yaml"
terraformtypes "github.com/oam-dev/terraform-controller/api/types"
terraformapi "github.com/oam-dev/terraform-controller/api/v1beta1"
@@ -121,9 +120,8 @@ var _ = Describe("Test Application apply", func() {
}},
}
handler = appHandler{
r: reconciler,
app: app,
logger: reconciler.Log.WithValues("application", "unit-test"),
r: reconciler,
app: app,
}
By("Create the Namespace for test")
Expect(k8sClient.Create(ctx, &ns)).Should(Succeed())
@@ -184,7 +182,7 @@ var _ = Describe("Test Application apply", func() {
By("verify that the revision is the set correctly and newRevision is true")
Expect(err).ShouldNot(HaveOccurred())
// verify the revision actually contains the right component
Expect(utils.CompareWithRevision(ctx, handler.r, logging.NewLogrLogger(handler.logger), component.GetName(),
Expect(utils.CompareWithRevision(ctx, handler.r, component.GetName(),
component.GetNamespace(), revision, &component.Spec)).Should(BeTrue())
preRevision := revision
@@ -203,7 +201,7 @@ var _ = Describe("Test Application apply", func() {
By("verify that the revision is changed and newRevision is true")
Expect(err).ShouldNot(HaveOccurred())
Expect(revision).ShouldNot(BeIdenticalTo(preRevision))
Expect(utils.CompareWithRevision(ctx, handler.r, logging.NewLogrLogger(handler.logger), component.GetName(),
Expect(utils.CompareWithRevision(ctx, handler.r, component.GetName(),
component.GetNamespace(), revision, &component.Spec)).Should(BeTrue())
// revision increased
Expect(strings.Compare(revision, preRevision) > 0).Should(BeTrue())
@@ -251,9 +249,8 @@ var _ = Describe("Test applyHelmModuleResources", func() {
ctx = context.TODO()
app = &v1beta1.Application{}
handler = appHandler{
r: reconciler,
app: app,
logger: reconciler.Log.WithValues("application", "unit-test"),
r: reconciler,
app: app,
}
handler.r.applicator = apply.NewAPIApplicator(reconciler.Client)
})

View File

@@ -24,6 +24,7 @@ import (
apiequality "k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog/v2"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -53,11 +54,11 @@ func (h *appHandler) UpdateRevisionStatus(ctx context.Context, revName, hash str
}
// make sure that we persist the latest revision first
if err := h.r.UpdateStatus(ctx, h.app); err != nil {
h.logger.Error(err, "update the latest appConfig revision to status", "application name", h.app.GetName(),
"latest revision", revName)
klog.InfoS("Failed to update the latest appConfig revision to status", "application", klog.KObj(h.app),
"latest revision", revName, "err", err)
return err
}
h.logger.Info("recorded the latest appConfig revision", "application name", h.app.GetName(),
klog.InfoS("Recorded the latest appConfig revision", "application", klog.KObj(h.app),
"latest revision", revName)
return nil
}
@@ -130,7 +131,8 @@ func (h *appHandler) gatherRevisionSpec() (*v1beta1.ApplicationRevision, string,
}
appRevisionHash, err := ComputeAppRevisionHash(appRev)
if err != nil {
h.logger.Error(err, "compute hash of appRevision for application", "application name", h.app.GetName())
klog.InfoS("Failed to compute hash of appRevision for application", "application", klog.KObj(h.app),
"err", err)
return appRev, "", err
}
return appRev, appRevisionHash, nil
@@ -153,8 +155,8 @@ func (h *appHandler) compareWithLastRevisionSpec(ctx context.Context, newAppRevi
lastAppRevision := &v1beta1.ApplicationRevision{}
if err := h.r.Get(ctx, client.ObjectKey{Name: h.app.Status.LatestRevision.Name,
Namespace: h.app.Namespace}, lastAppRevision); err != nil {
h.logger.Error(err, "get the last appRevision from K8s", "application name",
h.app.GetName(), "revision", h.app.Status.LatestRevision.Name)
klog.InfoS("Failed to get the last appRevision from K8s", "application",
klog.KObj(h.app), "revision", h.app.Status.LatestRevision.Name, "err", err)
return false, errors.Wrapf(err, "fail to get applicationRevision %s", h.app.Status.LatestRevision.Name)
}
if DeepEqualRevision(lastAppRevision, newAppRevision) {
@@ -315,7 +317,7 @@ func cleanUpApplicationRevision(ctx context.Context, h *appHandler) error {
if needKill <= 0 {
return nil
}
h.logger.Info("application controller cleanup old appRevisions", "needKillNum", needKill)
klog.InfoS("Application controller cleanup old appRevisions", "needKillNum", needKill)
sortedRevision := appRevisionList.Items
sort.Sort(historiesByRevision(sortedRevision))

View File

@@ -387,9 +387,8 @@ var _ = Describe("Test gatherUsingAppRevision func", func() {
}
Expect(k8sClient.Create(ctx, appContext)).Should(BeNil())
handler := appHandler{
r: reconciler,
app: app,
logger: reconciler.Log.WithValues("application", "gatherUsingAppRevision-func-test"),
r: reconciler,
app: app,
}
Eventually(func() error {
using, err := gatherUsingAppRevision(ctx, &handler)

View File

@@ -146,9 +146,8 @@ var _ = Describe("test generate revision ", func() {
appRevision2.Name = "appRevision2"
handler = appHandler{
r: reconciler,
app: &app,
logger: reconciler.Log.WithValues("apply", "unit-test"),
r: reconciler,
app: &app,
}
})

View File

@@ -25,7 +25,6 @@ import (
"time"
"github.com/crossplane/crossplane-runtime/pkg/event"
"github.com/crossplane/crossplane-runtime/pkg/logging"
"github.com/go-logr/logr"
terraformv1beta1 "github.com/oam-dev/terraform-controller/api/v1beta1"
. "github.com/onsi/ginkgo"
@@ -131,7 +130,6 @@ var _ = BeforeSuite(func(done Done) {
Expect(err).To(BeNil())
reconciler = &Reconciler{
Client: k8sClient,
Log: ctrl.Log.WithName("Application-Test"),
Scheme: testScheme,
dm: dm,
pd: pd,
@@ -153,7 +151,6 @@ var _ = BeforeSuite(func(done Done) {
For(&v1alpha2.Component{}).
Watches(&source.Kind{Type: &v1alpha2.Component{}}, &applicationconfiguration.ComponentHandler{
Client: ctlManager.GetClient(),
Logger: logging.NewLogrLogger(ctrl.Log.WithName("application-testsuite-component-handler")),
RevisionLimit: 100,
CustomRevisionHookURL: "",
}).Complete(&NoOpReconciler{

View File

@@ -26,7 +26,6 @@ import (
"github.com/crossplane/crossplane-runtime/apis/core/v1alpha1"
"github.com/crossplane/crossplane-runtime/pkg/event"
"github.com/crossplane/crossplane-runtime/pkg/fieldpath"
"github.com/crossplane/crossplane-runtime/pkg/logging"
"github.com/crossplane/crossplane-runtime/pkg/meta"
"github.com/crossplane/crossplane-runtime/pkg/resource"
"github.com/pkg/errors"
@@ -39,10 +38,12 @@ import (
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
oamtype "github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/controller/common"
core "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/discoverymapper"
@@ -50,10 +51,7 @@ import (
"github.com/oam-dev/kubevela/pkg/utils/apply"
)
const (
reconcileTimeout = 1 * time.Minute
dependCheckWait = 10 * time.Second
)
const reconcileTimeout = 1 * time.Minute
// Reconcile error strings.
const (
@@ -84,16 +82,21 @@ const (
)
// Setup adds a controller that reconciles ApplicationConfigurations.
func Setup(mgr ctrl.Manager, args core.Args, l logging.Logger) error {
func Setup(mgr ctrl.Manager, args core.Args) error {
name := "oam/" + strings.ToLower(v1alpha2.ApplicationConfigurationGroupKind)
return ctrl.NewControllerManagedBy(mgr).
builder := ctrl.NewControllerManagedBy(mgr)
builder.WithOptions(controller.Options{
MaxConcurrentReconciles: args.ConcurrentReconciles,
})
return builder.
Named(name).
For(&v1alpha2.ApplicationConfiguration{}).
Complete(NewReconciler(mgr, args.DiscoveryMapper,
l.WithValues("controller", name),
WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
WithApplyOnceOnlyMode(args.ApplyMode)))
WithApplyOnceOnlyMode(args.ApplyMode),
WithDependCheckWait(args.DependCheckWait)))
}
// An OAMApplicationReconciler reconciles OAM ApplicationConfigurations by rendering and
@@ -104,11 +107,11 @@ type OAMApplicationReconciler struct {
workloads WorkloadApplicator
gc GarbageCollector
scheme *runtime.Scheme
log logging.Logger
record event.Recorder
preHooks map[string]ControllerHooks
postHooks map[string]ControllerHooks
applyOnceOnlyMode core.ApplyOnceOnlyMode
dependCheckWait time.Duration
}
// A ReconcilerOption configures a Reconciler.
@@ -166,9 +169,16 @@ func WithApplyOnceOnlyMode(mode core.ApplyOnceOnlyMode) ReconcilerOption {
}
}
// WithDependCheckWait set depend check wait
func WithDependCheckWait(dependCheckWait time.Duration) ReconcilerOption {
return func(r *OAMApplicationReconciler) {
r.dependCheckWait = dependCheckWait
}
}
// NewReconciler returns an OAMApplicationReconciler that reconciles ApplicationConfigurations
// by rendering and instantiating their Components and Traits.
func NewReconciler(m ctrl.Manager, dm discoverymapper.DiscoveryMapper, log logging.Logger, o ...ReconcilerOption) *OAMApplicationReconciler {
func NewReconciler(m ctrl.Manager, dm discoverymapper.DiscoveryMapper, o ...ReconcilerOption) *OAMApplicationReconciler {
r := &OAMApplicationReconciler{
client: m.GetClient(),
scheme: m.GetScheme(),
@@ -185,7 +195,6 @@ func NewReconciler(m ctrl.Manager, dm discoverymapper.DiscoveryMapper, log loggi
dm: dm,
},
gc: GarbageCollectorFn(eligible),
log: log,
record: event.NewNopRecorder(),
preHooks: make(map[string]ControllerHooks),
postHooks: make(map[string]ControllerHooks),
@@ -206,8 +215,7 @@ func NewReconciler(m ctrl.Manager, dm discoverymapper.DiscoveryMapper, log loggi
// Reconcile an OAM ApplicationConfigurations by rendering and instantiating its
// Components and Traits.
func (r *OAMApplicationReconciler) Reconcile(req reconcile.Request) (reconcile.Result, error) {
log := r.log.WithValues("request", req)
log.Debug("Reconciling")
klog.InfoS("Reconcile applicationConfiguration", "applicationConfiguration", klog.KRef(req.Namespace, req.Name))
ctx, cancel := context.WithTimeout(context.Background(), reconcileTimeout)
defer cancel()
@@ -224,13 +232,13 @@ func (r *OAMApplicationReconciler) Reconcile(req reconcile.Request) (reconcile.R
ctx = util.SetNamespaceInCtx(ctx, ac.Namespace)
if ac.ObjectMeta.DeletionTimestamp.IsZero() {
if registerFinalizers(ac) {
log.Debug("Register new finalizers", "finalizers", ac.ObjectMeta.Finalizers)
klog.V(common.LogDebug).InfoS("Register new finalizers", "finalizers", ac.ObjectMeta.Finalizers)
return reconcile.Result{}, errors.Wrap(r.client.Update(ctx, ac), errUpdateAppConfigStatus)
}
} else {
if err := r.workloads.Finalize(ctx, ac); err != nil {
log.Debug("Failed to finalize workloads", "workloads status", ac.Status.Workloads,
"error", err)
klog.InfoS("Failed to finalize workloads", "workloads status", ac.Status.Workloads,
"err", err)
r.record.Event(ac, event.Warning(reasonCannotFinalizeWorkloads, err))
ac.SetConditions(v1alpha1.ReconcileError(errors.Wrap(err, errFinalizeWorkloads)))
return reconcile.Result{}, errors.Wrap(r.UpdateStatus(ctx, ac), errUpdateAppConfigStatus)
@@ -238,7 +246,7 @@ func (r *OAMApplicationReconciler) Reconcile(req reconcile.Request) (reconcile.R
return reconcile.Result{}, errors.Wrap(r.client.Update(ctx, ac), errUpdateAppConfigStatus)
}
reconResult := r.ACReconcile(ctx, ac, log)
reconResult := r.ACReconcile(ctx, ac)
// always update ac status and set the error
err := errors.Wrap(r.UpdateStatus(ctx, ac), errUpdateAppConfigStatus)
// use the controller build-in backoff mechanism if an error occurs
@@ -249,17 +257,16 @@ func (r *OAMApplicationReconciler) Reconcile(req reconcile.Request) (reconcile.R
}
// ACReconcile contains all the reconcile logic of an AC, it can be used by other controller
func (r *OAMApplicationReconciler) ACReconcile(ctx context.Context, ac *v1alpha2.ApplicationConfiguration,
log logging.Logger) (result reconcile.Result) {
func (r *OAMApplicationReconciler) ACReconcile(ctx context.Context, ac *v1alpha2.ApplicationConfiguration) (result reconcile.Result) {
acPatch := ac.DeepCopy()
// execute the posthooks at the end no matter what
defer func() {
updateObservedGeneration(ac)
for name, hook := range r.postHooks {
exeResult, err := hook.Exec(ctx, ac, log)
exeResult, err := hook.Exec(ctx, ac)
if err != nil {
log.Debug("Failed to execute post-hooks", "hook name", name, "error", err,
klog.InfoS("Failed to execute post-hooks", "hook name", name, "err", err,
"requeue-after", result.RequeueAfter)
r.record.Event(ac, event.Warning(reasonCannotExecutePosthooks, err))
ac.SetConditions(v1alpha1.ReconcileError(errors.Wrap(err, errExecutePosthooks)))
@@ -273,9 +280,9 @@ func (r *OAMApplicationReconciler) ACReconcile(ctx context.Context, ac *v1alpha2
// execute the prehooks
for name, hook := range r.preHooks {
result, err := hook.Exec(ctx, ac, log)
result, err := hook.Exec(ctx, ac)
if err != nil {
log.Debug("Failed to execute pre-hooks", "hook name", name, "error", err, "requeue-after", result.RequeueAfter)
klog.InfoS("Failed to execute pre-hooks", "hook name", name, "requeue-after", result.RequeueAfter, "err", err)
r.record.Event(ac, event.Warning(reasonCannotExecutePrehooks, err))
ac.SetConditions(v1alpha1.ReconcileError(errors.Wrap(err, errExecutePrehooks)))
return result
@@ -283,13 +290,13 @@ func (r *OAMApplicationReconciler) ACReconcile(ctx context.Context, ac *v1alpha2
r.record.Event(ac, event.Normal(reasonExecutePrehook, "Successfully executed a prehook", "prehook name ", name))
}
log = log.WithValues("uid", ac.GetUID(), "version", ac.GetResourceVersion())
klog.InfoS("ApplicationConfiguration", "uid", ac.GetUID(), "version", ac.GetResourceVersion())
// we have special logics for application generated applicationConfiguration
if isControlledByApp(ac) {
if ac.GetAnnotations()[oam.AnnotationAppRevision] == strconv.FormatBool(true) {
msg := "Encounter an application revision, no need to reconcile"
log.Info(msg)
klog.Info(msg)
r.record.Event(ac, event.Normal(reasonRevision, msg))
ac.SetConditions(v1alpha1.Unavailable())
ac.Status.RollingStatus = oamtype.InactiveAfterRollingCompleted
@@ -300,18 +307,18 @@ func (r *OAMApplicationReconciler) ACReconcile(ctx context.Context, ac *v1alpha2
workloads, depStatus, err := r.components.Render(ctx, ac)
if err != nil {
log.Info("Cannot render components", "error", err)
klog.InfoS("Cannot render components", "err", err)
r.record.Event(ac, event.Warning(reasonCannotRenderComponents, err))
ac.SetConditions(v1alpha1.ReconcileError(errors.Wrap(err, errRenderComponents)))
return reconcile.Result{}
}
log.Debug("Successfully rendered components", "workloads", len(workloads))
klog.V(common.LogDebug).InfoS("Successfully rendered components", "workloads", len(workloads))
r.record.Event(ac, event.Normal(reasonRenderComponents, "Successfully rendered components",
"workloads", strconv.Itoa(len(workloads))))
applyOpts := []apply.ApplyOption{apply.MustBeControllableBy(ac.GetUID()), applyOnceOnly(ac, r.applyOnceOnlyMode, log)}
applyOpts := []apply.ApplyOption{apply.MustBeControllableBy(ac.GetUID()), applyOnceOnly(ac, r.applyOnceOnlyMode)}
if err := r.workloads.Apply(ctx, ac.Status.Workloads, workloads, applyOpts...); err != nil {
log.Debug("Cannot apply workload", "error", err)
klog.InfoS("Cannot apply workload", "err", err)
r.record.Event(ac, event.Warning(reasonCannotApplyComponents, err))
ac.SetConditions(v1alpha1.ReconcileError(errors.Wrap(err, errApplyComponents)))
return reconcile.Result{}
@@ -322,7 +329,7 @@ func (r *OAMApplicationReconciler) ACReconcile(ctx context.Context, ac *v1alpha2
klog.InfoS("mark the ac rolling status as templated", "appConfig", klog.KRef(ac.Namespace, ac.Name))
ac.Status.RollingStatus = oamtype.RollingTemplated
}
log.Debug("Successfully applied components", "workloads", len(workloads))
klog.V(common.LogDebug).InfoS("Successfully applied components", "workloads", len(workloads))
r.record.Event(ac, event.Normal(reasonApplyComponents, "Successfully applied components",
"workloads", strconv.Itoa(len(workloads))))
@@ -333,24 +340,24 @@ func (r *OAMApplicationReconciler) ACReconcile(ctx context.Context, ac *v1alpha2
for _, e := range r.gc.Eligible(ac.GetNamespace(), ac.Status.Workloads, workloads) {
// https://github.com/golang/go/wiki/CommonMistakes#using-reference-to-loop-iterator-variable
e := e
log := log.WithValues("kind", e.GetKind(), "name", e.GetName())
klog.InfoS("Collect garbage ", "resource", klog.KRef(e.GetNamespace(), e.GetName()),
"apiVersion", e.GetAPIVersion(), "kind", e.GetKind())
record := r.record.WithAnnotations("kind", e.GetKind(), "name", e.GetName())
err := r.confirmDeleteOnApplyOnceMode(ctx, ac.GetNamespace(), &e)
if err != nil {
log.Debug("confirm component can't be garbage collected", "error", err)
klog.InfoS("Confirm component can't be garbage collected", "err", err)
record.Event(ac, event.Warning(reasonCannotGGComponents, err))
ac.SetConditions(v1alpha1.ReconcileError(errors.Wrap(err, errGCComponent)))
return reconcile.Result{}
}
if err := r.client.Delete(ctx, &e); resource.IgnoreNotFound(err) != nil {
log.Debug("Cannot garbage collect component", "error", err)
klog.InfoS("Cannot garbage collect component", "err", err)
record.Event(ac, event.Warning(reasonCannotGGComponents, err))
ac.SetConditions(v1alpha1.ReconcileError(errors.Wrap(err, errGCComponent)))
return reconcile.Result{}
}
log.Debug("Garbage collected resource")
klog.V(common.LogDebug).Info("Garbage collected resource")
record.Event(ac, event.Normal(reasonGGComponent, "Successfully garbage collected component"))
}
@@ -360,7 +367,7 @@ func (r *OAMApplicationReconciler) ACReconcile(ctx context.Context, ac *v1alpha2
ac.Status.Dependency = v1alpha2.DependencyStatus{}
var waitTime time.Duration
if len(depStatus.Unsatisfied) != 0 {
waitTime = dependCheckWait
waitTime = r.dependCheckWait
ac.Status.Dependency = *depStatus
}
@@ -689,7 +696,7 @@ func (e *GenerationUnchanged) Error() string {
// applyOnceOnly is an ApplyOption that controls the applying mechanism for workload and trait.
// More detail refers to the ApplyOnceOnlyMode type annotation
func applyOnceOnly(ac *v1alpha2.ApplicationConfiguration, mode core.ApplyOnceOnlyMode, log logging.Logger) apply.ApplyOption {
func applyOnceOnly(ac *v1alpha2.ApplicationConfiguration, mode core.ApplyOnceOnlyMode) apply.ApplyOption {
return func(_ context.Context, existing, desired runtime.Object) error {
if mode == core.ApplyOnceOnlyOff {
return nil
@@ -705,7 +712,7 @@ func applyOnceOnly(ac *v1alpha2.ApplicationConfiguration, mode core.ApplyOnceOnl
dLabels[oam.LabelOAMResourceType] != oam.ResourceTypeTrait {
// this ApplyOption only works for workload and trait
// skip if the resource is not workload nor trait, e.g., scope
log.Info("ignore apply only once check, because resourceType is not workload or trait",
klog.InfoS("Ignore apply only once check, because resourceType is not workload or trait",
oam.LabelOAMResourceType, dLabels[oam.LabelOAMResourceType])
return nil
}
@@ -714,7 +721,7 @@ func applyOnceOnly(ac *v1alpha2.ApplicationConfiguration, mode core.ApplyOnceOnl
if existing == nil {
if mode != core.ApplyOnceOnlyForce {
// non-force mode will always create the resource if not exist.
log.Info("apply only once with mode:" + string(mode) + ", but old resource not exist, will create a new one")
klog.InfoS("Apply only once with mode:" + string(mode) + ", but old resource not exist, will create a new one")
return nil
}
@@ -759,7 +766,7 @@ func applyOnceOnly(ac *v1alpha2.ApplicationConfiguration, mode core.ApplyOnceOnl
if createdBefore {
message = "apply only once with mode: force, but resource updated, will create new"
}
log.Info(message, "appConfig", ac.Name, "gvk", desired.GetObjectKind().GroupVersionKind(), "name", d.GetName(),
klog.InfoS(message, "appConfig", ac.Name, "gvk", desired.GetObjectKind().GroupVersionKind(), "name", d.GetName(),
"resourceType", dLabels[oam.LabelOAMResourceType], "appliedCompRevision", appliedRevision,
"labeledCompRevision", dLabels[oam.LabelAppComponentRevision],
"appliedGeneration", appliedGeneration, "labeledGeneration", dAnnots[oam.AnnotationAppGeneration])
@@ -780,7 +787,7 @@ func applyOnceOnly(ac *v1alpha2.ApplicationConfiguration, mode core.ApplyOnceOnl
// that means its spec is not changed
if (e.GetAnnotations()[oam.AnnotationAppGeneration] != dAnnots[oam.AnnotationAppGeneration]) ||
(eLabels[oam.LabelAppComponentRevision] != dLabels[oam.LabelAppComponentRevision]) {
log.Info("apply only once with mode: "+string(mode)+", but new generation or revision created, will create new",
klog.InfoS("Apply only once with mode: "+string(mode)+", but new generation or revision created, will create new",
oam.AnnotationAppGeneration, e.GetAnnotations()[oam.AnnotationAppGeneration]+"/"+dAnnots[oam.AnnotationAppGeneration],
oam.LabelAppComponentRevision, eLabels[oam.LabelAppComponentRevision]+"/"+dLabels[oam.LabelAppComponentRevision])
// its spec is changed, so apply new configuration to it

View File

@@ -24,7 +24,6 @@ import (
"time"
runtimev1alpha1 "github.com/crossplane/crossplane-runtime/apis/core/v1alpha1"
"github.com/crossplane/crossplane-runtime/pkg/logging"
"github.com/crossplane/crossplane-runtime/pkg/test"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
@@ -182,6 +181,7 @@ func TestReconciler(t *testing.T) {
WithRenderer(ComponentRenderFn(func(_ context.Context, _ *v1alpha2.ApplicationConfiguration) ([]Workload, *v1alpha2.DependencyStatus, error) {
return nil, &v1alpha2.DependencyStatus{}, errBoom
})),
WithDependCheckWait(10 * time.Second),
},
},
want: want{
@@ -212,6 +212,7 @@ func TestReconciler(t *testing.T) {
_ []Workload, _ ...apply.ApplyOption) error {
return errBoom
}}),
WithDependCheckWait(10 * time.Second),
},
},
want: want{
@@ -246,6 +247,7 @@ func TestReconciler(t *testing.T) {
WithGarbageCollector(GarbageCollectorFn(func(_ string, _ []v1alpha2.WorkloadStatus, _ []Workload) []unstructured.Unstructured {
return []unstructured.Unstructured{*workload}
})),
WithDependCheckWait(10 * time.Second),
},
},
want: want{
@@ -310,10 +312,11 @@ func TestReconciler(t *testing.T) {
WithGarbageCollector(GarbageCollectorFn(func(_ string, _ []v1alpha2.WorkloadStatus, _ []Workload) []unstructured.Unstructured {
return []unstructured.Unstructured{*trait}
})),
WithDependCheckWait(10 * time.Second),
},
},
want: want{
result: reconcile.Result{RequeueAfter: dependCheckWait},
result: reconcile.Result{RequeueAfter: 10 * time.Second},
},
},
"FailedPreHook": {
@@ -346,15 +349,16 @@ func TestReconciler(t *testing.T) {
WithGarbageCollector(GarbageCollectorFn(func(_ string, _ []v1alpha2.WorkloadStatus, _ []Workload) []unstructured.Unstructured {
return []unstructured.Unstructured{*trait}
})),
WithPrehook("preHookSuccess", ControllerHooksFn(func(ctx context.Context, ac *v1alpha2.ApplicationConfiguration, logger logging.Logger) (reconcile.Result, error) {
WithPrehook("preHookSuccess", ControllerHooksFn(func(ctx context.Context, ac *v1alpha2.ApplicationConfiguration) (reconcile.Result, error) {
return reconcile.Result{RequeueAfter: 15 * time.Second}, nil
})),
WithPrehook("preHookFailed", ControllerHooksFn(func(ctx context.Context, ac *v1alpha2.ApplicationConfiguration, logger logging.Logger) (reconcile.Result, error) {
WithPrehook("preHookFailed", ControllerHooksFn(func(ctx context.Context, ac *v1alpha2.ApplicationConfiguration) (reconcile.Result, error) {
return reconcile.Result{RequeueAfter: 15 * time.Second}, errBoom
})),
WithPosthook("postHook", ControllerHooksFn(func(ctx context.Context, ac *v1alpha2.ApplicationConfiguration, logger logging.Logger) (reconcile.Result, error) {
WithPosthook("postHook", ControllerHooksFn(func(ctx context.Context, ac *v1alpha2.ApplicationConfiguration) (reconcile.Result, error) {
return reconcile.Result{}, nil
})),
WithDependCheckWait(10 * time.Second),
},
},
want: want{
@@ -420,12 +424,13 @@ func TestReconciler(t *testing.T) {
WithGarbageCollector(GarbageCollectorFn(func(_ string, _ []v1alpha2.WorkloadStatus, _ []Workload) []unstructured.Unstructured {
return []unstructured.Unstructured{*trait}
})),
WithPosthook("preHookSuccess", ControllerHooksFn(func(ctx context.Context, ac *v1alpha2.ApplicationConfiguration, logger logging.Logger) (reconcile.Result, error) {
WithPosthook("preHookSuccess", ControllerHooksFn(func(ctx context.Context, ac *v1alpha2.ApplicationConfiguration) (reconcile.Result, error) {
return reconcile.Result{}, nil
})),
WithPosthook("preHookFailed", ControllerHooksFn(func(ctx context.Context, ac *v1alpha2.ApplicationConfiguration, logger logging.Logger) (reconcile.Result, error) {
WithPosthook("preHookFailed", ControllerHooksFn(func(ctx context.Context, ac *v1alpha2.ApplicationConfiguration) (reconcile.Result, error) {
return reconcile.Result{RequeueAfter: 15 * time.Second}, errBoom
})),
WithDependCheckWait(10 * time.Second),
},
},
want: want{
@@ -465,18 +470,19 @@ func TestReconciler(t *testing.T) {
WithGarbageCollector(GarbageCollectorFn(func(_ string, _ []v1alpha2.WorkloadStatus, _ []Workload) []unstructured.Unstructured {
return []unstructured.Unstructured{*trait}
})),
WithPrehook("preHookSuccess", ControllerHooksFn(func(ctx context.Context, ac *v1alpha2.ApplicationConfiguration, logger logging.Logger) (reconcile.Result, error) {
WithPrehook("preHookSuccess", ControllerHooksFn(func(ctx context.Context, ac *v1alpha2.ApplicationConfiguration) (reconcile.Result, error) {
return reconcile.Result{RequeueAfter: 15 * time.Second}, nil
})),
WithPrehook("preHookFailed", ControllerHooksFn(func(ctx context.Context, ac *v1alpha2.ApplicationConfiguration, logger logging.Logger) (reconcile.Result, error) {
WithPrehook("preHookFailed", ControllerHooksFn(func(ctx context.Context, ac *v1alpha2.ApplicationConfiguration) (reconcile.Result, error) {
return reconcile.Result{RequeueAfter: 15 * time.Second}, errBoom
})),
WithPosthook("preHookSuccess", ControllerHooksFn(func(ctx context.Context, ac *v1alpha2.ApplicationConfiguration, logger logging.Logger) (reconcile.Result, error) {
WithPosthook("preHookSuccess", ControllerHooksFn(func(ctx context.Context, ac *v1alpha2.ApplicationConfiguration) (reconcile.Result, error) {
return reconcile.Result{}, nil
})),
WithPosthook("preHookFailed", ControllerHooksFn(func(ctx context.Context, ac *v1alpha2.ApplicationConfiguration, logger logging.Logger) (reconcile.Result, error) {
WithPosthook("preHookFailed", ControllerHooksFn(func(ctx context.Context, ac *v1alpha2.ApplicationConfiguration) (reconcile.Result, error) {
return reconcile.Result{RequeueAfter: 15 * time.Second}, errBoom
})),
WithDependCheckWait(10 * time.Second),
},
},
want: want{
@@ -538,10 +544,10 @@ func TestReconciler(t *testing.T) {
WithGarbageCollector(GarbageCollectorFn(func(_ string, _ []v1alpha2.WorkloadStatus, _ []Workload) []unstructured.Unstructured {
return []unstructured.Unstructured{*trait}
})),
WithPrehook("preHook", ControllerHooksFn(func(ctx context.Context, ac *v1alpha2.ApplicationConfiguration, logger logging.Logger) (reconcile.Result, error) {
WithPrehook("preHook", ControllerHooksFn(func(ctx context.Context, ac *v1alpha2.ApplicationConfiguration) (reconcile.Result, error) {
return reconcile.Result{}, nil
})),
WithPosthook("postHook", ControllerHooksFn(func(ctx context.Context, ac *v1alpha2.ApplicationConfiguration, logger logging.Logger) (reconcile.Result, error) {
WithPosthook("postHook", ControllerHooksFn(func(ctx context.Context, ac *v1alpha2.ApplicationConfiguration) (reconcile.Result, error) {
return reconcile.Result{}, nil
})),
},
@@ -588,6 +594,9 @@ func TestReconciler(t *testing.T) {
MockStatusUpdate: test.NewMockStatusUpdateFn(nil),
},
},
o: []ReconcilerOption{
WithDependCheckWait(10 * time.Second),
},
},
want: want{
result: reconcile.Result{},
@@ -620,6 +629,9 @@ func TestReconciler(t *testing.T) {
MockStatusUpdate: test.NewMockStatusUpdateFn(nil),
},
},
o: []ReconcilerOption{
WithDependCheckWait(10 * time.Second),
},
},
want: want{
result: reconcile.Result{},
@@ -656,6 +668,7 @@ func TestReconciler(t *testing.T) {
WithApplicator(WorkloadApplyFns{FinalizeFn: func(ctx context.Context, ac *v1alpha2.ApplicationConfiguration) error {
return errBoom
}}),
WithDependCheckWait(10 * time.Second),
},
},
want: want{
@@ -666,7 +679,7 @@ func TestReconciler(t *testing.T) {
for name, tc := range cases {
t.Run(name, func(t *testing.T) {
r := NewReconciler(tc.args.m, nil, logging.NewNopLogger(), tc.args.o...)
r := NewReconciler(tc.args.m, nil, tc.args.o...)
got, err := r.Reconcile(reconcile.Request{})
if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
@@ -1879,7 +1892,7 @@ func TestUpdateStatus(t *testing.T) {
},
}
r := NewReconciler(m, nil, logging.NewNopLogger())
r := NewReconciler(m, nil)
ac := &v1alpha2.ApplicationConfiguration{}
err := r.client.Get(context.Background(), types.NamespacedName{Name: "example-appconfig"}, ac)

View File

@@ -20,9 +20,6 @@ import (
"context"
"time"
"github.com/crossplane/crossplane-runtime/pkg/logging"
ctrl "sigs.k8s.io/controller-runtime"
"k8s.io/apimachinery/pkg/types"
. "github.com/onsi/ginkgo"
@@ -311,7 +308,7 @@ var _ = Describe("Test apply (workloads/traits) once only", func() {
When("ApplyOnceOnlyForce is enabled", func() {
It("tests the situation where workload is not applied at the first because of unsatisfied dependency",
func() {
componentHandler := &ComponentHandler{Client: k8sClient, RevisionLimit: 100, Logger: logging.NewLogrLogger(ctrl.Log.WithName("component-handler"))}
componentHandler := &ComponentHandler{Client: k8sClient, RevisionLimit: 100}
By("Enable ApplyOnceOnlyForce")
reconciler.applyOnceOnlyMode = core.ApplyOnceOnlyForce
@@ -461,7 +458,7 @@ var _ = Describe("Test apply (workloads/traits) once only", func() {
It("tests the situation where workload is not applied at the first because of unsatisfied dependency and revision specified",
func() {
componentHandler := &ComponentHandler{Client: k8sClient, RevisionLimit: 100, Logger: logging.NewLogrLogger(ctrl.Log.WithName("component-handler"))}
componentHandler := &ComponentHandler{Client: k8sClient, RevisionLimit: 100}
By("Enable ApplyOnceOnlyForce")
reconciler.applyOnceOnlyMode = core.ApplyOnceOnlyForce

View File

@@ -21,7 +21,6 @@ import (
"fmt"
"sort"
"github.com/crossplane/crossplane-runtime/pkg/logging"
appsv1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -46,7 +45,6 @@ const ControllerRevisionComponentLabel = "controller.oam.dev/component"
// ComponentHandler will watch component change and generate Revision automatically.
type ComponentHandler struct {
Client client.Client
Logger logging.Logger
RevisionLimit int
CustomRevisionHookURL string
}
@@ -108,7 +106,7 @@ func (c *ComponentHandler) getRelatedAppConfig(object metav1.Object) []reconcile
var appConfigs v1alpha2.ApplicationConfigurationList
err := c.Client.List(context.Background(), &appConfigs)
if err != nil {
c.Logger.Info(fmt.Sprintf("error list all applicationConfigurations %v", err))
klog.Info(fmt.Sprintf("error list all applicationConfigurations %v", err))
return nil
}
var reqs []reconcile.Request
@@ -126,12 +124,12 @@ func (c *ComponentHandler) IsRevisionDiff(mt klog.KMetadata, curComp *v1alpha2.C
// client in controller-runtime will use informer cache
// use client will be more efficient
needNewRevision, err := utils.CompareWithRevision(context.TODO(), c.Client, c.Logger, mt.GetName(), mt.GetNamespace(),
needNewRevision, err := utils.CompareWithRevision(context.TODO(), c.Client, mt.GetName(), mt.GetNamespace(),
curComp.Status.LatestRevision.Name, &curComp.Spec)
// TODO: this might be a bug that we treat all errors getting from k8s as a new revision
// but the client go event handler doesn't handle an error. We need to see if we can retry this
if err != nil {
c.Logger.Info(fmt.Sprintf("Failed to compare the component with its latest revision with err = %+v", err),
klog.InfoS(fmt.Sprintf("Failed to compare the component with its latest revision with err = %+v", err),
"component", mt.GetName(), "latest revision", curComp.Status.LatestRevision.Name)
return true, curComp.Status.LatestRevision.Revision
}
@@ -154,7 +152,7 @@ func (c *ComponentHandler) createControllerRevision(mt metav1.Object, obj runtim
reqs := c.getRelatedAppConfig(mt)
// Hook to custom revision service if exist
if err := c.customComponentRevisionHook(reqs, comp); err != nil {
c.Logger.Info(fmt.Sprintf("fail to hook from custom revision service(%s) %v", c.CustomRevisionHookURL, err), "componentName", mt.GetName())
klog.InfoS(fmt.Sprintf("fail to hook from custom revision service(%s) %v", c.CustomRevisionHookURL, err), "componentName", mt.GetName())
return nil, false
}
@@ -194,21 +192,21 @@ func (c *ComponentHandler) createControllerRevision(mt metav1.Object, obj runtim
// TODO: we should update the status first. otherwise, the subsequent create will all fail if the update fails
err := c.Client.Create(context.TODO(), &revision)
if err != nil {
c.Logger.Info(fmt.Sprintf("error create controllerRevision %v", err), "componentName", mt.GetName())
klog.InfoS(fmt.Sprintf("error create controllerRevision %v", err), "componentName", mt.GetName())
return nil, false
}
err = c.UpdateStatus(context.Background(), comp)
if err != nil {
c.Logger.Info(fmt.Sprintf("update component status latestRevision %s err %v", revisionName, err), "componentName", mt.GetName())
klog.InfoS(fmt.Sprintf("update component status latestRevision %s err %v", revisionName, err), "componentName", mt.GetName())
return nil, false
}
c.Logger.Info(fmt.Sprintf("ControllerRevision %s created", revisionName))
klog.InfoS("Create ControllerRevision", "name", revisionName)
// garbage collect
if int64(c.RevisionLimit) < nextRevision {
if err := c.cleanupControllerRevision(comp); err != nil {
c.Logger.Info(fmt.Sprintf("failed to clean up revisions of Component %v.", err))
klog.Info(fmt.Sprintf("failed to clean up revisions of Component %v.", err))
}
}
return reqs, true
@@ -280,7 +278,7 @@ func (c *ComponentHandler) cleanupControllerRevision(curComp *v1alpha2.Component
if err := c.Client.Delete(context.TODO(), &revisionToClean); err != nil {
return err
}
c.Logger.Info(fmt.Sprintf("ControllerRevision %s deleted", revision.Name))
klog.InfoS("Delete controllerRevision", "name", revision.Name)
toKill--
}
return nil

View File

@@ -22,7 +22,6 @@ import (
"strings"
"testing"
"github.com/crossplane/crossplane-runtime/pkg/logging"
"github.com/crossplane/crossplane-runtime/pkg/test"
"github.com/stretchr/testify/assert"
appsv1 "k8s.io/api/apps/v1"
@@ -31,7 +30,6 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/workqueue"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/controller/controllertest"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
@@ -122,7 +120,6 @@ func TestComponentHandler(t *testing.T) {
return nil
}),
},
Logger: logging.NewLogrLogger(ctrl.Log.WithName("test")),
RevisionLimit: 2,
}
comp := &v1alpha2.Component{

View File

@@ -19,7 +19,6 @@ package applicationconfiguration
import (
"context"
"github.com/crossplane/crossplane-runtime/pkg/logging"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
@@ -27,13 +26,13 @@ import (
// A ControllerHooks provide customized reconcile logic for an ApplicationConfiguration
type ControllerHooks interface {
Exec(ctx context.Context, ac *v1alpha2.ApplicationConfiguration, logger logging.Logger) (reconcile.Result, error)
Exec(ctx context.Context, ac *v1alpha2.ApplicationConfiguration) (reconcile.Result, error)
}
// ControllerHooksFn reconciles an ApplicationConfiguration
type ControllerHooksFn func(ctx context.Context, ac *v1alpha2.ApplicationConfiguration, logger logging.Logger) (reconcile.Result, error)
type ControllerHooksFn func(ctx context.Context, ac *v1alpha2.ApplicationConfiguration) (reconcile.Result, error)
// Exec the customized reconcile logic on the ApplicationConfiguration
func (fn ControllerHooksFn) Exec(ctx context.Context, ac *v1alpha2.ApplicationConfiguration, logger logging.Logger) (reconcile.Result, error) {
return fn(ctx, ac, logger)
func (fn ControllerHooksFn) Exec(ctx context.Context, ac *v1alpha2.ApplicationConfiguration) (reconcile.Result, error) {
return fn(ctx, ac)
}

View File

@@ -26,14 +26,12 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/crossplane/crossplane-runtime/pkg/logging"
v1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/utils/pointer"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
@@ -370,7 +368,7 @@ var _ = Describe("Test Component Revision Enabled with custom component revision
It("custom component change revision lead to revision difference, it should not loop infinitely create", func() {
srv := httptest.NewServer(RevisionHandler)
defer srv.Close()
customComponentHandler := &ComponentHandler{Client: k8sClient, RevisionLimit: 100, Logger: logging.NewLogrLogger(ctrl.Log.WithName("component-handler")), CustomRevisionHookURL: srv.URL}
customComponentHandler := &ComponentHandler{Client: k8sClient, RevisionLimit: 100, CustomRevisionHookURL: srv.URL}
getDeploy := func(image string) *v1.Deployment {
return &v1.Deployment{
TypeMeta: metav1.TypeMeta{

View File

@@ -26,7 +26,6 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/crossplane/crossplane-runtime/pkg/logging"
corev1 "k8s.io/api/core/v1"
crdv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/apimachinery/pkg/api/meta"
@@ -176,8 +175,8 @@ var _ = BeforeSuite(func(done Done) {
}, time.Second*30, time.Millisecond*500).Should(BeNil())
Expect(mapping.Resource.Resource).Should(Equal("foo"))
reconciler = NewReconciler(mgr, dm, logging.NewLogrLogger(ctrl.Log.WithName("suit-test-appconfig")))
componentHandler = &ComponentHandler{Client: k8sClient, RevisionLimit: 100, Logger: logging.NewLogrLogger(ctrl.Log.WithName("component-handler"))}
reconciler = NewReconciler(mgr, dm)
componentHandler = &ComponentHandler{Client: k8sClient, RevisionLimit: 100}
By("Creating workload definition and trait definition")
wd := v1alpha2.WorkloadDefinition{

View File

@@ -23,12 +23,13 @@ import (
"time"
"github.com/crossplane/crossplane-runtime/pkg/event"
"github.com/crossplane/crossplane-runtime/pkg/logging"
"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
ktype "k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
@@ -52,16 +53,16 @@ const reconcileTimeout = 1 * time.Minute
// Reconciler reconciles an Application Context by constructing an in-memory
// application configuration and reuse its reconcile logic
type Reconciler struct {
client client.Client
log logging.Logger
record event.Recorder
mgr ctrl.Manager
applyMode core.ApplyOnceOnlyMode
client client.Client
record event.Recorder
mgr ctrl.Manager
applyMode core.ApplyOnceOnlyMode
concurrentReconciles int
}
// Reconcile reconcile an application context
func (r *Reconciler) Reconcile(request reconcile.Request) (reconcile.Result, error) {
r.log.Debug("Reconciling")
klog.InfoS("Reconcile", "applicationContext", klog.KRef(request.Namespace, request.Name))
ctx, cancel := context.WithTimeout(context.Background(), reconcileTimeout)
defer cancel()
// fetch the app context
@@ -104,8 +105,8 @@ func (r *Reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err
// makes sure that the appConfig's owner is the same as the appContext
appConfig.SetOwnerReferences(appContext.GetOwnerReferences())
// call into the old ac Reconciler and copy the status back
acReconciler := ac.NewReconciler(r.mgr, dm, r.log, ac.WithRecorder(r.record), ac.WithApplyOnceOnlyMode(r.applyMode))
reconResult := acReconciler.ACReconcile(ctx, appConfig, r.log)
acReconciler := ac.NewReconciler(r.mgr, dm, ac.WithRecorder(r.record), ac.WithApplyOnceOnlyMode(r.applyMode))
reconResult := acReconciler.ACReconcile(ctx, appConfig)
appContextPatch := client.MergeFrom(appContext.DeepCopy())
appContext.Status = appConfig.Status
// always update ac status and set the error
@@ -126,25 +127,27 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager, compHandler *ac.Componen
r.record = event.NewAPIRecorder(mgr.GetEventRecorderFor("AppRollout")).
WithAnnotations("controller", "AppRollout")
return ctrl.NewControllerManagedBy(mgr).
WithOptions(controller.Options{
MaxConcurrentReconciles: r.concurrentReconciles,
}).
For(&v1alpha2.ApplicationContext{}).
Watches(&source.Kind{Type: &v1alpha2.Component{}}, compHandler).
Complete(r)
}
// Setup adds a controller that reconciles ApplicationContext
func Setup(mgr ctrl.Manager, args core.Args, l logging.Logger) error {
func Setup(mgr ctrl.Manager, args core.Args) error {
name := "oam/" + strings.ToLower(v1alpha2.ApplicationContextGroupKind)
record := event.NewAPIRecorder(mgr.GetEventRecorderFor(name))
reconciler := Reconciler{
client: mgr.GetClient(),
mgr: mgr,
log: l.WithValues("controller", name),
record: record,
applyMode: args.ApplyMode,
client: mgr.GetClient(),
mgr: mgr,
record: record,
applyMode: args.ApplyMode,
concurrentReconciles: args.ConcurrentReconciles,
}
compHandler := &ac.ComponentHandler{
Client: mgr.GetClient(),
Logger: l,
RevisionLimit: args.RevisionLimit,
CustomRevisionHookURL: args.CustomRevisionHookURL,
}

View File

@@ -20,14 +20,12 @@ package applicationcontext
import (
"context"
"github.com/crossplane/crossplane-runtime/pkg/logging"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
v1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
@@ -161,7 +159,6 @@ var _ = Describe("Test ApplicationContext Controller", func() {
})
It("Testing Setup", func() {
logr := ctrl.Log.WithName("ApplicationContext")
Expect(Setup(mgr, core_oam_dev.Args{}, logging.NewLogrLogger(logr).WithValues("suitTest", "Setup"))).Should(BeNil())
Expect(Setup(mgr, core_oam_dev.Args{})).Should(BeNil())
})
})

View File

@@ -26,7 +26,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/manager"
"github.com/crossplane/crossplane-runtime/pkg/event"
"github.com/crossplane/crossplane-runtime/pkg/logging"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
crdv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
@@ -93,17 +92,14 @@ var _ = BeforeSuite(func(done Done) {
var name = "ApplicationContext"
logr := ctrl.Log.WithName("ApplicationContext")
r = Reconciler{
client: mgr.GetClient(),
log: logging.NewLogrLogger(logr).WithValues("suitTest", name),
mgr: mgr,
record: event.NewAPIRecorder(mgr.GetEventRecorderFor(name)),
applyMode: core.ApplyOnceOnlyOff,
}
compHandler := &ac.ComponentHandler{
Client: mgr.GetClient(),
Logger: logging.NewLogrLogger(logr),
RevisionLimit: defRevisionLimit,
CustomRevisionHookURL: "",
}

View File

@@ -22,7 +22,6 @@ import (
"time"
"github.com/crossplane/crossplane-runtime/pkg/event"
"github.com/crossplane/crossplane-runtime/pkg/logging"
"github.com/crossplane/crossplane-runtime/pkg/meta"
"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -31,6 +30,7 @@ import (
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
oamv1alpha2 "github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
@@ -38,7 +38,7 @@ import (
"github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/controller/common/rollout"
controller "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev"
oamctrl "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/discoverymapper"
oamutil "github.com/oam-dev/kubevela/pkg/oam/util"
@@ -55,9 +55,10 @@ const (
// Reconciler reconciles an AppRollout object
type Reconciler struct {
client.Client
dm discoverymapper.DiscoveryMapper
record event.Recorder
Scheme *runtime.Scheme
dm discoverymapper.DiscoveryMapper
record event.Recorder
Scheme *runtime.Scheme
concurrentReconciles int
}
// +kubebuilder:rbac:groups=core.oam.dev,resources=approllouts,verbs=get;list;watch;create;update;patch;delete
@@ -340,10 +341,10 @@ func (r *Reconciler) updateStatus(ctx context.Context, appRollout *v1beta1.AppRo
// NewReconciler render a applicationRollout reconciler
func NewReconciler(c client.Client, dm discoverymapper.DiscoveryMapper, record event.Recorder, scheme *runtime.Scheme) *Reconciler {
return &Reconciler{
c,
dm,
record,
scheme,
Client: c,
dm: dm,
record: record,
Scheme: scheme,
}
}
@@ -352,17 +353,21 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
r.record = event.NewAPIRecorder(mgr.GetEventRecorderFor("AppRollout")).
WithAnnotations("controller", "AppRollout")
return ctrl.NewControllerManagedBy(mgr).
WithOptions(controller.Options{
MaxConcurrentReconciles: r.concurrentReconciles,
}).
For(&v1beta1.AppRollout{}).
Owns(&v1beta1.Application{}).
Complete(r)
}
// Setup adds a controller that reconciles AppRollout.
func Setup(mgr ctrl.Manager, args controller.Args, _ logging.Logger) error {
func Setup(mgr ctrl.Manager, args oamctrl.Args) error {
reconciler := Reconciler{
Client: mgr.GetClient(),
dm: args.DiscoveryMapper,
Scheme: mgr.GetScheme(),
Client: mgr.GetClient(),
dm: args.DiscoveryMapper,
Scheme: mgr.GetScheme(),
concurrentReconciles: args.ConcurrentReconciles,
}
return reconciler.SetupWithManager(mgr)
}

View File

@@ -24,7 +24,6 @@ import (
cpv1alpha1 "github.com/crossplane/crossplane-runtime/apis/core/v1alpha1"
"github.com/crossplane/crossplane-runtime/pkg/event"
"github.com/crossplane/crossplane-runtime/pkg/logging"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -33,11 +32,12 @@ import (
"k8s.io/utils/pointer"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/apis/types"
controller "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev"
oamctrl "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev"
coredef "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/core"
"github.com/oam-dev/kubevela/pkg/controller/utils"
"github.com/oam-dev/kubevela/pkg/dsl/definition"
@@ -49,17 +49,17 @@ import (
// Reconciler reconciles a ComponentDefinition object
type Reconciler struct {
client.Client
dm discoverymapper.DiscoveryMapper
pd *definition.PackageDiscover
Scheme *runtime.Scheme
record event.Recorder
defRevLimit int
dm discoverymapper.DiscoveryMapper
pd *definition.PackageDiscover
Scheme *runtime.Scheme
record event.Recorder
defRevLimit int
concurrentReconciles int
}
// Reconcile is the main logic for ComponentDefinition controller
func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
definitionName := req.NamespacedName.Name
klog.InfoS("Reconciling ComponentDefinition", "Name", definitionName, "Namespace", req.Namespace)
klog.InfoS("Reconcile componentDefinition", "componentDefinition", klog.KRef(req.Namespace, req.Name))
ctx := context.Background()
var componentDefinition v1beta1.ComponentDefinition
@@ -96,20 +96,20 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
// generate DefinitionRevision from componentDefinition
defRev, isNewRevision, err := coredef.GenerateDefinitionRevision(ctx, r.Client, &componentDefinition)
if err != nil {
klog.ErrorS(err, "cannot generate DefinitionRevision", "ComponentDefinitionName", componentDefinition.Name)
r.record.Event(handler.cd, event.Warning("cannot generate DefinitionRevision", err))
klog.InfoS("Could not generate DefinitionRevision", "componentDefinition", klog.KObj(&componentDefinition), "err", err)
r.record.Event(&componentDefinition, event.Warning("Could not generate DefinitionRevision", err))
return ctrl.Result{}, util.PatchCondition(ctx, r, &componentDefinition,
cpv1alpha1.ReconcileError(fmt.Errorf(util.ErrGenerateDefinitionRevision, componentDefinition.Name, err)))
}
if !isNewRevision {
if err = r.createOrUpdateComponentDefRevision(ctx, req.Namespace, &componentDefinition, defRev); err != nil {
klog.ErrorS(err, "cannot update DefinitionRevision")
r.record.Event(&(componentDefinition), event.Warning("cannot update DefinitionRevision", err))
klog.InfoS("Could not update DefinitionRevision", "err", err)
r.record.Event(&(componentDefinition), event.Warning("Could not update DefinitionRevision", err))
return ctrl.Result{}, util.PatchCondition(ctx, r, &(componentDefinition),
cpv1alpha1.ReconcileError(fmt.Errorf(util.ErrCreateOrUpdateDefinitionRevision, defRev.Name, err)))
}
klog.InfoS("Successfully update DefinitionRevision", "name", defRev.Name)
klog.InfoS("Successfully update definitionRevision", "definitionRevision", klog.KObj(defRev))
if err := coredef.CleanUpDefinitionRevision(ctx, r.Client, &componentDefinition, r.defRevLimit); err != nil {
klog.Error("[Garbage collection]")
@@ -138,6 +138,8 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
def.Helm = componentDefinition.Spec.Schematic.HELM
case util.KubeDef:
def.Kube = componentDefinition.Spec.Schematic.KUBE
case util.TerraformDef:
def.Terraform = componentDefinition.Spec.Schematic.Terraform
default:
}
@@ -157,7 +159,7 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
return ctrl.Result{}, util.PatchCondition(ctx, r, &(def.ComponentDefinition),
cpv1alpha1.ReconcileError(fmt.Errorf(util.ErrCreateOrUpdateDefinitionRevision, defRev.Name, err)))
}
klog.InfoS("Successfully create DefinitionRevision", "name", defRev.Name)
klog.InfoS("Successfully create definitionRevision", "definitionRevision", klog.KObj(defRev))
def.ComponentDefinition.Status.LatestRevision = &common.Revision{
Name: defRev.Name,
@@ -230,18 +232,22 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
r.record = event.NewAPIRecorder(mgr.GetEventRecorderFor("ComponentDefinition")).
WithAnnotations("controller", "ComponentDefinition")
return ctrl.NewControllerManagedBy(mgr).
WithOptions(controller.Options{
MaxConcurrentReconciles: r.concurrentReconciles,
}).
For(&v1beta1.ComponentDefinition{}).
Complete(r)
}
// Setup adds a controller that reconciles ComponentDefinition.
func Setup(mgr ctrl.Manager, args controller.Args, _ logging.Logger) error {
func Setup(mgr ctrl.Manager, args oamctrl.Args) error {
r := Reconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
dm: args.DiscoveryMapper,
pd: args.PackageDiscover,
defRevLimit: args.DefRevisionLimit,
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
dm: args.DiscoveryMapper,
pd: args.PackageDiscover,
defRevLimit: args.DefRevisionLimit,
concurrentReconciles: args.ConcurrentReconciles,
}
return r.SetupWithManager(mgr)
}

View File

@@ -275,6 +275,115 @@ spec:
})
})
Context("When the ComponentDefinition contains Terraform Module, should create a ConfigMap", func() {
var componentDefinitionName = "alibaba-rds-test"
var namespace = "default"
req := reconcile.Request{NamespacedName: client.ObjectKey{Name: componentDefinitionName, Namespace: namespace}}
It("Applying Terraform ComponentDefinition", func() {
By("Apply ComponentDefinition")
var validComponentDefinition = `
apiVersion: core.oam.dev/v1alpha2
kind: ComponentDefinition
metadata:
name: alibaba-rds-test
annotations:
definition.oam.dev/description: Terraform configuration for Alibaba Cloud RDS object
type: terraform
spec:
workload:
definition:
apiVersion: apps/v1
kind: Deployment
schematic:
terraform:
configuration: |
module "rds" {
source = "terraform-alicloud-modules/rds/alicloud"
engine = "MySQL"
engine_version = "8.0"
instance_type = "rds.mysql.c1.large"
instance_storage = "20"
instance_name = var.instance_name
account_name = var.account_name
password = var.password
}
output "DB_NAME" {
value = module.rds.this_db_instance_name
}
output "DB_USER" {
value = module.rds.this_db_database_account
}
output "DB_PORT" {
value = module.rds.this_db_instance_port
}
output "DB_HOST" {
value = module.rds.this_db_instance_connection_string
}
output "DB_PASSWORD" {
value = module.rds.this_db_instance_port
}
variable "instance_name" {
description = "RDS instance name"
type = string
default = "poc"
}
variable "account_name" {
description = "RDS instance user account name"
type = "string"
default = "oam"
}
variable "password" {
description = "RDS instance account password"
type = "string"
default = "xxx"
}
variable "intVar" {
type = "number"
}
variable "boolVar" {
type = "bool"
}
variable "listVar" {
type = "list"
}
variable "mapVar" {
type = "map"
}
`
var def v1beta1.ComponentDefinition
Expect(yaml.Unmarshal([]byte(validComponentDefinition), &def)).Should(BeNil())
def.Namespace = namespace
Expect(k8sClient.Create(ctx, &def)).Should(Succeed())
reconcileRetry(&r, req)
By("Check whether ConfigMap is created")
var cm corev1.ConfigMap
name := fmt.Sprintf("%s%s", types.CapabilityConfigMapNamePrefix, componentDefinitionName)
Eventually(func() bool {
err := k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, &cm)
return err == nil
}, 10*time.Second, time.Second).Should(BeTrue())
Expect(cm.Data[types.OpenapiV3JSONSchema]).Should(Not(Equal("")))
By("Check whether ConfigMapRef reference to the right ComponentDefinition")
Eventually(func() string {
_ = k8sClient.Get(ctx, client.ObjectKey{Namespace: def.Namespace, Name: def.Name}, &def)
return def.Status.ConfigMapRef
}, 10*time.Second, time.Second).Should(Equal(name))
})
})
Context("When the ComponentDefinition is invalid, should hit issues", func() {
var namespace = "ns-def"
BeforeEach(func() {

View File

@@ -44,11 +44,17 @@ func (h *handler) CreateWorkloadDefinition(ctx context.Context) (util.WorkloadTy
workloadType = util.ReferWorkload
workloadName = h.cd.Spec.Workload.Type
}
if h.cd.Spec.Schematic != nil && h.cd.Spec.Schematic.HELM != nil {
workloadType = util.HELMDef
}
if h.cd.Spec.Schematic != nil && h.cd.Spec.Schematic.KUBE != nil {
workloadType = util.KubeDef
if h.cd.Spec.Schematic != nil {
if h.cd.Spec.Schematic.HELM != nil {
workloadType = util.HELMDef
}
if h.cd.Spec.Schematic.KUBE != nil {
workloadType = util.KubeDef
}
if h.cd.Spec.Schematic.Terraform != nil {
workloadType = util.TerraformDef
}
}
wd := new(v1beta1.WorkloadDefinition)

View File

@@ -22,21 +22,20 @@ import (
"sync"
"time"
runtimev1alpha1 "github.com/crossplane/crossplane-runtime/apis/core/v1alpha1"
"github.com/crossplane/crossplane-runtime/pkg/event"
"github.com/crossplane/crossplane-runtime/pkg/resource"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/retry"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
runtimev1alpha1 "github.com/crossplane/crossplane-runtime/apis/core/v1alpha1"
"github.com/crossplane/crossplane-runtime/pkg/event"
"github.com/crossplane/crossplane-runtime/pkg/logging"
"github.com/crossplane/crossplane-runtime/pkg/resource"
controller "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
"github.com/oam-dev/kubevela/pkg/controller/common"
controller "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev"
)
const (
@@ -56,14 +55,13 @@ const (
)
// Setup adds a controller that reconciles HealthScope.
func Setup(mgr ctrl.Manager, _ controller.Args, l logging.Logger) error {
func Setup(mgr ctrl.Manager, _ controller.Args) error {
name := "oam/" + strings.ToLower(v1alpha2.HealthScopeGroupKind)
return ctrl.NewControllerManagedBy(mgr).
Named(name).
For(&v1alpha2.HealthScope{}).
Complete(NewReconciler(mgr,
WithLogger(l.WithValues("controller", name)),
WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))),
))
}
@@ -71,8 +69,6 @@ func Setup(mgr ctrl.Manager, _ controller.Args, l logging.Logger) error {
// A Reconciler reconciles OAM Scopes by keeping track of the health status of components.
type Reconciler struct {
client client.Client
log logging.Logger
record event.Recorder
// traitChecker represents checker fetching health condition from HealthCheckTrait
traitChecker WorloadHealthChecker
@@ -86,13 +82,6 @@ type Reconciler struct {
// A ReconcilerOption configures a Reconciler.
type ReconcilerOption func(*Reconciler)
// WithLogger specifies how the Reconciler should log messages.
func WithLogger(l logging.Logger) ReconcilerOption {
return func(r *Reconciler) {
r.log = l
}
}
// WithRecorder specifies how the Reconciler should record events.
func WithRecorder(er event.Recorder) ReconcilerOption {
return func(r *Reconciler) {
@@ -121,7 +110,6 @@ func WithChecker(c WorloadHealthChecker) ReconcilerOption {
func NewReconciler(m ctrl.Manager, o ...ReconcilerOption) *Reconciler {
r := &Reconciler{
client: m.GetClient(),
log: logging.NewNopLogger(),
record: event.NewNopRecorder(),
traitChecker: WorkloadHealthCheckFn(CheckByHealthCheckTrait),
checkers: []WorloadHealthChecker{
@@ -142,8 +130,7 @@ func NewReconciler(m ctrl.Manager, o ...ReconcilerOption) *Reconciler {
// Reconcile an OAM HealthScope by keeping track of its health status.
func (r *Reconciler) Reconcile(req reconcile.Request) (reconcile.Result, error) {
log := r.log.WithValues("request", req)
log.Debug("Reconciling")
klog.InfoS("Reconcile healthScope", "healthScope", klog.KRef(req.Namespace, req.Name))
ctx, cancel := context.WithTimeout(context.Background(), reconcileTimeout)
defer cancel()
@@ -164,10 +151,10 @@ func (r *Reconciler) Reconcile(req reconcile.Request) (reconcile.Result, error)
start := time.Now()
log = log.WithValues("uid", hs.GetUID(), "version", hs.GetResourceVersion())
klog.InfoS("healthScope", "uid", hs.GetUID(), "version", hs.GetResourceVersion())
scopeCondition, wlConditions := r.GetScopeHealthStatus(ctx, hs)
log.Debug("Successfully ran health check", "scope", hs.Name)
klog.V(common.LogDebug).InfoS("Successfully ran health check", "scope", hs.Name)
r.record.Event(hs, event.Normal(reasonHealthCheck, "Successfully ran health check"))
elapsed := time.Since(start)
@@ -179,7 +166,7 @@ func (r *Reconciler) Reconcile(req reconcile.Request) (reconcile.Result, error)
// GetScopeHealthStatus get the status of the healthscope based on workload resources.
func (r *Reconciler) GetScopeHealthStatus(ctx context.Context, healthScope *v1alpha2.HealthScope) (ScopeHealthCondition, []*WorkloadHealthCondition) {
log := r.log.WithValues("get scope health status", healthScope.GetName())
klog.InfoS("Get scope health status", "name", healthScope.GetName())
scopeCondition := ScopeHealthCondition{
HealthStatus: StatusHealthy, // if no workload referenced, scope is healthy by default
}
@@ -207,7 +194,7 @@ func (r *Reconciler) GetScopeHealthStatus(ctx context.Context, healthScope *v1al
wlHealthCondition = r.traitChecker.Check(ctx, r.client, resRef, healthScope.GetNamespace())
if wlHealthCondition != nil {
log.Debug("get health condition from health check trait ", "workload", resRef, "healthCondition", wlHealthCondition)
klog.V(common.LogDebug).InfoS("Get health condition from health check trait ", "workload", resRef, "healthCondition", wlHealthCondition)
// get healthCondition from HealthCheckTrait
workloadHealthConditionsC <- wlHealthCondition
return
@@ -216,14 +203,14 @@ func (r *Reconciler) GetScopeHealthStatus(ctx context.Context, healthScope *v1al
for _, checker := range r.checkers {
wlHealthCondition = checker.Check(ctxWithTimeout, r.client, resRef, healthScope.GetNamespace())
if wlHealthCondition != nil {
log.Debug("get health condition from built-in checker", "workload", resRef, "healthCondition", wlHealthCondition)
klog.V(common.LogDebug).InfoS("Get health condition from built-in checker", "workload", resRef, "healthCondition", wlHealthCondition)
// found matched checker and get health condition
workloadHealthConditionsC <- wlHealthCondition
return
}
}
// handle unknown workload
log.Debug("get unknown workload", "workload", resRef)
klog.V(common.LogDebug).InfoS("Get unknown workload", "workload", resRef)
workloadHealthConditionsC <- r.unknownChecker.Check(ctx, r.client, resRef, healthScope.GetNamespace())
}(workloadRef)
}

View File

@@ -37,7 +37,6 @@ import (
"github.com/crossplane/crossplane-runtime/apis/core/v1alpha1"
"github.com/crossplane/crossplane-runtime/pkg/event"
"github.com/crossplane/crossplane-runtime/pkg/fieldpath"
"github.com/crossplane/crossplane-runtime/pkg/logging"
"github.com/crossplane/crossplane-runtime/pkg/test"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
@@ -69,7 +68,6 @@ var _ = Describe("HealthScope Controller Reconcile Test", func() {
return &WorkloadHealthCondition{HealthStatus: StatusUnhealthy}
})
reconciler := NewReconciler(mockMgr,
WithLogger(logging.NewNopLogger().WithValues("HealthScopeReconciler")),
WithRecorder(event.NewNopRecorder()),
WithChecker(MockHealthyChecker),
)
@@ -162,7 +160,6 @@ var _ = Describe("Test GetScopeHealthStatus", func() {
Client: &test.MockClient{},
}
reconciler := NewReconciler(mockMgr,
WithLogger(logging.NewNopLogger().WithValues("HealthScopeReconciler")),
WithRecorder(event.NewNopRecorder()),
)
reconciler.client = test.NewMockClient()

View File

@@ -23,15 +23,14 @@ import (
cpv1alpha1 "github.com/crossplane/crossplane-runtime/apis/core/v1alpha1"
"github.com/crossplane/crossplane-runtime/pkg/event"
"github.com/crossplane/crossplane-runtime/pkg/logging"
cpmeta "github.com/crossplane/crossplane-runtime/pkg/meta"
"github.com/go-logr/logr"
"github.com/pkg/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/discovery"
"k8s.io/klog/v2"
"k8s.io/kube-openapi/pkg/util/proto"
"k8s.io/kubectl/pkg/explain"
"k8s.io/kubectl/pkg/util/openapi"
@@ -52,12 +51,11 @@ const (
)
// Setup adds a controller that reconciles ContainerizedWorkload.
func Setup(mgr ctrl.Manager, args controller.Args, _ logging.Logger) error {
func Setup(mgr ctrl.Manager, args controller.Args) error {
reconciler := Reconciler{
Client: mgr.GetClient(),
DiscoveryClient: *discovery.NewDiscoveryClientForConfigOrDie(mgr.GetConfig()),
dm: args.DiscoveryMapper,
log: ctrl.Log.WithName("ManualScalarTrait"),
record: event.NewAPIRecorder(mgr.GetEventRecorderFor("ManualScalarTrait")),
Scheme: mgr.GetScheme(),
}
@@ -69,7 +67,6 @@ type Reconciler struct {
client.Client
discovery.DiscoveryClient
dm discoverymapper.DiscoveryMapper
log logr.Logger
record event.Recorder
Scheme *runtime.Scheme
}
@@ -83,9 +80,7 @@ type Reconciler struct {
// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;update;patch;delete
func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
ctx := context.Background()
mLog := r.log.WithValues("manualscalar trait", req.NamespacedName)
mLog.Info("Reconcile manualscalar trait")
klog.InfoS("Reconcile manualscalar trait", "trait", klog.KRef(req.Namespace, req.Name))
var manualScalar oamv1alpha2.ManualScalerTrait
if err := r.Get(ctx, req.NamespacedName, &manualScalar); err != nil {
@@ -94,17 +89,17 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
ctx = util.SetNamespaceInCtx(ctx, manualScalar.Namespace)
r.log.Info("Get the manualscalar trait", "ReplicaCount", manualScalar.Spec.ReplicaCount,
klog.InfoS("Get the manualscalar trait", "ReplicaCount", manualScalar.Spec.ReplicaCount,
"Annotations", manualScalar.GetAnnotations())
// find the resource object to record the event to, default is the parent appConfig.
eventObj, err := util.LocateParentAppConfig(ctx, r.Client, &manualScalar)
if eventObj == nil {
// fallback to workload itself
mLog.Error(err, "Failed to find the parent resource", "manualScalar", manualScalar.Name)
klog.ErrorS(err, "Failed to find the parent resource", "manualScalar", manualScalar.Name)
eventObj = &manualScalar
}
// Fetch the workload instance this trait is referring to
workload, err := util.FetchWorkload(ctx, r, mLog, &manualScalar)
workload, err := util.FetchWorkload(ctx, r, &manualScalar)
if err != nil {
r.record.Event(eventObj, event.Warning(util.ErrLocateWorkload, err))
return util.ReconcileWaitResult, util.PatchCondition(
@@ -112,9 +107,9 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
}
// Fetch the child resources list from the corresponding workload
resources, err := util.FetchWorkloadChildResources(ctx, mLog, r, r.dm, workload)
resources, err := util.FetchWorkloadChildResources(ctx, r, r.dm, workload)
if err != nil {
mLog.Error(err, "Error while fetching the workload child resources", "workload", workload.UnstructuredContent())
klog.ErrorS(err, "Error while fetching the workload child resources", "workload", workload.UnstructuredContent())
r.record.Event(eventObj, event.Warning(util.ErrFetchChildResources, err))
return util.ReconcileWaitResult, util.PatchCondition(ctx, r, &manualScalar,
cpv1alpha1.ReconcileError(errors.New(util.ErrFetchChildResources)))
@@ -124,7 +119,7 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
resources = append(resources, workload)
}
// Scale the child resources that we know how to scale
result, err := r.scaleResources(ctx, mLog, manualScalar, resources)
result, err := r.scaleResources(ctx, manualScalar, resources)
// the scaleResources function will patch error message and should return here to prevent the condition override by the following patch.
if result == util.ReconcileWaitResult {
return result, err
@@ -140,8 +135,7 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
}
// identify child resources and scale them
func (r *Reconciler) scaleResources(ctx context.Context, mLog logr.Logger,
manualScalar oamv1alpha2.ManualScalerTrait, resources []*unstructured.Unstructured) (ctrl.Result, error) {
func (r *Reconciler) scaleResources(ctx context.Context, manualScalar oamv1alpha2.ManualScalerTrait, resources []*unstructured.Unstructured) (ctrl.Result, error) {
// scale all the resources that we can scale
isController := false
bod := true
@@ -170,27 +164,27 @@ func (r *Reconciler) scaleResources(ctx context.Context, mLog logr.Logger,
if locateReplicaField(document, res) {
found = true
resPatch := client.MergeFrom(res.DeepCopyObject())
mLog.Info("Get the resource the trait is going to modify",
klog.InfoS("Get the resource the trait is going to modify",
"resource name", res.GetName(), "UID", res.GetUID())
cpmeta.AddOwnerReference(res, ownerRef)
err := unstructured.SetNestedField(res.Object, int64(manualScalar.Spec.ReplicaCount), "spec", "replicas")
if err != nil {
mLog.Error(err, "Failed to patch a resource for scaling")
klog.ErrorS(err, "Failed to patch a resource for scaling")
return util.ReconcileWaitResult,
util.PatchCondition(ctx, r, &manualScalar, cpv1alpha1.ReconcileError(errors.Wrap(err, errPatchTobeScaledResource)))
}
// merge patch to scale the resource
if err := r.Patch(ctx, res, resPatch, client.FieldOwner(manualScalar.GetUID())); err != nil {
mLog.Error(err, "Failed to scale a resource")
klog.ErrorS(err, "Failed to scale a resource")
return util.ReconcileWaitResult,
util.PatchCondition(ctx, r, &manualScalar, cpv1alpha1.ReconcileError(errors.Wrap(err, errScaleResource)))
}
mLog.Info("Successfully scaled a resource", "resource GVK", res.GroupVersionKind().String(),
klog.InfoS("Successfully scaled a resource", "resource GVK", res.GroupVersionKind().String(),
"res UID", res.GetUID(), "target replica", manualScalar.Spec.ReplicaCount)
}
}
if !found {
mLog.Info("Cannot locate any resource", "total resources", len(resources))
klog.InfoS("Cannot locate any resource", "total resources", len(resources))
return util.ReconcileWaitResult,
util.PatchCondition(ctx, r, &manualScalar, cpv1alpha1.ReconcileError(errors.New(errScaleResource)))
}

View File

@@ -24,7 +24,6 @@ import (
cpv1alpha1 "github.com/crossplane/crossplane-runtime/apis/core/v1alpha1"
"github.com/crossplane/crossplane-runtime/pkg/event"
"github.com/crossplane/crossplane-runtime/pkg/logging"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -33,11 +32,12 @@ import (
"k8s.io/utils/pointer"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/apis/types"
controller "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev"
oamctrl "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev"
coredef "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1alpha2/core"
"github.com/oam-dev/kubevela/pkg/controller/utils"
"github.com/oam-dev/kubevela/pkg/dsl/definition"
@@ -49,17 +49,17 @@ import (
// Reconciler reconciles a TraitDefinition object
type Reconciler struct {
client.Client
dm discoverymapper.DiscoveryMapper
pd *definition.PackageDiscover
Scheme *runtime.Scheme
record event.Recorder
defRevLimit int
dm discoverymapper.DiscoveryMapper
pd *definition.PackageDiscover
Scheme *runtime.Scheme
record event.Recorder
defRevLimit int
concurrentReconciles int
}
// Reconcile is the main logic for TraitDefinition controller
func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
definitionName := req.NamespacedName.Name
klog.InfoS("Reconciling TraitDefinition...", "Name", definitionName, "Namespace", req.Namespace)
klog.InfoS("Reconcile traitDefinition", "traitDefinition", klog.KRef(req.Namespace, req.Name))
ctx := context.Background()
var traitdefinition v1beta1.TraitDefinition
@@ -80,7 +80,7 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
err := utils.RefreshPackageDiscover(r.dm, r.pd, common.WorkloadGVK{},
traitdefinition.Spec.Reference, types.TypeTrait)
if err != nil {
klog.ErrorS(err, "cannot refresh packageDiscover")
klog.InfoS("Could not refresh packageDiscover", "err", err)
r.record.Event(&traitdefinition, event.Warning("cannot refresh packageDiscover", err))
return ctrl.Result{}, util.PatchCondition(ctx, r, &traitdefinition,
cpv1alpha1.ReconcileError(fmt.Errorf(util.ErrRefreshPackageDiscover, err)))
@@ -90,22 +90,22 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
// generate DefinitionRevision from traitDefinition
defRev, isNewRevision, err := coredef.GenerateDefinitionRevision(ctx, r.Client, &traitdefinition)
if err != nil {
klog.ErrorS(err, "cannot generate DefinitionRevision", "TraitDefinitionName", traitdefinition.Name)
r.record.Event(&traitdefinition, event.Warning("cannot generate DefinitionRevision", err))
klog.InfoS("Could not generate definitionRevision", "traitDefinition", klog.KObj(&traitdefinition), "err", err)
r.record.Event(&traitdefinition, event.Warning("Could not generate DefinitionRevision", err))
return ctrl.Result{}, util.PatchCondition(ctx, r, &traitdefinition,
cpv1alpha1.ReconcileError(fmt.Errorf(util.ErrGenerateDefinitionRevision, traitdefinition.Name, err)))
}
if !isNewRevision {
if err = r.createOrUpdateTraitDefRevision(ctx, req.Namespace, &traitdefinition, defRev); err != nil {
klog.ErrorS(err, "cannot update DefinitionRevision")
klog.InfoS("Could not update DefinitionRevision", "err", err)
r.record.Event(&(traitdefinition), event.Warning("cannot update DefinitionRevision", err))
return ctrl.Result{}, util.PatchCondition(ctx, r, &(traitdefinition),
cpv1alpha1.ReconcileError(fmt.Errorf(util.ErrCreateOrUpdateDefinitionRevision, defRev.Name, err)))
}
klog.InfoS("Successfully update DefinitionRevision", "name", defRev.Name)
klog.InfoS("Successfully update definitionRevision", "definitionRevision", klog.KObj(defRev))
if err := coredef.CleanUpDefinitionRevision(ctx, r.Client, &traitdefinition, r.defRevLimit); err != nil {
klog.Error("[Garbage collection]")
klog.InfoS("Failed to collect garbage", "err", err)
r.record.Event(&traitdefinition, event.Warning("failed to garbage collect DefinitionRevision of type TraitDefinition", err))
}
return ctrl.Result{}, nil
@@ -130,7 +130,7 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
return ctrl.Result{}, util.PatchCondition(ctx, r, &(def.TraitDefinition),
cpv1alpha1.ReconcileError(fmt.Errorf(util.ErrCreateOrUpdateDefinitionRevision, defRev.Name, err)))
}
klog.InfoS("Successfully create DefinitionRevision", "name", defRev.Name)
klog.InfoS("Successfully create definitionRevision", "definitionRevision", klog.KObj(defRev))
def.TraitDefinition.Status.LatestRevision = &common.Revision{
Name: defRev.Name,
@@ -203,18 +203,22 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
r.record = event.NewAPIRecorder(mgr.GetEventRecorderFor("TraitDefinition")).
WithAnnotations("controller", "TraitDefinition")
return ctrl.NewControllerManagedBy(mgr).
WithOptions(controller.Options{
MaxConcurrentReconciles: r.concurrentReconciles,
}).
For(&v1beta1.TraitDefinition{}).
Complete(r)
}
// Setup adds a controller that reconciles TraitDefinition.
func Setup(mgr ctrl.Manager, args controller.Args, _ logging.Logger) error {
func Setup(mgr ctrl.Manager, args oamctrl.Args) error {
r := Reconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
dm: args.DiscoveryMapper,
pd: args.PackageDiscover,
defRevLimit: args.DefRevisionLimit,
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
dm: args.DiscoveryMapper,
pd: args.PackageDiscover,
defRevLimit: args.DefRevisionLimit,
concurrentReconciles: args.ConcurrentReconciles,
}
return r.SetupWithManager(mgr)
}

View File

@@ -23,8 +23,6 @@ import (
cpv1alpha1 "github.com/crossplane/crossplane-runtime/apis/core/v1alpha1"
"github.com/crossplane/crossplane-runtime/pkg/event"
"github.com/crossplane/crossplane-runtime/pkg/logging"
"github.com/go-logr/logr"
"github.com/pkg/errors"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
@@ -32,6 +30,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/retry"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -52,10 +51,9 @@ const (
)
// Setup adds a controller that reconciles ContainerizedWorkload.
func Setup(mgr ctrl.Manager, _ controller.Args, _ logging.Logger) error {
func Setup(mgr ctrl.Manager, _ controller.Args) error {
reconciler := Reconciler{
Client: mgr.GetClient(),
log: ctrl.Log.WithName("ContainerizedWorkload"),
record: event.NewAPIRecorder(mgr.GetEventRecorderFor("ContainerizedWorkload")),
Scheme: mgr.GetScheme(),
}
@@ -65,7 +63,6 @@ func Setup(mgr ctrl.Manager, _ controller.Args, _ logging.Logger) error {
// Reconciler reconciles a ContainerizedWorkload object
type Reconciler struct {
client.Client
log logr.Logger
record event.Recorder
Scheme *runtime.Scheme
}
@@ -78,27 +75,26 @@ type Reconciler struct {
// +kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;list;watch;create;update;patch;delete
func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
ctx := context.Background()
log := r.log.WithValues("containerizedworkload", req.NamespacedName)
log.Info("Reconcile container workload")
klog.InfoS("Reconcile containerizedworkload", klog.KRef(req.Namespace, req.Name))
var workload v1alpha2.ContainerizedWorkload
if err := r.Get(ctx, req.NamespacedName, &workload); err != nil {
if apierrors.IsNotFound(err) {
log.Info("Container workload is deleted")
klog.Info("Container workload is deleted")
}
return ctrl.Result{}, client.IgnoreNotFound(err)
}
log.Info("Get the workload", "apiVersion", workload.APIVersion, "kind", workload.Kind)
klog.InfoS("Get the workload", "apiVersion", workload.APIVersion, "kind", workload.Kind)
// find the resource object to record the event to, default is the parent appConfig.
eventObj, err := util.LocateParentAppConfig(ctx, r.Client, &workload)
if eventObj == nil {
// fallback to workload itself
log.Error(err, "workload", "name", workload.Name)
klog.ErrorS(err, "workload", "name", workload.Name)
eventObj = &workload
}
deploy, err := r.renderDeployment(ctx, &workload)
if err != nil {
log.Error(err, "Failed to render a deployment")
klog.ErrorS(err, "Failed to render a deployment")
r.record.Event(eventObj, event.Warning(errRenderWorkload, err))
return util.ReconcileWaitResult,
util.PatchCondition(ctx, r, &workload, cpv1alpha1.ReconcileError(errors.Wrap(err, errRenderWorkload)))
@@ -106,7 +102,7 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
// server side apply, only the fields we set are touched
applyOpts := []client.PatchOption{client.ForceOwnership, client.FieldOwner(workload.GetUID())}
if err := r.Patch(ctx, deploy, client.Apply, applyOpts...); err != nil {
log.Error(err, "Failed to apply to a deployment")
klog.ErrorS(err, "Failed to apply to a deployment")
r.record.Event(eventObj, event.Warning(errApplyDeployment, err))
return util.ReconcileWaitResult,
util.PatchCondition(ctx, r, &workload, cpv1alpha1.ReconcileError(errors.Wrap(err, errApplyDeployment)))
@@ -118,14 +114,14 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
configMapApplyOpts := []client.PatchOption{client.ForceOwnership, client.FieldOwner(deploy.GetUID())}
configmaps, err := r.renderConfigMaps(ctx, &workload, deploy)
if err != nil {
log.Error(err, "Failed to render configmaps")
klog.ErrorS(err, "Failed to render configmaps")
r.record.Event(eventObj, event.Warning(errRenderWorkload, err))
return util.ReconcileWaitResult,
util.PatchCondition(ctx, r, &workload, cpv1alpha1.ReconcileError(errors.Wrap(err, errRenderWorkload)))
}
for _, cm := range configmaps {
if err := r.Patch(ctx, cm, client.Apply, configMapApplyOpts...); err != nil {
log.Error(err, "Failed to apply a configmap")
klog.ErrorS(err, "Failed to apply a configmap")
r.record.Event(eventObj, event.Warning(errApplyConfigMap, err))
return util.ReconcileWaitResult,
util.PatchCondition(ctx, r, &workload, cpv1alpha1.ReconcileError(errors.Wrap(err, errApplyConfigMap)))
@@ -138,14 +134,14 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
// TODO(rz): remove this after we have service trait
service, err := r.renderService(ctx, &workload, deploy)
if err != nil {
log.Error(err, "Failed to render a service")
klog.ErrorS(err, "Failed to render a service")
r.record.Event(eventObj, event.Warning(errRenderService, err))
return util.ReconcileWaitResult,
util.PatchCondition(ctx, r, &workload, cpv1alpha1.ReconcileError(errors.Wrap(err, errRenderService)))
}
// server side apply the service
if err := r.Patch(ctx, service, client.Apply, applyOpts...); err != nil {
log.Error(err, "Failed to apply a service")
klog.ErrorS(err, "Failed to apply a service")
r.record.Event(eventObj, event.Warning(errApplyDeployment, err))
return util.ReconcileWaitResult,
util.PatchCondition(ctx, r, &workload, cpv1alpha1.ReconcileError(errors.Wrap(err, errApplyService)))
@@ -155,7 +151,7 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
workload.Name, service.Name)))
// garbage collect the service/deployments that we created but not needed
if err := r.cleanupResources(ctx, &workload, &deploy.UID, &service.UID); err != nil {
log.Error(err, "Failed to clean up resources")
klog.ErrorS(err, "Failed to clean up resources")
r.record.Event(eventObj, event.Warning(errApplyDeployment, err))
}
workload.Status.Resources = nil

View File

@@ -24,11 +24,11 @@ import (
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/util"
)
@@ -56,7 +56,7 @@ func (r *Reconciler) renderDeployment(ctx context.Context,
}
}
}
r.log.Info("rendered a deployment", "deploy", deploy.Spec.Template.Spec)
klog.InfoS("Rendered a deployment", "deploy", deploy.Spec.Template.Spec)
// set the controller reference so that we can watch this deployment and it will be deleted automatically
if err := ctrl.SetControllerReference(workload, deploy, r.Scheme); err != nil {
@@ -112,14 +112,14 @@ func (r *Reconciler) renderConfigMaps(ctx context.Context,
// nolint:gocyclo
func (r *Reconciler) cleanupResources(ctx context.Context,
workload *v1alpha2.ContainerizedWorkload, deployUID, serviceUID *types.UID) error {
log := r.log.WithValues("gc deployment", workload.Name)
klog.InfoS("GC deployment", "workload", klog.KObj(workload))
var deploy appsv1.Deployment
var service corev1.Service
for _, res := range workload.Status.Resources {
uid := res.UID
if res.Kind == util.KindDeployment && res.APIVersion == appsv1.SchemeGroupVersion.String() {
if uid != *deployUID {
log.Info("Found an orphaned deployment", "deployment UID", *deployUID, "orphaned UID", uid)
klog.InfoS("Found an orphaned deployment", "deployment UID", *deployUID, "orphaned UID", uid)
dn := client.ObjectKey{Name: res.Name, Namespace: workload.Namespace}
if err := r.Get(ctx, dn, &deploy); err != nil {
if apierrors.IsNotFound(err) {
@@ -130,11 +130,11 @@ func (r *Reconciler) cleanupResources(ctx context.Context,
if err := r.Delete(ctx, &deploy); err != nil {
return err
}
log.Info("Removed an orphaned deployment", "deployment UID", *deployUID, "orphaned UID", uid)
klog.InfoS("Removed an orphaned deployment", "deployment UID", *deployUID, "orphaned UID", uid)
}
} else if res.Kind == util.KindService && res.APIVersion == corev1.SchemeGroupVersion.String() {
if uid != *serviceUID {
log.Info("Found an orphaned service", "orphaned UID", uid)
klog.InfoS("Found an orphaned service", "orphaned UID", uid)
sn := client.ObjectKey{Name: res.Name, Namespace: workload.Namespace}
if err := r.Get(ctx, sn, &service); err != nil {
if apierrors.IsNotFound(err) {
@@ -145,7 +145,7 @@ func (r *Reconciler) cleanupResources(ctx context.Context,
if err := r.Delete(ctx, &service); err != nil {
return err
}
log.Info("Removed an orphaned service", "orphaned UID", uid)
klog.InfoS("Removed an orphaned service", "orphaned UID", uid)
}
}
}

View File

@@ -27,7 +27,6 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
core "github.com/oam-dev/kubevela/apis/core.oam.dev"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
@@ -64,7 +63,6 @@ func TestRenderDeployment(t *testing.T) {
r := Reconciler{
Client: nil,
log: ctrl.Log.WithName("ContainerizedWorkload"),
record: nil,
Scheme: scheme,
}
@@ -152,7 +150,6 @@ func TestRenderConfigMaps(t *testing.T) {
r := Reconciler{
Client: nil,
log: ctrl.Log.WithName("ContainerizedWorkload"),
record: nil,
Scheme: scheme,
}

View File

@@ -17,7 +17,6 @@ limitations under the License.
package v1alpha2
import (
"github.com/crossplane/crossplane-runtime/pkg/logging"
ctrl "sigs.k8s.io/controller-runtime"
controller "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev"
@@ -34,18 +33,18 @@ import (
)
// Setup workload controllers.
func Setup(mgr ctrl.Manager, args controller.Args, l logging.Logger) error {
for _, setup := range []func(ctrl.Manager, controller.Args, logging.Logger) error{
func Setup(mgr ctrl.Manager, args controller.Args) error {
for _, setup := range []func(ctrl.Manager, controller.Args) error{
containerizedworkload.Setup, manualscalertrait.Setup, healthscope.Setup,
application.Setup, applicationrollout.Setup, applicationcontext.Setup, appdeployment.Setup,
traitdefinition.Setup, componentdefinition.Setup,
} {
if err := setup(mgr, args, l); err != nil {
if err := setup(mgr, args); err != nil {
return err
}
}
if args.ApplicationConfigurationInstalled {
return applicationconfiguration.Setup(mgr, args, l)
return applicationconfiguration.Setup(mgr, args)
}
return nil
}

View File

@@ -48,6 +48,17 @@ import (
// ErrNoSectionParameterInCue means there is not parameter section in Cue template of a workload
const ErrNoSectionParameterInCue = "capability %s doesn't contain section `parameter`"
// data types of parameter value
const (
TerraformVariableString string = "string"
TerraformVariableNumber string = "number"
TerraformVariableBool string = "bool"
TerraformVariableList string = "list"
TerraformVariableTuple string = "tuple"
TerraformVariableMap string = "map"
TerraformVariableObject string = "object"
)
// CapabilityDefinitionInterface is the interface for Capability (WorkloadDefinition and TraitDefinition)
type CapabilityDefinitionInterface interface {
GetCapabilityObject(ctx context.Context, k8sClient client.Client, namespace, name string) (*types.Capability, error)
@@ -62,8 +73,9 @@ type CapabilityComponentDefinition struct {
WorkloadType util.WorkloadType `json:"workloadType"`
WorkloadDefName string `json:"workloadDefName"`
Helm *commontypes.Helm `json:"helm"`
Kube *commontypes.Kube `json:"kube"`
Helm *commontypes.Helm `json:"helm"`
Kube *commontypes.Kube `json:"kube"`
Terraform *commontypes.Terraform `json:"terraform"`
CapabilityBaseDefinition
}
@@ -133,7 +145,44 @@ func (def *CapabilityComponentDefinition) GetKubeSchematicOpenAPISchema(params [
}
properties[p.Name] = tmp
}
s := openapi3.NewObjectSchema().WithProperties(properties)
return generateJSONSchemaWithRequiredProperty(properties, required)
}
// GetOpenAPISchemaFromTerraformComponentDefinition gets OpenAPI v3 schema by WorkloadDefinition name
// GetOpenAPISchemaFromTerraformComponentDefinition builds an OpenAPI v3 JSON schema
// for a Terraform-schematic ComponentDefinition by parsing the `variable` blocks of
// the given HCL configuration. Every variable becomes a required property; its
// declared type is mapped to the closest OpenAPI primitive, and its description and
// default value (when present) are carried over.
func GetOpenAPISchemaFromTerraformComponentDefinition(configuration string) ([]byte, error) {
	schemas := make(map[string]*openapi3.Schema)
	var required []string
	variables, err := common.ParseTerraformVariables(configuration)
	if err != nil {
		return nil, errors.Wrap(err, "failed to generate capability properties")
	}
	for k, v := range variables {
		var schema *openapi3.Schema
		switch v.Type {
		case TerraformVariableString:
			schema = openapi3.NewStringSchema()
		case TerraformVariableNumber:
			schema = openapi3.NewFloat64Schema()
		case TerraformVariableBool:
			schema = openapi3.NewBoolSchema()
		case TerraformVariableList, TerraformVariableTuple:
			schema = openapi3.NewArraySchema()
		case TerraformVariableMap, TerraformVariableObject:
			schema = openapi3.NewObjectSchema()
		default:
			// Fix: previously an unrecognized (or empty) variable type left schema nil,
			// and the schema.Title assignment below panicked with a nil-pointer
			// dereference. Fall back to a generic object schema instead.
			schema = openapi3.NewObjectSchema()
		}
		schema.Title = k
		required = append(required, k)
		if v.Default != nil {
			schema.Default = v.Default
		}
		schema.Description = v.Description
		schemas[v.Name] = schema
	}
	return generateJSONSchemaWithRequiredProperty(schemas, required)
}
func generateJSONSchemaWithRequiredProperty(schemas map[string]*openapi3.Schema, required []string) ([]byte, error) {
s := openapi3.NewObjectSchema().WithProperties(schemas)
if len(required) > 0 {
s.Required = required
}
@@ -154,6 +203,11 @@ func (def *CapabilityComponentDefinition) StoreOpenAPISchema(ctx context.Context
jsonSchema, err = helm.GetChartValuesJSONSchema(ctx, def.Helm)
case util.KubeDef:
jsonSchema, err = def.GetKubeSchematicOpenAPISchema(def.Kube.Parameters)
case util.TerraformDef:
if def.Terraform == nil {
return fmt.Errorf("no Configuration is set in Terraform specification: %s", def.Name)
}
jsonSchema, err = GetOpenAPISchemaFromTerraformComponentDefinition(def.Terraform.Configuration)
default:
jsonSchema, err = def.GetOpenAPISchema(ctx, k8sClient, pd, namespace, name)
}

View File

@@ -22,6 +22,7 @@ import (
"io/ioutil"
"os"
"path/filepath"
"strings"
"testing"
"github.com/crossplane/crossplane-runtime/pkg/test"
@@ -148,3 +149,73 @@ func TestGenerateOpenAPISchemaFromCapabilityParameter(t *testing.T) {
})
}
}
// TestGetOpenAPISchemaFromTerraformComponentDefinition verifies that a JSON schema
// generated from a Terraform configuration contains properties for the declared
// variables.
func TestGetOpenAPISchemaFromTerraformComponentDefinition(t *testing.T) {
	const configuration = `
module "rds" {
source = "terraform-alicloud-modules/rds/alicloud"
engine = "MySQL"
engine_version = "8.0"
instance_type = "rds.mysql.c1.large"
instance_storage = "20"
instance_name = var.instance_name
account_name = var.account_name
password = var.password
}
output "DB_NAME" {
value = module.rds.this_db_instance_name
}
output "DB_USER" {
value = module.rds.this_db_database_account
}
output "DB_PORT" {
value = module.rds.this_db_instance_port
}
output "DB_HOST" {
value = module.rds.this_db_instance_connection_string
}
output "DB_PASSWORD" {
value = module.rds.this_db_instance_port
}
variable "instance_name" {
description = "RDS instance name"
type = string
default = "poc"
}
variable "account_name" {
description = "RDS instance user account name"
type = "string"
default = "oam"
}
variable "password" {
description = "RDS instance account password"
type = "string"
default = "xxx"
}
variable "intVar" {
type = "number"
}
variable "boolVar" {
type = "bool"
}
variable "listVar" {
type = "list"
}
variable "mapVar" {
type = "map"
}`
	schema, err := GetOpenAPISchemaFromTerraformComponentDefinition(configuration)
	assert.NilError(t, err)
	// Spot-check that a string variable and a number variable both made it
	// into the generated schema document.
	generated := string(schema)
	for _, name := range []string{"account_name", "intVar"} {
		assert.Equal(t, strings.Contains(generated, name), true)
	}
}

View File

@@ -26,7 +26,6 @@ import (
"time"
"github.com/crossplane/crossplane-runtime/pkg/fieldpath"
"github.com/crossplane/crossplane-runtime/pkg/logging"
mapset "github.com/deckarep/golang-set"
"github.com/mitchellh/hashstructure/v2"
appsv1 "k8s.io/api/apps/v1"
@@ -38,6 +37,7 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/util/retry"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
commontypes "github.com/oam-dev/kubevela/apis/core.oam.dev/common"
@@ -216,14 +216,14 @@ func ExtractRevision(revisionName string) (int, error) {
}
// CompareWithRevision compares a component's spec with the component's latest revision content
func CompareWithRevision(ctx context.Context, c client.Client, logger logging.Logger, componentName, nameSpace,
func CompareWithRevision(ctx context.Context, c client.Client, componentName, nameSpace,
latestRevision string, curCompSpec *v1alpha2.ComponentSpec) (bool, error) {
oldRev := &appsv1.ControllerRevision{}
// retry on NotFound since we update the component last revision first
err := wait.ExponentialBackoff(retry.DefaultBackoff, func() (bool, error) {
err := c.Get(ctx, client.ObjectKey{Namespace: nameSpace, Name: latestRevision}, oldRev)
if err != nil && !kerrors.IsNotFound(err) {
logger.Info(fmt.Sprintf("get old controllerRevision %s error %v",
klog.InfoS(fmt.Sprintf("get old controllerRevision %s error %v",
latestRevision, err), "componentName", componentName)
return false, err
}
@@ -234,8 +234,7 @@ func CompareWithRevision(ctx context.Context, c client.Client, logger logging.Lo
}
oldComp, err := util.UnpackRevisionData(oldRev)
if err != nil {
logger.Info(fmt.Sprintf("Unmarshal old controllerRevision %s error %v",
latestRevision, err), "componentName", componentName)
klog.InfoS("Unmarshal old controllerRevision", latestRevision, "error", err, "componentName", componentName)
return true, err
}
if reflect.DeepEqual(curCompSpec, &oldComp.Spec) {

View File

@@ -25,14 +25,12 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/crossplane/crossplane-runtime/pkg/logging"
"github.com/crossplane/crossplane-runtime/pkg/test"
"github.com/stretchr/testify/assert"
v12 "k8s.io/api/apps/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/utils/pointer"
controllerruntime "sigs.k8s.io/controller-runtime"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
@@ -95,7 +93,6 @@ func TestConstructExtract(t *testing.T) {
func TestCompareWithRevision(t *testing.T) {
ctx := context.TODO()
logger := logging.NewLogrLogger(controllerruntime.Log.WithName("util-test"))
componentName := "testComp"
nameSpace := "namespace"
latestRevision := "revision"
@@ -194,7 +191,7 @@ func TestCompareWithRevision(t *testing.T) {
tclient := test.MockClient{
MockGet: test.NewMockGetFn(nil, tt.getFunc),
}
same, err := CompareWithRevision(ctx, &tclient, logger, componentName, nameSpace, latestRevision,
same, err := CompareWithRevision(ctx, &tclient, componentName, nameSpace, latestRevision,
tt.curCompSpec)
if err != tt.expectedErr {
t.Errorf("CompareWithRevision() error = %v, wantErr %v", err, tt.expectedErr)

View File

@@ -58,7 +58,7 @@ const (
type AbstractEngine interface {
Complete(ctx process.Context, abstractTemplate string, params interface{}) error
HealthCheck(ctx process.Context, cli client.Client, ns string, healthPolicyTemplate string) (bool, error)
Status(ctx process.Context, cli client.Client, ns string, customStatusTemplate string) (string, error)
Status(ctx process.Context, cli client.Client, ns string, customStatusTemplate string, parameter interface{}) (string, error)
}
type def struct {
@@ -223,7 +223,7 @@ func checkHealth(templateContext map[string]interface{}, healthPolicyTemplate st
}
// Status get workload status by customStatusTemplate
func (wd *workloadDef) Status(ctx process.Context, cli client.Client, ns string, customStatusTemplate string) (string, error) {
func (wd *workloadDef) Status(ctx process.Context, cli client.Client, ns string, customStatusTemplate string, parameter interface{}) (string, error) {
if customStatusTemplate == "" {
return "", nil
}
@@ -231,15 +231,28 @@ func (wd *workloadDef) Status(ctx process.Context, cli client.Client, ns string,
if err != nil {
return "", errors.WithMessage(err, "get template context")
}
return getStatusMessage(templateContext, customStatusTemplate)
return getStatusMessage(templateContext, customStatusTemplate, parameter)
}
func getStatusMessage(templateContext map[string]interface{}, customStatusTemplate string) (string, error) {
func getStatusMessage(templateContext map[string]interface{}, customStatusTemplate string, parameter interface{}) (string, error) {
var ctxBuff string
var paramBuff = "parameter: {}\n"
bt, err := json.Marshal(templateContext)
if err != nil {
return "", errors.WithMessage(err, "json marshal template context")
}
var buff = "context: " + string(bt) + "\n" + customStatusTemplate
ctxBuff = "context: " + string(bt) + "\n"
bt, err = json.Marshal(parameter)
if err != nil {
return "", errors.WithMessage(err, "json marshal template parameters")
}
if string(bt) != "null" {
paramBuff = "parameter: " + string(bt) + "\n"
}
var buff = ctxBuff + paramBuff + customStatusTemplate
var r cue.Runtime
inst, err := r.Compile("-", buff)
if err != nil {
@@ -392,7 +405,7 @@ func (td *traitDef) getTemplateContext(ctx process.Context, cli client.Reader, n
}
// Status get trait status by customStatusTemplate
func (td *traitDef) Status(ctx process.Context, cli client.Client, ns string, customStatusTemplate string) (string, error) {
func (td *traitDef) Status(ctx process.Context, cli client.Client, ns string, customStatusTemplate string, parameter interface{}) (string, error) {
if customStatusTemplate == "" {
return "", nil
}
@@ -400,7 +413,7 @@ func (td *traitDef) Status(ctx process.Context, cli client.Client, ns string, cu
if err != nil {
return "", errors.WithMessage(err, "get template context")
}
return getStatusMessage(templateContext, customStatusTemplate)
return getStatusMessage(templateContext, customStatusTemplate, parameter)
}
// HealthCheck address health check for trait

View File

@@ -833,6 +833,7 @@ func TestCheckHealth(t *testing.T) {
func TestGetStatus(t *testing.T) {
cases := map[string]struct {
tpContext map[string]interface{}
parameter interface{}
statusTemp string
expMessage string
}{
@@ -893,9 +894,33 @@ if len(context.outputs.ingress.status.loadBalancer.ingress) == 0 {
}`,
expMessage: "Visiting URL: example.com, IP: 10.0.0.1",
},
"status use parameter field": {
tpContext: map[string]interface{}{
"outputs": map[string]interface{}{
"test-name": map[string]interface{}{
"spec": map[string]interface{}{
"type": "NodePort",
"clusterIP": "10.0.0.1",
"ports": []interface{}{
map[string]interface{}{
"port": 80,
},
},
},
},
},
},
parameter: map[string]interface{}{
"configInfo": map[string]string{
"name": "test-name",
},
},
statusTemp: `message: parameter.configInfo.name + ".type: " + context.outputs["\(parameter.configInfo.name)"].spec.type`,
expMessage: "test-name.type: NodePort",
},
}
for message, ca := range cases {
gotMessage, err := getStatusMessage(ca.tpContext, ca.statusTemp)
gotMessage, err := getStatusMessage(ca.tpContext, ca.statusTemp, ca.parameter)
assert.NoError(t, err, message)
assert.Equal(t, ca.expMessage, gotMessage, message)
}

View File

@@ -30,7 +30,6 @@ import (
cpv1alpha1 "github.com/crossplane/crossplane-runtime/apis/core/v1alpha1"
"github.com/davecgh/go-spew/spew"
"github.com/go-logr/logr"
"github.com/pkg/errors"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
@@ -42,6 +41,7 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
@@ -127,6 +127,9 @@ const (
// HELMDef describe a workload refer to HELM
HELMDef WorkloadType = "HelmDef"
// TerraformDef describes a workload refer to Terraform
TerraformDef WorkloadType = "TerraformDef"
// ReferWorkload describe an existing workload
ReferWorkload WorkloadType = "ReferWorkload"
)
@@ -186,25 +189,25 @@ func LocateParentAppConfig(ctx context.Context, client client.Client, oamObject
}
// FetchWorkload fetch the workload that a trait refers to
func FetchWorkload(ctx context.Context, c client.Client, mLog logr.Logger, oamTrait oam.Trait) (
func FetchWorkload(ctx context.Context, c client.Client, oamTrait oam.Trait) (
*unstructured.Unstructured, error) {
var workload unstructured.Unstructured
workloadRef := oamTrait.GetWorkloadReference()
if len(workloadRef.Kind) == 0 || len(workloadRef.APIVersion) == 0 || len(workloadRef.Name) == 0 {
err := errors.New("no workload reference")
mLog.Error(err, ErrLocateWorkload)
klog.InfoS(ErrLocateWorkload, "err", err)
return nil, err
}
workload.SetAPIVersion(workloadRef.APIVersion)
workload.SetKind(workloadRef.Kind)
wn := client.ObjectKey{Name: workloadRef.Name, Namespace: oamTrait.GetNamespace()}
if err := c.Get(ctx, wn, &workload); err != nil {
mLog.Error(err, "Workload not find", "kind", workloadRef.Kind, "workload name", workloadRef.Name)
klog.InfoS("Failed to find workload", "kind", workloadRef.Kind, "workload name", workloadRef.Name,
"err", err)
return nil, err
}
mLog.Info("Get the workload the trait is pointing to", "workload name", workload.GetName(),
"workload APIVersion", workload.GetAPIVersion(), "workload Kind", workload.GetKind(), "workload UID",
workload.GetUID())
klog.InfoS("Get the workload the trait is pointing to", "workload", klog.KRef(workload.GetNamespace(), workload.GetName()),
"APIVersion", workload.GetAPIVersion(), "Kind", workload.GetKind(), "UID", workload.GetUID())
return &workload, nil
}
@@ -391,7 +394,7 @@ func checkRequestNamespaceError(err error) bool {
}
// FetchWorkloadChildResources fetch corresponding child resources given a workload
func FetchWorkloadChildResources(ctx context.Context, mLog logr.Logger, r client.Reader,
func FetchWorkloadChildResources(ctx context.Context, r client.Reader,
dm discoverymapper.DiscoveryMapper, workload *unstructured.Unstructured) ([]*unstructured.Unstructured, error) {
// Fetch the corresponding workloadDefinition CR
workloadDefinition, err := FetchWorkloadDefinition(ctx, r, dm, workload)
@@ -402,10 +405,10 @@ func FetchWorkloadChildResources(ctx context.Context, mLog logr.Logger, r client
}
return nil, err
}
return fetchChildResources(ctx, mLog, r, workload, workloadDefinition.Spec.ChildResourceKinds)
return fetchChildResources(ctx, r, workload, workloadDefinition.Spec.ChildResourceKinds)
}
func fetchChildResources(ctx context.Context, mLog logr.Logger, r client.Reader, workload *unstructured.Unstructured,
func fetchChildResources(ctx context.Context, r client.Reader, workload *unstructured.Unstructured,
wcrl []common.ChildResourceKind) ([]*unstructured.Unstructured, error) {
var childResources []*unstructured.Unstructured
// list by each child resource type with namespace and possible label selector
@@ -413,20 +416,21 @@ func fetchChildResources(ctx context.Context, mLog logr.Logger, r client.Reader,
crs := unstructured.UnstructuredList{}
crs.SetAPIVersion(wcr.APIVersion)
crs.SetKind(wcr.Kind)
mLog.Info("List child resource kind", "APIVersion", wcr.APIVersion, "Type", wcr.Kind, "owner UID",
klog.InfoS("List child resources", "apiVersion", wcr.APIVersion, "kind", wcr.Kind, "owner UID",
workload.GetUID())
if err := r.List(ctx, &crs, client.InNamespace(workload.GetNamespace()),
client.MatchingLabels(wcr.Selector)); err != nil {
mLog.Error(err, "failed to list object", "api version", crs.GetAPIVersion(), "kind", crs.GetKind())
klog.InfoS("Failed to list object", "apiVersion", crs.GetAPIVersion(), "kind", crs.GetKind(),
"err", err)
return nil, err
}
// pick the ones that is owned by the workload
for _, cr := range crs.Items {
for _, owner := range cr.GetOwnerReferences() {
if owner.UID == workload.GetUID() {
mLog.Info("Find a child resource we are looking for",
"APIVersion", cr.GetAPIVersion(), "Kind", cr.GetKind(),
"Name", cr.GetName(), "owner", owner.UID)
klog.InfoS("Find a child resource we are looking for", "child resource",
klog.KRef(cr.GetNamespace(), cr.GetName()), "apiVersion", cr.GetAPIVersion(),
"kind", cr.GetKind(), "owner", owner.UID)
or := cr // have to do a copy as the range variable is a reference and will change
childResources = append(childResources, &or)
}

View File

@@ -37,7 +37,6 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"
@@ -179,7 +178,6 @@ func TestLocateParentAppConfig(t *testing.T) {
func TestFetchWorkloadTraitReference(t *testing.T) {
t.Log("Setting up variables")
log := ctrl.Log.WithName("ManualScalarTraitReconciler")
noRefNameTrait := v1alpha2.ManualScalerTrait{
TypeMeta: metav1.TypeMeta{
APIVersion: v1alpha2.SchemeGroupVersion.String(),
@@ -258,7 +256,7 @@ func TestFetchWorkloadTraitReference(t *testing.T) {
for name, tc := range cases {
tclient := test.NewMockClient()
tclient.MockGet = test.NewMockGetFn(nil, tc.fields.getFunc)
gotWL, err := util.FetchWorkload(ctx, tclient, log, tc.fields.trait)
gotWL, err := util.FetchWorkload(ctx, tclient, tc.fields.trait)
t.Log(fmt.Sprint("Running test: ", name))
if tc.want.err == nil {
assert.NoError(t, err)
@@ -585,7 +583,6 @@ func TestChildResources(t *testing.T) {
},
}
log := ctrl.Log.WithName("ManualScalarTraitReconciler")
crkl := []common.ChildResourceKind{
{
Kind: "Deployment",
@@ -727,7 +724,7 @@ func TestChildResources(t *testing.T) {
MockGet: test.NewMockGetFn(nil, tc.fields.getFunc),
MockList: test.NewMockListFn(nil, tc.fields.listFunc),
}
got, err := util.FetchWorkloadChildResources(ctx, log, &tclient, mock.NewMockDiscoveryMapper(), unstructuredWorkload)
got, err := util.FetchWorkloadChildResources(ctx, &tclient, mock.NewMockDiscoveryMapper(), unstructuredWorkload)
t.Log(fmt.Sprint("Running test: ", name))
assert.Equal(t, tc.want.err, err)
assert.Equal(t, tc.want.crks, got)

View File

@@ -33,6 +33,8 @@ import (
"cuelang.org/go/encoding/openapi"
"github.com/AlecAivazis/survey/v2"
"github.com/ghodss/yaml"
"github.com/hashicorp/hcl/v2/hclparse"
"github.com/oam-dev/terraform-config-inspect/tfconfig"
terraformv1beta1 "github.com/oam-dev/terraform-controller/api/v1beta1"
kruise "github.com/openkruise/kruise-api/apps/v1alpha1"
certmanager "github.com/wonderflow/cert-manager-api/pkg/apis/certmanager/v1"
@@ -190,3 +192,18 @@ func ReadYamlToObject(path string, object k8sruntime.Object) error {
}
return yaml.Unmarshal(data, object)
}
// ParseTerraformVariables get variables from Terraform Configuration
func ParseTerraformVariables(configuration string) (map[string]*tfconfig.Variable, error) {
p := hclparse.NewParser()
hclFile, diagnostic := p.ParseHCL([]byte(configuration), "")
if diagnostic != nil {
return nil, errors.New(diagnostic.Error())
}
mod := tfconfig.Module{Variables: map[string]*tfconfig.Variable{}}
diagnostic = tfconfig.LoadModuleFromFile(hclFile, &mod)
if diagnostic != nil {
return nil, errors.New(diagnostic.Error())
}
return mod.Variables, nil
}

View File

@@ -239,3 +239,75 @@ func TestRealtimePrintCommandOutput(t *testing.T) {
assert.Contains(t, string(data), hello)
os.Remove(logFile)
}
// TestParseTerraformVariables checks that variables declared in a Terraform
// configuration are parsed and returned keyed by their names.
func TestParseTerraformVariables(t *testing.T) {
	configuration := `
module "rds" {
source = "terraform-alicloud-modules/rds/alicloud"
engine = "MySQL"
engine_version = "8.0"
instance_type = "rds.mysql.c1.large"
instance_storage = "20"
instance_name = var.instance_name
account_name = var.account_name
password = var.password
}
output "DB_NAME" {
value = module.rds.this_db_instance_name
}
output "DB_USER" {
value = module.rds.this_db_database_account
}
output "DB_PORT" {
value = module.rds.this_db_instance_port
}
output "DB_HOST" {
value = module.rds.this_db_instance_connection_string
}
output "DB_PASSWORD" {
value = module.rds.this_db_instance_port
}
variable "instance_name" {
description = "RDS instance name"
type = string
default = "poc"
}
variable "account_name" {
description = "RDS instance user account name"
type = "string"
default = "oam"
}
variable "password" {
description = "RDS instance account password"
type = "string"
default = "xxx"
}
variable "intVar" {
type = "number"
}
variable "boolVar" {
type = "bool"
}
variable "listVar" {
type = "list"
}
variable "mapVar" {
type = "map"
}`
	variables, err := ParseTerraformVariables(configuration)
	assert.NoError(t, err)
	_, passwordExisted := variables["password"]
	assert.True(t, passwordExisted)
	// Fix: this previously re-checked "password", so the intVar assertion
	// could never fail regardless of whether "intVar" was parsed.
	_, intVarExisted := variables["intVar"]
	assert.True(t, intVarExisted)
}

View File

@@ -27,8 +27,8 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/runtime/inject"
"sigs.k8s.io/controller-runtime/pkg/webhook"
@@ -54,9 +54,6 @@ type MutatingHandler struct {
Decoder *admission.Decoder
}
// log is for logging in this package.
var mutatelog = logf.Log.WithName("applicationconfiguration mutate webhook")
var _ admission.Handler = &MutatingHandler{}
// Handle handles admission requests.
@@ -69,10 +66,12 @@ func (h *MutatingHandler) Handle(ctx context.Context, req admission.Request) adm
}
// mutate the object
if err := h.Mutate(obj); err != nil {
mutatelog.Error(err, "failed to mutate the applicationConfiguration", "name", obj.Name)
klog.InfoS("Failed to mutate the applicationConfiguration", "applicationConfiguration", klog.KObj(obj),
"err", err)
return admission.Errored(http.StatusBadRequest, err)
}
mutatelog.Info("Print the mutated obj", "obj name", obj.Name, "mutated obj", string(util.JSONMarshal(obj.Spec)))
klog.InfoS("Print the mutated applicationConfiguration", "applicationConfiguration",
klog.KObj(obj), "mutated applicationConfiguration", string(util.JSONMarshal(obj.Spec)))
marshalled, err := json.Marshal(obj)
if err != nil {
@@ -81,15 +80,15 @@ func (h *MutatingHandler) Handle(ctx context.Context, req admission.Request) adm
resp := admission.PatchResponseFromRaw(req.AdmissionRequest.Object.Raw, marshalled)
if len(resp.Patches) > 0 {
mutatelog.Info("admit ApplicationConfiguration",
"namespace", obj.Namespace, "name", obj.Name, "patches", util.JSONMarshal(resp.Patches))
klog.InfoS("Admit applicationConfiguration", "applicationConfiguration", klog.KObj(obj),
"patches", util.JSONMarshal(resp.Patches))
}
return resp
}
// Mutate sets all the default value for the Component
func (h *MutatingHandler) Mutate(obj *v1alpha2.ApplicationConfiguration) error {
mutatelog.Info("mutate", "name", obj.Name)
klog.InfoS("Mutate applicationConfiguration", "applicationConfiguration", klog.KObj(obj))
for compIdx, comp := range obj.Spec.Components {
var updated bool
@@ -125,7 +124,7 @@ func (h *MutatingHandler) mutateTrait(content map[string]interface{}, compName s
if !ok {
return nil, false, fmt.Errorf("name of trait should be string instead of %s", reflect.TypeOf(content[TraitTypeField]))
}
mutatelog.Info("the trait refers to traitDefinition by name", "compName", compName, "trait name", traitType)
klog.InfoS("Trait refers to traitDefinition by name", "compName", compName, "trait name", traitType)
// Fetch the corresponding traitDefinition CR, the traitDefinition crd is cluster scoped
traitDefinition := &v1alpha2.TraitDefinition{}
if err := h.Client.Get(context.TODO(), types.NamespacedName{Name: traitType}, traitDefinition); err != nil {
@@ -154,7 +153,7 @@ func (h *MutatingHandler) mutateTrait(content map[string]interface{}, compName s
}.String()
trait.SetAPIVersion(apiVersion)
trait.SetKind(customResourceDefinition.Spec.Names.Kind)
mutatelog.Info("Set the trait GVK", "trait api version", trait.GetAPIVersion(), "trait Kind", trait.GetKind())
klog.InfoS("Set the trait GVK", "trait apiVersion", trait.GetAPIVersion(), "trait Kind", trait.GetKind())
// add traitType label
trait.SetLabels(util.MergeMapOverrideWithDst(trait.GetLabels(), map[string]string{oam.TraitTypeLabel: traitType}))
// copy back the object

View File

@@ -60,8 +60,8 @@ func (h *MutatingHandler) Handle(ctx context.Context, req admission.Request) adm
}
resp := admission.PatchResponseFromRaw(req.AdmissionRequest.Object.Raw, marshalled)
if len(resp.Patches) > 0 {
klog.V(common.LogDebugWithContent).Infof("Admit AppRollout %s/%s patches: %v", obj.Namespace, obj.Name,
util.DumpJSON(resp.Patches))
klog.V(common.LogDebugWithContent).InfoS("Admit appRollout", "appRollout", klog.KObj(obj),
"patches", util.DumpJSON(resp.Patches))
}
return resp
}

View File

@@ -25,8 +25,8 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/runtime/inject"
"sigs.k8s.io/controller-runtime/pkg/webhook"
@@ -53,9 +53,6 @@ type MutatingHandler struct {
Decoder *admission.Decoder
}
// log is for logging in this package.
var mutatelog = logf.Log.WithName("component mutate webhook")
var _ admission.Handler = &MutatingHandler{}
// Handle handles admission requests.
@@ -68,10 +65,10 @@ func (h *MutatingHandler) Handle(ctx context.Context, req admission.Request) adm
}
// mutate the object
if err := h.Mutate(obj); err != nil {
mutatelog.Error(err, "failed to mutate the component", "name", obj.Name)
klog.InfoS("Failed to mutate the component", "component", klog.KObj(obj), "err", err)
return admission.Errored(http.StatusBadRequest, err)
}
mutatelog.Info("Print the mutated obj", "obj name", obj.Name, "mutated obj", string(obj.Spec.Workload.Raw))
klog.InfoS("Print the mutated obj", "obj name", obj.Name, "mutated obj", string(obj.Spec.Workload.Raw))
marshalled, err := json.Marshal(obj)
if err != nil {
@@ -80,15 +77,14 @@ func (h *MutatingHandler) Handle(ctx context.Context, req admission.Request) adm
resp := admission.PatchResponseFromRaw(req.AdmissionRequest.Object.Raw, marshalled)
if len(resp.Patches) > 0 {
mutatelog.Info("admit Component",
"namespace", obj.Namespace, "name", obj.Name, "patches", util.JSONMarshal(resp.Patches))
klog.InfoS("Admit component", "component", klog.KObj(obj), "patches", util.JSONMarshal(resp.Patches))
}
return resp
}
// Mutate sets all the default value for the Component
func (h *MutatingHandler) Mutate(obj *v1alpha2.Component) error {
mutatelog.Info("mutate", "name", obj.Name)
klog.InfoS("Mutate component", "component", klog.KObj(obj))
var content map[string]interface{}
if err := json.Unmarshal(obj.Spec.Workload.Raw, &content); err != nil {
return err
@@ -98,7 +94,7 @@ func (h *MutatingHandler) Mutate(obj *v1alpha2.Component) error {
if !ok {
return fmt.Errorf("workload content has an unknown type field")
}
mutatelog.Info("the component refers to workoadDefinition by type", "name", obj.Name, "workload type", workloadType)
klog.InfoS("Component refers to workoadDefinition by type", "name", obj.Name, "workload type", workloadType)
// Fetch the corresponding workloadDefinition CR, the workloadDefinition crd is cluster scoped
workloadDefinition := &v1alpha2.WorkloadDefinition{}
if err := h.Client.Get(context.TODO(), types.NamespacedName{Name: workloadType}, workloadDefinition); err != nil {
@@ -120,7 +116,7 @@ func (h *MutatingHandler) Mutate(obj *v1alpha2.Component) error {
}.String()
workload.SetAPIVersion(apiVersion)
workload.SetKind(gvk.Kind)
mutatelog.Info("Set the component workload GVK", "workload api version", workload.GetAPIVersion(), "workload Kind", workload.GetKind())
klog.InfoS("Set the component workload GVK", "workload apiVersion", workload.GetAPIVersion(), "workload Kind", workload.GetKind())
// copy namespace/label/annotation to the workload and add workloadType label
workload.SetNamespace(obj.GetNamespace())
workload.SetLabels(util.MergeMapOverrideWithDst(obj.GetLabels(), map[string]string{oam.WorkloadTypeLabel: workloadType}))

View File

@@ -25,7 +25,7 @@ import (
apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/util/validation/field"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/webhook"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
@@ -45,9 +45,6 @@ type ValidatingHandler struct {
Decoder *admission.Decoder
}
// log is for logging in this package.
var validatelog = logf.Log.WithName("component validate webhook")
var _ admission.Handler = &ValidatingHandler{}
// Handle handles admission requests.
@@ -56,20 +53,20 @@ func (h *ValidatingHandler) Handle(ctx context.Context, req admission.Request) a
err := h.Decoder.Decode(req, obj)
if err != nil {
validatelog.Error(err, "decoder failed", "req operation", req.AdmissionRequest.Operation, "req",
req.AdmissionRequest)
klog.InfoS("Failed to decode component", "req operation", req.AdmissionRequest.Operation, "req",
req.AdmissionRequest, "err", err)
return admission.Denied(err.Error())
}
switch req.AdmissionRequest.Operation { //nolint:exhaustive
case admissionv1beta1.Create:
if allErrs := ValidateComponentObject(obj); len(allErrs) > 0 {
validatelog.Info("create failed", "name", obj.Name, "errMsg", allErrs.ToAggregate().Error())
klog.InfoS("Failed to create component", "component", klog.KObj(obj), "err", allErrs.ToAggregate().Error())
return admission.Denied(allErrs.ToAggregate().Error())
}
case admissionv1beta1.Update:
if allErrs := ValidateComponentObject(obj); len(allErrs) > 0 {
validatelog.Info("update failed", "name", obj.Name, "errMsg", allErrs.ToAggregate().Error())
klog.InfoS("Failed to update component", "component", klog.KObj(obj), "err", allErrs.ToAggregate().Error())
return admission.Denied(allErrs.ToAggregate().Error())
}
}
@@ -79,7 +76,7 @@ func (h *ValidatingHandler) Handle(ctx context.Context, req admission.Request) a
// ValidateComponentObject validates the Component on creation
func ValidateComponentObject(obj *v1alpha2.Component) field.ErrorList {
validatelog.Info("validate component", "name", obj.Name)
klog.InfoS("Validate component", "component", klog.KObj(obj))
allErrs := apimachineryvalidation.ValidateObjectMeta(&obj.ObjectMeta, true,
apimachineryvalidation.NameIsDNSSubdomain, field.NewPath("metadata"))
fldPath := field.NewPath("spec")

View File

@@ -21,10 +21,9 @@ import (
"encoding/json"
"net/http"
"k8s.io/klog"
"k8s.io/klog/v2"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/runtime/inject"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
@@ -40,9 +39,6 @@ type MutatingHandler struct {
Decoder *admission.Decoder
}
// log is for logging in this package.
var mutatelog = logf.Log.WithName("PodSpecWorkload-mutate")
var _ admission.Handler = &MutatingHandler{}
// Handle handles admission requests.
@@ -61,16 +57,16 @@ func (h *MutatingHandler) Handle(ctx context.Context, req admission.Request) adm
}
resp := admission.PatchResponseFromRaw(req.AdmissionRequest.Object.Raw, marshalled)
if len(resp.Patches) > 0 {
klog.V(5).Infof("Admit PodSpecWorkload %s/%s patches: %v", obj.Namespace, obj.Name, util.DumpJSON(resp.Patches))
klog.V(5).InfoS("Admit PodSpecWorkload", "podSpecWorkload", klog.KObj(obj), "patches", util.DumpJSON(resp.Patches))
}
return resp
}
// DefaultPodSpecWorkload will set the default value for the PodSpecWorkload
func DefaultPodSpecWorkload(obj *v1alpha1.PodSpecWorkload) {
mutatelog.Info("default", "name", obj.Name)
klog.InfoS("Set the default value for the PodSpecWorkload", "podSpecWorkload", klog.KObj(obj))
if obj.Spec.Replicas == nil {
mutatelog.Info("default replicas as 1")
klog.InfoS("Set default replicas as 1")
obj.Spec.Replicas = pointer.Int32Ptr(1)
}
}

View File

@@ -23,8 +23,8 @@ import (
admissionv1beta1 "k8s.io/api/admission/v1beta1"
apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/runtime/inject"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
@@ -39,9 +39,6 @@ type ValidatingHandler struct {
Decoder *admission.Decoder
}
// log is for logging in this package.
var validatelog = logf.Log.WithName("PodSpecWorkload-validate")
var _ admission.Handler = &ValidatingHandler{}
// Handle handles admission requests.
@@ -50,8 +47,8 @@ func (h *ValidatingHandler) Handle(ctx context.Context, req admission.Request) a
err := h.Decoder.Decode(req, obj)
if err != nil {
validatelog.Error(err, "decoder failed", "req operation", req.AdmissionRequest.Operation, "req",
req.AdmissionRequest)
klog.InfoS("Failed to decode", "req operation", req.AdmissionRequest.Operation, "req",
req.AdmissionRequest, "err", err)
return admission.Errored(http.StatusBadRequest, err)
}
@@ -78,7 +75,7 @@ func (h *ValidatingHandler) Handle(ctx context.Context, req admission.Request) a
// ValidateCreate validates the PodSpecWorkload on creation
func ValidateCreate(r *v1alpha1.PodSpecWorkload) field.ErrorList {
validatelog.Info("validate create", "name", r.Name)
klog.InfoS("Validate create podSpecWorkload", "podSpecWorkload", klog.KObj(r))
allErrs := apimachineryvalidation.ValidateObjectMeta(&r.ObjectMeta, true,
apimachineryvalidation.NameIsDNSSubdomain, field.NewPath("metadata"))
@@ -97,13 +94,13 @@ func ValidateCreate(r *v1alpha1.PodSpecWorkload) field.ErrorList {
// ValidateUpdate validates the PodSpecWorkload on update
func ValidateUpdate(r *v1alpha1.PodSpecWorkload, _ *v1alpha1.PodSpecWorkload) field.ErrorList {
validatelog.Info("validate update", "name", r.Name)
klog.InfoS("Validate update podSpecWorkload", "podSpecWorkload", klog.KObj(r))
return ValidateCreate(r)
}
// ValidateDelete validates the PodSpecWorkload on delete
func ValidateDelete(r *v1alpha1.PodSpecWorkload) field.ErrorList {
validatelog.Info("validate delete", "name", r.Name)
klog.InfoS("Validate delete PodSpecWorkload", "podSpecWorkload", klog.KObj(r))
return nil
}

View File

@@ -28,8 +28,6 @@ import (
"cuelang.org/go/cue"
"github.com/getkin/kin-openapi/openapi3"
"github.com/hashicorp/hcl/v2/hclparse"
"github.com/oam-dev/terraform-config-inspect/tfconfig"
"github.com/olekukonko/tablewriter"
"github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
@@ -588,21 +586,6 @@ func (ref *ParseReference) parseParameters(paraValue cue.Value, paramKey string,
return nil
}
// parseTerraformVariables get variables from Terraform Configuration
func (ref *ParseReference) parseTerraformVariables(configuration string) (map[string]*tfconfig.Variable, error) {
p := hclparse.NewParser()
hclFile, diagnostic := p.ParseHCL([]byte(configuration), "")
if diagnostic != nil {
return nil, errors.New(diagnostic.Error())
}
mod := tfconfig.Module{Variables: map[string]*tfconfig.Variable{}}
diagnostic = tfconfig.LoadModuleFromFile(hclFile, &mod)
if diagnostic != nil {
return nil, errors.New(diagnostic.Error())
}
return mod.Variables, nil
}
// getCUEPrintableDefaultValue converts the value in `interface{}` type to be printable
func (ref *ParseReference) getCUEPrintableDefaultValue(v interface{}) string {
if v == nil {
@@ -729,7 +712,7 @@ func (ref *ParseReference) parseTerraformCapabilityParameters(capability types.C
writeConnectionSecretToRefReferenceParameter.Required = false
writeConnectionSecretToRefReferenceParameter.Usage = "The secret which the cloud resource connection will be written to"
variables, err := ref.parseTerraformVariables(capability.TerraformConfiguration)
variables, err := common.ParseTerraformVariables(capability.TerraformConfiguration)
if err != nil {
return nil, errors.Wrap(err, "failed to generate capability properties")
}

View File

@@ -158,6 +158,37 @@ var _ = Describe("Application Normal tests", func() {
verifyWorkloadRunningExpected("myweb", 1, "stefanprodan/podinfo:5.0.2")
})
It("Test app have component with multiple same type traits", func() {
traitDef := new(v1beta1.TraitDefinition)
Expect(common.ReadYamlToObject("testdata/app/trait_config.yaml", traitDef)).Should(BeNil())
traitDef.Namespace = namespaceName
Expect(k8sClient.Create(ctx, traitDef)).Should(BeNil())
By("apply application")
applyApp("app7.yaml")
appName := "test-worker"
By("check application status")
testApp := new(v1beta1.Application)
Eventually(func() error {
err := k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: appName}, testApp)
if err != nil {
return err
}
if len(testApp.Status.Services) != 1 {
return fmt.Errorf("error ComponentStatus number wants %d, actually %d", 1, len(testApp.Status.Services))
}
if len(testApp.Status.Services[0].Traits) != 2 {
return fmt.Errorf("error TraitStatus number wants %d, actually %d", 2, len(testApp.Status.Services[0].Traits))
}
return nil
}, 5*time.Second).Should(BeNil())
By("check trait status")
Expect(testApp.Status.Services[0].Traits[0].Message).Should(Equal("configMap:app-file-html"))
Expect(testApp.Status.Services[0].Traits[1].Message).Should(Equal("secret:app-env-config"))
})
It("Test app have rollout-template false annotation", func() {
By("Apply an application")
var newApp v1beta1.Application

View File

@@ -83,6 +83,35 @@ var _ = Describe("Test application of the specified definition version", func()
return nil
}, 40*time.Second, time.Second).Should(BeNil())
labelV2DefRev := new(v1beta1.DefinitionRevision)
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: "label-v2", Namespace: namespace}, labelV2DefRev)
}, 15*time.Second, time.Second).Should(BeNil())
webserviceV1 := webServiceWithNoTemplate.DeepCopy()
webserviceV1.Spec.Schematic.CUE.Template = webServiceV1Template
webserviceV1.SetNamespace(namespace)
Expect(k8sClient.Create(ctx, webserviceV1)).Should(Succeed())
webserviceV1DefRev := new(v1beta1.DefinitionRevision)
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: "webservice-v1", Namespace: namespace}, webserviceV1DefRev)
}, 15*time.Second, time.Second).Should(BeNil())
webserviceV2 := new(v1beta1.ComponentDefinition)
Eventually(func() error {
err := k8sClient.Get(ctx, client.ObjectKey{Name: "webservice", Namespace: namespace}, webserviceV2)
if err != nil {
return err
}
webserviceV2.Spec.Schematic.CUE.Template = webServiceV2Template
return k8sClient.Update(ctx, webserviceV2)
}, 15*time.Second, time.Second).Should(BeNil())
webserviceV2DefRev := new(v1beta1.DefinitionRevision)
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: "webservice-v2", Namespace: namespace}, webserviceV2DefRev)
}, 15*time.Second, time.Second).Should(BeNil())
})
AfterEach(func() {
@@ -146,37 +175,6 @@ var _ = Describe("Test application of the specified definition version", func()
return nil
}, 40*time.Second, time.Second).Should(BeNil())
webserviceV1 := webServiceWithNoTemplate.DeepCopy()
webserviceV1.Spec.Schematic.CUE.Template = webServiceV1Template
webserviceV1.SetNamespace(namespace)
Expect(k8sClient.Create(ctx, webserviceV1)).Should(Succeed())
Eventually(func() error {
err := k8sClient.Get(ctx, client.ObjectKey{Name: "webservice", Namespace: namespace}, webserviceV1)
if err != nil {
return err
}
webserviceV1.Spec.Schematic.CUE.Template = webServiceV2Template
return k8sClient.Update(ctx, webserviceV1)
}, 15*time.Second, time.Second).Should(BeNil())
webserviceDefRevList := new(v1beta1.DefinitionRevisionList)
webserviceDefRevListOpts := []client.ListOption{
client.InNamespace(namespace),
client.MatchingLabels{
oam.LabelComponentDefinitionName: "webservice",
},
}
Eventually(func() error {
err := k8sClient.List(ctx, webserviceDefRevList, webserviceDefRevListOpts...)
if err != nil {
return err
}
if len(webserviceDefRevList.Items) != 2 {
return fmt.Errorf("error defRevison number wants %d, actually %d", 2, len(webserviceDefRevList.Items))
}
return nil
}, 40*time.Second, time.Second).Should(BeNil())
app := v1beta1.Application{
ObjectMeta: metav1.ObjectMeta{
Name: appName,
@@ -709,6 +707,91 @@ var _ = Describe("Test application of the specified definition version", func()
Expect(k8sClient.Patch(ctx, &app, client.Merge)).Should(HaveOccurred())
})
// refer to https://github.com/oam-dev/kubevela/discussions/1810#discussioncomment-914295
It("Test k8s resources created by application whether with correct label", func() {
var (
appName = "test-resources-labels"
compName = "web"
)
exposeV1 := exposeWithNoTemplate.DeepCopy()
exposeV1.Spec.Schematic.CUE.Template = exposeV1Template
exposeV1.SetNamespace(namespace)
Expect(k8sClient.Create(ctx, exposeV1)).Should(Succeed())
exposeV1DefRev := new(v1beta1.DefinitionRevision)
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: "expose-v1", Namespace: namespace}, exposeV1DefRev)
}, 15*time.Second, time.Second).Should(BeNil())
exposeV2 := new(v1beta1.TraitDefinition)
Eventually(func() error {
err := k8sClient.Get(ctx, client.ObjectKey{Name: "expose", Namespace: namespace}, exposeV2)
if err != nil {
return err
}
exposeV2.Spec.Schematic.CUE.Template = exposeV2Templae
return k8sClient.Update(ctx, exposeV2)
}, 15*time.Second, time.Second).Should(BeNil())
exposeV2DefRev := new(v1beta1.DefinitionRevision)
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: "expose-v2", Namespace: namespace}, exposeV2DefRev)
}, 15*time.Second, time.Second).Should(BeNil())
app := v1beta1.Application{
ObjectMeta: metav1.ObjectMeta{
Name: appName,
Namespace: namespace,
},
Spec: v1beta1.ApplicationSpec{
Components: []v1beta1.ApplicationComponent{
{
Name: compName,
Type: "webservice@v1",
Properties: util.Object2RawExtension(map[string]interface{}{
"image": "crccheck/hello-world",
"port": 8000,
}),
Traits: []v1beta1.ApplicationTrait{
{
Type: "expose@v1",
Properties: util.Object2RawExtension(map[string]interface{}{
"port": []int{8000},
}),
},
},
},
},
},
}
By("Create application")
Eventually(func() error {
return k8sClient.Create(ctx, app.DeepCopy())
}, 10*time.Second, 500*time.Millisecond).Should(Succeed())
By("Verify the workload(deployment) is created successfully")
webServiceDeploy := &appsv1.Deployment{}
deployName := compName
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: deployName, Namespace: namespace}, webServiceDeploy)
}, 30*time.Second, 3*time.Second).Should(Succeed())
By("Verify the workload label generated by KubeVela")
workloadLabel := webServiceDeploy.GetLabels()[oam.WorkloadTypeLabel]
Expect(workloadLabel).Should(Equal("webservice-v1"))
By("Verify the trait(service) is created successfully")
exposeSVC := &corev1.Service{}
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: compName, Namespace: namespace}, exposeSVC)
}, 30*time.Second, 3*time.Second).Should(Succeed())
By("Verify the trait label generated by KubeVela")
traitLabel := exposeSVC.GetLabels()[oam.TraitTypeLabel]
Expect(traitLabel).Should(Equal("expose-v1"))
})
})
var webServiceWithNoTemplate = &v1beta1.ComponentDefinition{
@@ -788,6 +871,23 @@ var labelWithNoTemplate = &v1beta1.TraitDefinition{
},
}
var exposeWithNoTemplate = &v1beta1.TraitDefinition{
TypeMeta: metav1.TypeMeta{
Kind: "TraitDefinition",
APIVersion: "core.oam.dev/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "expose",
},
Spec: v1beta1.TraitDefinitionSpec{
Schematic: &common.Schematic{
CUE: &common.CUE{
Template: "",
},
},
},
}
func generateTemplate(template string) runtime.RawExtension {
b, _ := yaml.YAMLToJSON([]byte(template))
return runtime.RawExtension{Raw: b}
@@ -1032,3 +1132,74 @@ spec:
- "sleep"
- "1000"
`
var exposeV1Template = `
outputs: service: {
apiVersion: "v1"
kind: "Service"
metadata:
name: context.name
spec: {
selector:
"app.oam.dev/component": context.name
ports: [
for p in parameter.port {
port: p
targetPort: p
},
]
}
}
parameter: {
// +usage=Specify the exposion ports
port: [...int]
}
`
var exposeV2Templae = `
outputs: service: {
apiVersion: "v1"
kind: "Service"
metadata:
name: context.name
spec: {
selector: {
"app.oam.dev/component": context.name
}
ports: [
for k, v in parameter.http {
port: v
targetPort: v
},
]
}
}
outputs: ingress: {
apiVersion: "networking.k8s.io/v1beta1"
kind: "Ingress"
metadata:
name: context.name
spec: {
rules: [{
host: parameter.domain
http: {
paths: [
for k, v in parameter.http {
path: k
backend: {
serviceName: context.name
servicePort: v
}
},
]
}
}]
}
}
parameter: {
domain: string
http: [string]: int
}
`

25
test/e2e-test/testdata/app/app7.yaml vendored Normal file
View File

@@ -0,0 +1,25 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: test-worker
spec:
components:
- name: myworker
type: worker
properties:
image: nginx
traits:
- type: config
properties:
kind: "configMap"
name: "app-file-html"
configname: "file"
data:
test: "demo-app"
- type: config
properties:
kind: "secret"
name: "app-env-config"
configname: "env"
data:
test: "TXlQQHNzMTIz"

View File

@@ -0,0 +1,45 @@
# Code generated by KubeVela templates. DO NOT EDIT.
apiVersion: core.oam.dev/v1beta1
kind: TraitDefinition
metadata:
name: config
spec:
status:
customStatus: |-
message: parameter.kind + ":" + context.outputs["\(parameter.configname)"].metadata.name
appliesToWorkloads:
- deployments.apps
podDisruptive: true
schematic:
cue:
template: |-
outputs: "\(parameter.configname)": {
if parameter.kind == "configMap" {
apiVersion: "v1"
kind: "ConfigMap"
metadata: name: parameter.name
data: {
for k, v in parameter.data {
"\(k)": v
}
}
}
if parameter.kind == "secret" {
apiVersion: "v1"
kind: "Secret"
metadata: name: parameter.name
data: {
for k, v in parameter.data {
"\(k)": v
}
}
}
}
parameter: {
kind: string
name: string
configname: string
data: [string]: string
}