/*
Copyright 2022 The KubeVela Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

	http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package multicluster

import (
	"context"
	"fmt"
	"strings"
	"sync"

	"cuelang.org/go/cue"
	"cuelang.org/go/cue/cuecontext"
	pkgmaps "github.com/kubevela/pkg/util/maps"
	"github.com/kubevela/pkg/util/slices"
	"github.com/kubevela/workflow/pkg/cue/model/value"
	"github.com/pkg/errors"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/utils/ptr"
	"sigs.k8s.io/controller-runtime/pkg/client"

	workflowerrors "github.com/kubevela/workflow/pkg/errors"

	"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
	"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha1"
	"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
	"github.com/oam-dev/kubevela/apis/types"
	"github.com/oam-dev/kubevela/pkg/appfile"
	"github.com/oam-dev/kubevela/pkg/oam"
	pkgpolicy "github.com/oam-dev/kubevela/pkg/policy"
	"github.com/oam-dev/kubevela/pkg/policy/envbinding"
	"github.com/oam-dev/kubevela/pkg/resourcekeeper"
	"github.com/oam-dev/kubevela/pkg/utils"
	velaerrors "github.com/oam-dev/kubevela/pkg/utils/errors"
	oamprovidertypes "github.com/oam-dev/kubevela/pkg/workflow/providers/types"
)

// DeployParameter is the parameter of the deploy workflow step
type DeployParameter struct {
	// Declare the policies that are used for this deployment. If not specified, the components will be deployed to the hub cluster.
	Policies []string `json:"policies,omitempty"`
	// Maximum number of concurrently delivered components.
	Parallelism int64 `json:"parallelism"`
	// If set to false, this step will also apply components with Terraform workloads; if set to true, such components are skipped.
	IgnoreTerraformComponent bool `json:"ignoreTerraformComponent"`
	// The policies that are embedded in the `deploy` step directly
	InlinePolicies []v1beta1.AppPolicy `json:"inlinePolicies,omitempty"`
}

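// Illustrative usage (values below are placeholders, not taken from this
// repository): the JSON tags above correspond to the properties of a `deploy`
// workflow step in an Application, for example:
//
//	workflow:
//	  steps:
//	    - name: deploy-prod
//	      type: deploy
//	      properties:
//	        policies: ["topology-prod", "override-prod"]
//	        parallelism: 5
//	        ignoreTerraformComponent: true
//
// The referenced policy names must match policies declared in the same
// Application, or be supplied directly via inlinePolicies.
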
// DeployWorkflowStepExecutor is the executor that runs the deploy workflow step
type DeployWorkflowStepExecutor interface {
	Deploy(ctx context.Context) (healthy bool, reason string, err error)
}

// NewDeployWorkflowStepExecutor creates a DeployWorkflowStepExecutor
func NewDeployWorkflowStepExecutor(cli client.Client, af *appfile.Appfile, apply oamprovidertypes.ComponentApply, healthCheck oamprovidertypes.ComponentHealthCheck, renderer oamprovidertypes.WorkloadRender, parameter DeployParameter) DeployWorkflowStepExecutor {
	return &deployWorkflowStepExecutor{
		cli:         cli,
		af:          af,
		apply:       apply,
		healthCheck: healthCheck,
		renderer:    renderer,
		parameter:   parameter,
	}
}

type deployWorkflowStepExecutor struct {
	cli         client.Client
	af          *appfile.Appfile
	apply       oamprovidertypes.ComponentApply
	healthCheck oamprovidertypes.ComponentHealthCheck
	renderer    oamprovidertypes.WorkloadRender
	parameter   DeployParameter
}

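// A minimal usage sketch, assuming the apply, health-check and render
// callbacks (applyFn, healthCheckFn and renderFn below are hypothetical names)
// are supplied by the workflow provider wiring:
//
//	executor := NewDeployWorkflowStepExecutor(cli, af, applyFn, healthCheckFn, renderFn, DeployParameter{
//		Policies:    []string{"topology-prod"},
//		Parallelism: 5,
//	})
//	healthy, reason, err := executor.Deploy(ctx)
//
// When healthy is false, reason is a comma-joined list of per-task messages.
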
// Deploy executes the deploy workflow step
func (executor *deployWorkflowStepExecutor) Deploy(ctx context.Context) (bool, string, error) {
	policies, err := selectPolicies(executor.af.Policies, executor.parameter.Policies)
	if err != nil {
		return false, "", err
	}
	policies = append(policies, fillInlinePolicyNames(executor.parameter.InlinePolicies)...)
	components, err := loadComponents(ctx, executor.renderer, executor.cli, executor.af, executor.af.Components, executor.parameter.IgnoreTerraformComponent)
	if err != nil {
		return false, "", err
	}

	// Dealing with topology, override and replication policies in order.
	placements, err := pkgpolicy.GetPlacementsFromTopologyPolicies(ctx, executor.cli, executor.af.Namespace, policies, resourcekeeper.AllowCrossNamespaceResource)
	if err != nil {
		return false, "", err
	}
	components, err = overrideConfiguration(policies, components)
	if err != nil {
		return false, "", err
	}
	components, err = pkgpolicy.ReplicateComponents(policies, components)
	if err != nil {
		return false, "", err
	}
	return applyComponents(ctx, executor.apply, executor.healthCheck, components, placements, int(executor.parameter.Parallelism))
}

func selectPolicies(policies []v1beta1.AppPolicy, policyNames []string) ([]v1beta1.AppPolicy, error) {
	policyMap := make(map[string]v1beta1.AppPolicy)
	for _, policy := range policies {
		policyMap[policy.Name] = policy
	}
	var selectedPolicies []v1beta1.AppPolicy
	for _, policyName := range policyNames {
		if policy, found := policyMap[policyName]; found {
			selectedPolicies = append(selectedPolicies, policy)
		} else {
			return nil, errors.Errorf("policy %s not found", policyName)
		}
	}
	return selectedPolicies, nil
}

func fillInlinePolicyNames(policies []v1beta1.AppPolicy) []v1beta1.AppPolicy {
	for i := range policies {
		if policies[i].Name == "" {
			policies[i].Name = fmt.Sprintf("inline-%s-policy-%d", policies[i].Type, i)
		}
	}
	return policies
}

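// For example, an unnamed inline policy of type "override" at index 0 is given
// the generated name "inline-override-policy-0".
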
func loadComponents(ctx context.Context, render oamprovidertypes.WorkloadRender, cli client.Client, af *appfile.Appfile, components []common.ApplicationComponent, ignoreTerraformComponent bool) ([]common.ApplicationComponent, error) {
	var loadedComponents []common.ApplicationComponent
	for _, comp := range components {
		loadedComp, err := af.LoadDynamicComponent(ctx, cli, comp.DeepCopy())
		if err != nil {
			return nil, err
		}
		if ignoreTerraformComponent {
			wl, err := render(ctx, comp)
			if err != nil {
				return nil, errors.Wrapf(err, "failed to render component into workload")
			}
			if wl.CapabilityCategory == types.TerraformCategory {
				continue
			}
		}
		loadedComponents = append(loadedComponents, *loadedComp)
	}
	return loadedComponents, nil
}

func overrideConfiguration(policies []v1beta1.AppPolicy, components []common.ApplicationComponent) ([]common.ApplicationComponent, error) {
	var err error
	for _, policy := range policies {
		if policy.Type == v1alpha1.OverridePolicyType {
			if policy.Properties == nil {
				return nil, fmt.Errorf("override policy %s must not have empty properties", policy.Name)
			}
			overrideSpec := &v1alpha1.OverridePolicySpec{}
			if err := utils.StrictUnmarshal(policy.Properties.Raw, overrideSpec); err != nil {
				return nil, errors.Wrapf(err, "failed to parse override policy %s", policy.Name)
			}
			components, err = envbinding.PatchComponents(components, overrideSpec.Components, overrideSpec.Selector)
			if err != nil {
				return nil, errors.Wrapf(err, "failed to apply override policy %s", policy.Name)
			}
		}
	}
	return components, nil
}

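// An illustrative properties payload for an override policy handled above
// (field names follow v1alpha1.OverridePolicySpec; the values are placeholders):
//
//	{
//	  "selector": ["web"],
//	  "components": [
//	    {"name": "web", "properties": {"image": "nginx:1.21"}}
//	  ]
//	}
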
type valueBuilder func(s string) cue.Value

type applyTask struct {
	component common.ApplicationComponent
	placement v1alpha1.PlacementDecision
	// healthy is nil until the task has been picked up for a health check.
	healthy *bool
}

func (t *applyTask) key() string {
	return fmt.Sprintf("%s/%s/%s/%s", t.placement.Cluster, t.placement.Namespace, t.component.ReplicaKey, t.component.Name)
}

func (t *applyTask) varKey(v string) string {
	return fmt.Sprintf("%s/%s/%s/%s", t.placement.Cluster, t.placement.Namespace, t.component.ReplicaKey, v)
}

func (t *applyTask) varKeyWithoutReplica(v string) string {
	return fmt.Sprintf("%s/%s/%s/%s", t.placement.Cluster, t.placement.Namespace, "", v)
}

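// The keys above follow the layout "<cluster>/<namespace>/<replicaKey>/<name>".
// As a hypothetical example, an output "host" of a component placed on cluster
// "prod" in namespace "default" with no replica key is cached under
// "prod/default//host", which is also the fallback key used below when a
// replica-specific lookup misses.
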
func (t *applyTask) getVar(from string, cache *pkgmaps.SyncMap[string, cue.Value]) cue.Value {
	key := t.varKey(from)
	keyWithNoReplica := t.varKeyWithoutReplica(from)
	var val cue.Value
	var ok bool
	if val, ok = cache.Get(key); !ok {
		if val, ok = cache.Get(keyWithNoReplica); !ok {
			return cue.Value{}
		}
	}
	return val
}

func (t *applyTask) fillInputs(inputs *pkgmaps.SyncMap[string, cue.Value], build valueBuilder) error {
	if len(t.component.Inputs) == 0 {
		return nil
	}
	var err error
	x := component2Value(t.component, build)
	for _, input := range t.component.Inputs {
		var inputVal cue.Value
		if inputVal = t.getVar(input.From, inputs); inputVal == (cue.Value{}) {
			return fmt.Errorf("input %s is not ready", input.From)
		}

		x, err = value.SetValueByScript(x, inputVal, fieldPathToComponent(input.ParameterKey))
		if err != nil {
			return errors.Wrap(err, "fill value to component")
		}
	}
	newComp, err := value2Component(x)
	if err != nil {
		return err
	}
	t.component = *newComp
	return nil
}

func (t *applyTask) generateOutput(output *unstructured.Unstructured, outputs []*unstructured.Unstructured, cache *pkgmaps.SyncMap[string, cue.Value], build valueBuilder) error {
	if len(t.component.Outputs) == 0 {
		return nil
	}

	var cueString string
	if output != nil {
		outputJSON, err := output.MarshalJSON()
		if err != nil {
			return errors.Wrap(err, "marshal output")
		}
		cueString += fmt.Sprintf("output:%s\n", string(outputJSON))
	}
	componentVal := build(cueString)

	for _, os := range outputs {
		name := os.GetLabels()[oam.TraitResource]
		if name != "" {
			componentVal = componentVal.FillPath(cue.ParsePath(fmt.Sprintf("outputs.%s", name)), os.Object)
		}
	}

	for _, o := range t.component.Outputs {
		pathToSetVar := t.varKey(o.Name)
		actualOutput := componentVal.LookupPath(cue.ParsePath(o.ValueFrom))
		if !actualOutput.Exists() {
			return workflowerrors.LookUpNotFoundErr(o.ValueFrom)
		}
		cache.Set(pathToSetVar, actualOutput)
	}
	return nil
}

func (t *applyTask) allDependsReady(healthyMap map[string]bool) bool {
	for _, d := range t.component.DependsOn {
		dKey := fmt.Sprintf("%s/%s/%s/%s", t.placement.Cluster, t.placement.Namespace, t.component.ReplicaKey, d)
		dKeyWithoutReplica := fmt.Sprintf("%s/%s/%s/%s", t.placement.Cluster, t.placement.Namespace, "", d)
		if !healthyMap[dKey] && !healthyMap[dKeyWithoutReplica] {
			return false
		}
	}
	return true
}

func (t *applyTask) allInputReady(cache *pkgmaps.SyncMap[string, cue.Value]) bool {
	for _, in := range t.component.Inputs {
		if val := t.getVar(in.From, cache); val == (cue.Value{}) {
			return false
		}
	}
	return true
}

type applyTaskResult struct {
	healthy bool
	err     error
	task    *applyTask
	// outputReady indicates whether all declared outputs are ready
	outputReady bool
}

// applyComponents applies components to the resolved placements. It first
// health-checks the tasks whose dependencies and inputs are ready (caching
// their outputs), then applies the remaining ready tasks, and reports the
// tasks still waiting on dependencies or inputs as pending.
// nolint:gocyclo
func applyComponents(ctx context.Context, apply oamprovidertypes.ComponentApply, healthCheck oamprovidertypes.ComponentHealthCheck, components []common.ApplicationComponent, placements []v1alpha1.PlacementDecision, parallelism int) (bool, string, error) {
	var tasks []*applyTask
	var cache = pkgmaps.NewSyncMap[string, cue.Value]()
	rootValue := cuecontext.New().CompileString("{}")
	if rootValue.Err() != nil {
		return false, "", rootValue.Err()
	}
	// Guard value building with a mutex so that the parallel health-check and
	// apply goroutines never compile CUE strings on the shared context at the
	// same time.
	var cueMutex sync.Mutex
	var makeValue = func(s string) cue.Value {
		cueMutex.Lock()
		defer cueMutex.Unlock()
		return rootValue.Context().CompileString(s)
	}

	taskHealthyMap := map[string]bool{}
	for _, comp := range components {
		for _, pl := range placements {
			tasks = append(tasks, &applyTask{component: comp, placement: pl})
		}
	}
	unhealthyResults := make([]*applyTaskResult, 0)
	// Each pass of the loop below can only unblock tasks whose dependencies
	// became healthy in an earlier pass, so len(tasks) passes are sufficient.
	maxHealthCheckTimes := len(tasks)
	outputNotReadyReasons := make([]string, 0)
	outputsReady := true
HealthCheck:
	for i := 0; i < maxHealthCheckTimes; i++ {
		checkTasks := make([]*applyTask, 0)
		for _, task := range tasks {
			if task.healthy == nil && task.allDependsReady(taskHealthyMap) && task.allInputReady(cache) {
				task.healthy = new(bool)
				err := task.fillInputs(cache, makeValue)
				if err != nil {
					taskHealthyMap[task.key()] = false
					unhealthyResults = append(unhealthyResults, &applyTaskResult{healthy: false, err: err, task: task})
					continue
				}
				checkTasks = append(checkTasks, task)
			}
		}
		if len(checkTasks) == 0 {
			break HealthCheck
		}
		checkResults := slices.ParMap[*applyTask, *applyTaskResult](checkTasks, func(task *applyTask) *applyTaskResult {
			healthy, _, output, outputs, err := healthCheck(ctx, task.component, nil, task.placement.Cluster, task.placement.Namespace)
			task.healthy = ptr.To(healthy)
			if healthy {
				if errOutput := task.generateOutput(output, outputs, cache, makeValue); errOutput != nil {
					var notFound workflowerrors.LookUpNotFoundErr
					if errors.As(errOutput, &notFound) && strings.HasPrefix(string(notFound), "outputs.") && len(outputs) == 0 {
						// PostDispatch traits are not rendered/applied yet, so trait outputs are unavailable.
						// Skip blocking the deploy step; the outputs will be populated after PostDispatch runs.
						errOutput = nil
					}
					err = errOutput
				}
			}
			return &applyTaskResult{healthy: healthy, err: err, task: task, outputReady: true}
		}, slices.Parallelism(parallelism))

		for _, res := range checkResults {
			taskHealthyMap[res.task.key()] = res.healthy
			if !res.outputReady {
				outputsReady = false
				outputNotReadyReasons = append(outputNotReadyReasons, fmt.Sprintf("%s outputs not ready", res.task.key()))
			}
			if !res.healthy || res.err != nil {
				unhealthyResults = append(unhealthyResults, res)
			}
		}
	}

	var pendingTasks []*applyTask
	var todoTasks []*applyTask

	for _, task := range tasks {
		if healthy, ok := taskHealthyMap[task.key()]; healthy && ok {
			continue
		}
		if task.allDependsReady(taskHealthyMap) && task.allInputReady(cache) {
			todoTasks = append(todoTasks, task)
		} else {
			pendingTasks = append(pendingTasks, task)
		}
	}
	var results []*applyTaskResult
	if len(todoTasks) > 0 {
		results = slices.ParMap[*applyTask, *applyTaskResult](todoTasks, func(task *applyTask) *applyTaskResult {
			err := task.fillInputs(cache, makeValue)
			if err != nil {
				return &applyTaskResult{healthy: false, err: err, task: task, outputReady: true}
			}
			_, _, healthy, err := apply(ctx, task.component, nil, task.placement.Cluster, task.placement.Namespace)
			return &applyTaskResult{healthy: healthy, err: err, task: task, outputReady: true}
		}, slices.Parallelism(parallelism))
	}
	var errs []error
	var allHealthy = true
	var reasons []string
	for _, res := range unhealthyResults {
		if res.err != nil {
			errs = append(errs, fmt.Errorf("error health check from %s: %w", res.task.key(), res.err))
		}
	}
	for _, res := range results {
		if res.err != nil {
			errs = append(errs, fmt.Errorf("error encountered in cluster %s: %w", res.task.placement.Cluster, res.err))
		}
		if !res.healthy {
			allHealthy = false
			reasons = append(reasons, fmt.Sprintf("%s is not healthy", res.task.key()))
		}
	}

	reasons = append(reasons, outputNotReadyReasons...)

	for _, t := range pendingTasks {
		reasons = append(reasons, fmt.Sprintf("%s is waiting for dependents", t.key()))
	}

	return allHealthy && outputsReady && len(pendingTasks) == 0, strings.Join(reasons, ","), velaerrors.AggregateErrors(errs)
}

func fieldPathToComponent(input string) string {
	return fmt.Sprintf("properties.%s", strings.TrimSpace(input))
}

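// For instance, an input whose ParameterKey is "image" is written to the
// component value at "properties.image" before the component is decoded back
// in fillInputs.
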
func component2Value(comp common.ApplicationComponent, build valueBuilder) cue.Value {
	x := build("")
	x = x.FillPath(cue.ParsePath(""), comp)
	// Component.ReplicaKey has no JSON tag, so we need to set it manually.
	x = x.FillPath(cue.ParsePath("replicaKey"), comp.ReplicaKey)
	return x
}

func value2Component(v cue.Value) (*common.ApplicationComponent, error) {
	var comp common.ApplicationComponent
	err := value.UnmarshalTo(v, &comp)
	if err != nil {
		return nil, err
	}
	if rk, err := v.LookupPath(cue.ParsePath("replicaKey")).String(); err == nil {
		comp.ReplicaKey = rk
	}
	return &comp, nil
}