add e2e test for vela init
modify e2e setup to wait for the oam-runtime pod to be running
add a k8s client in the e2e test
refine tracking of "vela init" status

Signed-off-by: roy wang <seiwy2010@gmail.com>
Makefile | 1

@@ -75,6 +75,7 @@ docker-push:
e2e-setup:
    ginkgo version
    ginkgo -v -r e2e/setup
    kubectl wait --for=condition=Ready pod -l app.kubernetes.io/name=vela-core,app.kubernetes.io/instance=kubevela -n vela-system --timeout=300s
    bin/vela dashboard &

e2e-test:
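The kubectl wait line above is the readiness gate this commit adds before the e2e suite runs. For illustration only, a rough Go equivalent using the controller-runtime client the suite builds below (newK8sClient) could look like the following sketch; the helper name waitForVelaCoreReady is hypothetical, while the namespace, label selector, and 300s budget mirror the Makefile target:

package e2e

import (
    "context"
    "fmt"
    "time"

    corev1 "k8s.io/api/core/v1"
    "sigs.k8s.io/controller-runtime/pkg/client"
)

// waitForVelaCoreReady polls until a vela-core pod reports the Ready condition,
// or gives up after the same 300s budget the Makefile uses.
func waitForVelaCoreReady(ctx context.Context, c client.Client) error {
    deadline := time.Now().Add(300 * time.Second)
    for time.Now().Before(deadline) {
        pods := &corev1.PodList{}
        err := c.List(ctx, pods,
            client.InNamespace("vela-system"),
            client.MatchingLabels{
                "app.kubernetes.io/name":     "vela-core",
                "app.kubernetes.io/instance": "kubevela",
            })
        if err == nil {
            for _, p := range pods.Items {
                for _, cond := range p.Status.Conditions {
                    if cond.Type == corev1.PodReady && cond.Status == corev1.ConditionTrue {
                        return nil
                    }
                }
            }
        }
        time.Sleep(2 * time.Second)
    }
    return fmt.Errorf("no Ready vela-core pod in vela-system within 300s")
}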
@@ -13,6 +13,7 @@ var (
    workloadType    = "webservice"
    applicationName = "app-basic"
    traitAlias      = "scale"
    appNameForInit  = "initmyapp"
)

var _ = ginkgo.Describe("Application", func() {
@@ -26,7 +27,8 @@ var _ = ginkgo.Describe("Application", func() {
    e2e.TraitManualScalerAttachContext("vela attach scale trait", traitAlias, applicationName)
    e2e.ApplicationShowContext("app show", applicationName, workloadType)
    e2e.ApplicationStatusContext("app status", applicationName, workloadType)
    //TODO(roywang) fix e2e-test for 'vela comp status'
    // e2e.ApplicationCompStatusContext("comp status", applicationName, workloadType)
    e2e.ApplicationCompStatusContext("comp status", applicationName, workloadType, envName)
    e2e.ApplicationInitIntercativeCliContext("init", appNameForInit, workloadType)
    e2e.WorkloadDeleteContext("delete", applicationName)
    e2e.WorkloadDeleteContext("delete", appNameForInit)
})
e2e/cli.go | 56

@@ -7,9 +7,17 @@ import (
    "strings"
    "time"

    "github.com/Netflix/go-expect"
    "github.com/hinshun/vt10x"
    "github.com/onsi/ginkgo"
    "github.com/onsi/gomega"
    "github.com/onsi/gomega/gexec"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/client/config"

    oamcore "github.com/crossplane/oam-kubernetes-runtime/apis/core"
    k8sruntime "k8s.io/apimachinery/pkg/runtime"
    clientgoscheme "k8s.io/client-go/kubernetes/scheme"
)

var rudrPath = GetCliBinary()

@@ -48,9 +56,57 @@ func AsyncExec(cli string) (*gexec.Session, error) {
    return session, err
}

func InteractiveExec(cli string, consoleFn func(*expect.Console)) (string, error) {
    var output []byte
    console, _, err := vt10x.NewVT10XConsole(expect.WithStdout(ginkgo.GinkgoWriter))
    gomega.Expect(err).NotTo(gomega.HaveOccurred())
    defer console.Close()
    doneC := make(chan struct{})

    go func() {
        defer ginkgo.GinkgoRecover()
        defer close(doneC)
        consoleFn(console)
    }()

    c := strings.Fields(cli)
    commandName := path.Join(rudrPath, c[0])
    command := exec.Command(commandName, c[1:]...)
    command.Stdin = console.Tty()

    session, err := gexec.Start(command, console.Tty(), console.Tty())
    s := session.Wait(30 * time.Second)
    console.Tty().Close()
    <-doneC
    if err != nil {
        return string(output), err
    }
    return string(s.Out.Contents()) + string(s.Err.Contents()), nil
}
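InteractiveExec drives the CLI through a vt10x pseudo-terminal: the callback scripts the prompts in its own goroutine while the command reads from and writes to the same TTY, and closing the TTY after Wait unblocks the callback's ExpectEOF. A minimal usage sketch, illustrative only and assuming this e2e package's imports; the prompt text and the "Initializing" assertion mirror the full init test further down:

var _ = ginkgo.It("answers a vela init prompt", func() {
    output, err := InteractiveExec("vela init", func(c *expect.Console) {
        // Script a single prompt, then wait for the CLI to exit.
        _, _ = c.ExpectString("What would you like to name your application")
        _, _ = c.SendLine("initmyapp")
        _, _ = c.ExpectEOF()
    })
    gomega.Expect(err).NotTo(gomega.HaveOccurred())
    gomega.Expect(output).To(gomega.ContainSubstring("Initializing"))
})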
func BeforeSuit() {
    _, err := Exec("vela install")
    gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
    //Without this line, will hit issue like `<string>: Error: unknown command "scale" for "vela"`
    _, _ = Exec("vela system update")
}

func newK8sClient() (client.Client, error) {
    conf, err := config.GetConfig()
    if err != nil {
        return nil, err
    }
    scheme := k8sruntime.NewScheme()
    if err := clientgoscheme.AddToScheme(scheme); err != nil {
        return nil, err
    }
    if err := oamcore.AddToScheme(scheme); err != nil {
        return nil, err
    }

    k8sclient, err := client.New(conf, client.Options{Scheme: scheme})
    if err != nil {
        return nil, err
    }
    return k8sclient, nil
}
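newK8sClient builds a controller-runtime client whose scheme registers both the built-in Kubernetes types and the OAM core CRDs, so specs can read ApplicationConfiguration objects directly. A usage sketch, not part of this commit and assuming it runs inside a ginkgo spec with this package's imports; it mirrors the comp-status check below by asserting that the AppConfig created for the init test exists in the default namespace:

var _ = ginkgo.It("finds the AppConfig created by vela init", func() {
    k8sclient, err := newK8sClient()
    gomega.Expect(err).NotTo(gomega.HaveOccurred())

    // "initmyapp" is the application name the interactive init test uses.
    appConfig := &corev1alpha2.ApplicationConfiguration{}
    err = k8sclient.Get(context.Background(),
        client.ObjectKey{Name: "initmyapp", Namespace: "default"}, appConfig)
    gomega.Expect(err).NotTo(gomega.HaveOccurred())
})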
@@ -1,6 +1,7 @@
package e2e

import (
    ctx "context"
    "encoding/json"
    "fmt"
    "io/ioutil"
@@ -8,8 +9,11 @@ import (
    "strings"
    "time"

    "github.com/Netflix/go-expect"
    corev1alpha2 "github.com/crossplane/oam-kubernetes-runtime/apis/core/v1alpha2"
    "github.com/oam-dev/kubevela/pkg/server/apis"
    "github.com/oam-dev/kubevela/pkg/server/util"
    "sigs.k8s.io/controller-runtime/pkg/client"

    "github.com/onsi/ginkgo"
    "github.com/onsi/gomega"
@@ -187,9 +191,20 @@ var (
        })
    }

    ApplicationCompStatusContext = func(context string, applicationName string, workloadType string) bool {
    ApplicationCompStatusContext = func(context string, applicationName, workloadType, envName string) bool {
        return ginkgo.Context(context, func() {
            ginkgo.It("should get status for the component", func() {
                ginkgo.By("init new k8s client")
                k8sclient, err := newK8sClient()
                gomega.Expect(err).NotTo(gomega.HaveOccurred())

                ginkgo.By("check AppConfig reconciled ready")
                gomega.Eventually(func() int {
                    appConfig := &corev1alpha2.ApplicationConfiguration{}
                    _ = k8sclient.Get(ctx.Background(), client.ObjectKey{Name: applicationName, Namespace: "default"}, appConfig)
                    return len(appConfig.Status.Workloads)
                }, 120*time.Second, 1*time.Second).ShouldNot(gomega.Equal(0))

                cli := fmt.Sprintf("vela comp status %s", applicationName)
                output, err := LongTimeExec(cli, 120*time.Second)
                gomega.Expect(err).NotTo(gomega.HaveOccurred())
@@ -212,6 +227,56 @@ var (
        })
    }

    ApplicationInitIntercativeCliContext = func(context string, appName string, workloadType string) bool {
        return ginkgo.Context(context, func() {
            ginkgo.It("should init app through interactive questions", func() {
                cli := "vela init"
                output, err := InteractiveExec(cli, func(c *expect.Console) {
                    data := []struct {
                        q, a string
                    }{
                        {
                            q: "Do you want to setup a domain for web service: ",
                            a: "testdomain",
                        },
                        {
                            q: "Provide an email for production certification: ",
                            a: "test@mail",
                        },
                        {
                            q: "What would you like to name your application: ",
                            a: appName,
                        },
                        {
                            q: "webservice",
                            a: workloadType,
                        },
                        {
                            q: "What would you name this webservice: ",
                            a: "mysvc",
                        },
                        {
                            q: "specify app image ",
                            a: "nginx:latest",
                        },
                        {
                            q: "specify port for container ",
                            a: "8080",
                        },
                    }
                    for _, qa := range data {
                        _, err := c.ExpectString(qa.q)
                        gomega.Expect(err).NotTo(gomega.HaveOccurred())
                        _, err = c.SendLine(qa.a)
                        gomega.Expect(err).NotTo(gomega.HaveOccurred())
                    }
                    _, _ = c.ExpectEOF()
                })
                gomega.Expect(err).NotTo(gomega.HaveOccurred())
                gomega.Expect(output).To(gomega.ContainSubstring("Initializing"))
            })
        })
    }
    // APIEnvInitContext used for test api env
    APIEnvInitContext = func(context string, envMeta apis.Environment) bool {
        return ginkgo.Context("Post /envs/", func() {
go.mod | 4

@@ -5,10 +5,11 @@ go 1.13
require (
    cuelang.org/go v0.2.2
    github.com/AlecAivazis/survey/v2 v2.1.1
    github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8
    github.com/briandowns/spinner v1.11.1
    github.com/coreos/prometheus-operator v0.41.1
    github.com/crossplane/crossplane-runtime v0.9.0
    github.com/crossplane/oam-kubernetes-runtime v0.1.1-0.20200923120826-ef46dd6ddc35
    github.com/crossplane/oam-kubernetes-runtime v0.2.1
    github.com/fatih/color v1.9.0
    github.com/gertd/go-pluralize v0.1.7
    github.com/ghodss/yaml v1.0.0
@@ -18,6 +19,7 @@ require (
    github.com/google/go-cmp v0.5.2
    github.com/google/go-github/v32 v32.1.0
    github.com/gosuri/uitable v0.0.4
    github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174
    github.com/jetstack/cert-manager v0.14.3
    github.com/kyokomi/emoji v2.2.4+incompatible
    github.com/mholt/archiver/v3 v3.3.0
go.sum | 7

@@ -284,10 +284,8 @@ github.com/crossplane/crossplane-runtime v0.8.0/go.mod h1:gNY/21MLBaz5KNP7hmfXbB
github.com/crossplane/crossplane-runtime v0.9.0 h1:K6/tLhXKzhsEUUddTvEWWnQLLrawWyw1ptNK7NBDpDU=
github.com/crossplane/crossplane-runtime v0.9.0/go.mod h1:gNY/21MLBaz5KNP7hmfXbBXp8reYRbwY5B/97Kp4tgM=
github.com/crossplane/crossplane-tools v0.0.0-20200219001116-bb8b2ce46330/go.mod h1:C735A9X0x0lR8iGVOOxb49Mt70Ua4EM2b7PGaRPBLd4=
github.com/crossplane/oam-kubernetes-runtime v0.1.1-0.20200909070723-78b84f2c4799 h1:424LLFb7C8Qvy3wFZZ7HzmawlCeF32PNRTXXK5rKOk0=
github.com/crossplane/oam-kubernetes-runtime v0.1.1-0.20200909070723-78b84f2c4799/go.mod h1:UZ4eXkl/e4lKrAhK81Pz1sR90wqeuE9PgdwVXr8kDgI=
github.com/crossplane/oam-kubernetes-runtime v0.1.1-0.20200923120826-ef46dd6ddc35 h1:w2Ebvm2HPsWINOW+z4sNTh1tmA7NhYPf4nCdVLbcIT4=
github.com/crossplane/oam-kubernetes-runtime v0.1.1-0.20200923120826-ef46dd6ddc35/go.mod h1:D+MDS5vrJZWEA5cxr5kyzCSRQwrt1hLD3ONgC7sVMmc=
github.com/crossplane/oam-kubernetes-runtime v0.2.1 h1:C0kiSo9Tza/T+OhnjrP1yrQL+huPGxlINK7xK9VCYYo=
github.com/crossplane/oam-kubernetes-runtime v0.2.1/go.mod h1:D+MDS5vrJZWEA5cxr5kyzCSRQwrt1hLD3ONgC7sVMmc=
github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg=
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
github.com/daixiang0/gci v0.0.0-20200727065011-66f1df783cb2/go.mod h1:+AV8KmHTGxxwp/pY84TLQfFKp2vuKXXJVzF3kD/hfR4=
@@ -892,7 +890,6 @@ github.com/kylelemons/godebug v0.0.0-20160406211939-eadb3ce320cb/go.mod h1:B69LE
github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/kyoh86/exportloopref v0.1.7/go.mod h1:h1rDl2Kdj97+Kwh4gdz3ujE7XHmH51Q0lUiZ1z4NLj8=
github.com/kyokomi/emoji v1.5.1 h1:qp9dub1mW7C4MlvoRENH6EAENb9skEFOvIEbp1Waj38=
github.com/kyokomi/emoji v2.2.4+incompatible h1:np0woGKwx9LiHAQmwZx79Oc0rHpNw3o+3evou4BEPv4=
github.com/kyokomi/emoji v2.2.4+incompatible/go.mod h1:mZ6aGCD7yk8j6QY6KICwnZ2pxoszVseX1DNoGtU2tBA=
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw=
@@ -4,16 +4,12 @@ import (
    "context"
    "fmt"
    "strconv"
    "time"

    "cuelang.org/go/cue"
    "github.com/AlecAivazis/survey/v2"
    "github.com/briandowns/spinner"
    "github.com/fatih/color"
    "github.com/kyokomi/emoji"
    "github.com/spf13/cobra"
    "github.com/spf13/pflag"
    "k8s.io/apimachinery/pkg/util/duration"
    "sigs.k8s.io/controller-runtime/pkg/client"

    "github.com/oam-dev/kubevela/api/types"
@@ -34,26 +30,6 @@ type appInitOptions struct {
    workloadType string
}

type CompStatus int

const (
    compStatusInitializing CompStatus = iota
    compStatusInitFail
    compStatusInitialized
    compStatusDeploying
    compStatusDeployFail
    compStatusDeployed
    compStatusHealthChecking
    compStatusHealthCheckDone
    compStatusUnknown
)

var (
    emojiSucceed = emoji.Sprint(":check_mark_button:")
    emojiFail    = emoji.Sprint(":cross_mark:")
    emojiTimeout = emoji.Sprint(":heavy_exclamation_mark:")
)

// NewInitCommand init application
func NewInitCommand(c types.Args, ioStreams cmdutil.IOStreams) *cobra.Command {
    o := &appInitOptions{IOStreams: ioStreams}
@@ -95,67 +71,24 @@ func NewInitCommand(c types.Args, ioStreams cmdutil.IOStreams) *cobra.Command {
            return err
        }

        tInit := time.Now()
        sInit := spinner.New(spinner.CharSets[14], 100*time.Millisecond,
            spinner.WithColor("green"),
            spinner.WithFinalMSG(""),
            spinner.WithHiddenCursor(true),
            spinner.WithSuffix(color.New(color.Bold, color.FgGreen).Sprintf(" %s", "Initializing ...")))
        sInit.Start()
    TrackInitLoop:
        for {
            time.Sleep(2 * time.Second)
            if time.Since(tInit) > initTimeout {
                ioStreams.Info(red.Sprintf("\n%sInitialization Timeout After %s!", emojiTimeout, duration.HumanDuration(time.Since(tInit))))
                ioStreams.Info(red.Sprint("Please make sure oam-core-controller is installed."))
                sInit.Stop()
                return nil
            }
            initStatus, failMsg, err := trackInitializeStatus(context.Background(), o.client, o.workloadName, o.appName, o.Env)
            if err != nil {
                return err
            }
            switch initStatus {
            case compStatusInitializing:
                continue
            case compStatusInitialized:
                ioStreams.Info(green.Sprintf("\n%sInitialization Succeed!", emojiSucceed))
                sInit.Stop()
                break TrackInitLoop
            case compStatusInitFail:
                ioStreams.Info(red.Sprintf("\n%sInitialization Failed!", emojiFail))
                ioStreams.Info(red.Sprintf("Reason: %s", failMsg))
                sInit.Stop()
                return nil
            }
        ctx := context.Background()
        initStatus, err := printTrackingInitStatus(ctx, o.client, o.IOStreams, o.workloadName, o.appName, o.Env)
        if err != nil {
            return err
        }
        if initStatus != compStatusInitialized {
            return nil
        }

        sDeploy := spinner.New(spinner.CharSets[14], 100*time.Millisecond,
            spinner.WithColor("green"),
            spinner.WithHiddenCursor(true),
            spinner.WithSuffix(color.New(color.Bold, color.FgGreen).Sprintf(" %s", "Deploying ...")))
        sDeploy.Start()
    TrackDeployLoop:
        for {
            time.Sleep(2 * time.Second)
            deployStatus, failMsg, err := trackDeployStatus(context.Background(), o.client, o.workloadName, o.appName, o.Env)
            if err != nil {
                return err
            }
            switch deployStatus {
            case compStatusDeploying:
                continue
            case compStatusDeployed:
                ioStreams.Info(green.Sprintf("\n%sDeployment Succeed!", emojiSucceed))
                sDeploy.Stop()
                break TrackDeployLoop
            case compStatusDeployFail:
                ioStreams.Info(red.Sprintf("\n%sDeployment Failed!", emojiFail))
                ioStreams.Info(red.Sprintf("Reason: %s", failMsg))
                sDeploy.Stop()
                return nil
            }
        deployStatus, err := printTrackingDeployStatus(ctx, o.client, o.IOStreams, o.workloadName, o.appName, o.Env)
        if err != nil {
            return err
        }
        if deployStatus != compStatusDeployed {
            return nil
        }

        //TODO(wonderflow) Wait for app running, and print trait info such as route, domain
        return printComponentStatus(context.Background(), o.client, o.IOStreams, o.workloadName, o.appName, o.Env)
    },
}
@@ -14,11 +14,13 @@ import (
    "github.com/fatih/color"
    "github.com/ghodss/yaml"
    "github.com/gosuri/uitable"
    "github.com/kyokomi/emoji"
    "github.com/pkg/errors"
    "github.com/spf13/cobra"
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/util/duration"
    "sigs.k8s.io/controller-runtime/pkg/client"

    "github.com/oam-dev/kubevela/api/types"
@@ -43,17 +45,36 @@ const (
    HealthStatusUnknown = v1alpha2.StatusUnknown
)

const (
    ErrNotLoadAppConfig  = "cannot load the application"
    ErrFmtNotInitialized = "oam-core-controller cannot initilize the component: %s"
)

// WorkloadHealthCondition holds health status of any resource
type WorkloadHealthCondition = v1alpha2.WorkloadHealthCondition

// ScopeHealthCondition holds health condition of a scope
type ScopeHealthCondition = v1alpha2.ScopeHealthCondition

var (
    kindHealthScope = reflect.TypeOf(v1alpha2.HealthScope{}).Name()
)

// CompStatus represents the status of a component during "vela init"
type CompStatus int

const (
    compStatusInitializing CompStatus = iota
    compStatusInitFail
    compStatusInitialized
    compStatusDeploying
    compStatusDeployFail
    compStatusDeployed
    compStatusHealthChecking
    compStatusHealthCheckDone
    compStatusUnknown
)

const (
    ErrNotLoadAppConfig  = "cannot load the application"
    ErrFmtNotInitialized = "oam-core-controller cannot initilize the component: %s"
)

const (
    firstElemPrefix = `├─`
    lastElemPrefix  = `└─`
@@ -70,10 +91,13 @@ var (
)

var (
    kindHealthScope = reflect.TypeOf(v1alpha2.HealthScope{}).Name()
    emojiSucceed    = emoji.Sprint(":check_mark_button:")
    emojiFail       = emoji.Sprint(":cross_mark:")
    emojiTimeout    = emoji.Sprint(":heavy_exclamation_mark:")
)

const (
    trackingInterval      time.Duration = 1 * time.Second
    initTimeout           time.Duration = 30 * time.Second
    deployTimeout         time.Duration = 30 * time.Second
    healthCheckBufferTime time.Duration = 120 * time.Second
@@ -226,7 +250,7 @@ func NewCompStatusCommand(c types.Args, ioStreams cmdutil.IOStreams) *cobra.Comm
}

func printComponentStatus(ctx context.Context, c client.Client, ioStreams cmdutil.IOStreams, compName, appName string, env *types.EnvMeta) error {
    app, appConfig, err := getAppAndApplicationConfiguration(ctx, c, compName, appName, env)
    app, appConfig, err := getApp(ctx, c, compName, appName, env)
    if err != nil {
        return err
    }
@@ -236,25 +260,29 @@ func printComponentStatus(ctx context.Context, c client.Client, ioStreams cmduti

    wlStatus, foundWlStatus := getWorkloadStatusFromAppConfig(appConfig, compName)
    if !foundWlStatus {
        ioStreams.Info("\nThe component has not been initialized by oam-core-controller correctly.")
        appConfigReconcileStatus := appConfig.Status.GetCondition(runtimev1alpha1.TypeSynced).Status
        switch appConfigReconcileStatus {
        case corev1.ConditionUnknown:
            ioStreams.Info("\nUnknown error occurs during component initialization. \nPlease check OAM controller ...")
        case corev1.ConditionTrue:
            ioStreams.Info("\nThe component is still under initialization, please try again later ...")
        case corev1.ConditionFalse:
            appConfigConditionMsg := appConfig.Status.GetCondition(runtimev1alpha1.TypeSynced).Message
            ioStreams.Info("\nError occurs in OAM runtime during component initialization.")
            ioStreams.Infof("\nOAM controller condition message: %s \n", appConfigConditionMsg)
        }
        return nil
    }

    var (
        healthColor  *color.Color
        healthStatus HealthStatus
        healthInfo   string
        workloadType string
    )
    var healthInfo string
    var healthStatus HealthStatus

    sHealthCheck := spinner.New(spinner.CharSets[14], 100*time.Millisecond,
        spinner.WithColor("green"),
        spinner.WithSuffix(color.New(color.Bold, color.FgGreen).Sprintf(" %s", "Checking health status ...")))
    sHealthCheck := newTrackingSpinner("Checking health status ...")
    sHealthCheck.Start()

HealthCheckLoop:
    for {
        time.Sleep(2 * time.Second)
        time.Sleep(trackingInterval)
        var healthcheckStatus CompStatus
        healthcheckStatus, healthStatus, healthInfo, err = trackHealthCheckingStatus(ctx, c, compName, appName, env)
        if err != nil {
@@ -267,10 +295,11 @@ HealthCheckLoop:
            break HealthCheckLoop
        }
    }

    ioStreams.Infof("Showing status of Component %s deployed in Environment %s\n", compName, env.Name)
    ioStreams.Infof(white.Sprint("Component Status:\n"))
    workloadType = wlStatus.Reference.Kind
    healthColor = getHealthStatusColor(healthStatus)
    workloadType := wlStatus.Reference.Kind
    healthColor := getHealthStatusColor(healthStatus)
    healthInfo = strings.ReplaceAll(healthInfo, "\n", "\n\t") // formart healthInfo output
    ioStreams.Infof("\tName: %s %s(type) %s %s\n",
        compName, workloadType, healthColor.Sprint(healthStatus), healthColor.Sprint(healthInfo))
@@ -313,38 +342,6 @@ HealthCheckLoop:
    return nil
}

func printPrefix(p string) string {
    if strings.HasSuffix(p, firstElemPrefix) {
        p = strings.Replace(p, firstElemPrefix, pipe, strings.Count(p, firstElemPrefix)-1)
    } else {
        p = strings.ReplaceAll(p, firstElemPrefix, pipe)
    }

    if strings.HasSuffix(p, lastElemPrefix) {
        p = strings.Replace(p, lastElemPrefix, strings.Repeat(" ", len([]rune(lastElemPrefix))), strings.Count(p, lastElemPrefix)-1)
    } else {
        p = strings.ReplaceAll(p, lastElemPrefix, strings.Repeat(" ", len([]rune(lastElemPrefix))))
    }
    return p
}

func getHealthStatusColor(s HealthStatus) *color.Color {
    var c *color.Color
    switch s {
    case HealthStatusHealthy:
        c = green
    case HealthStatusUnhealthy:
        c = red
    case HealthStatusUnknown:
        c = yellow
    case HealthStatusNotDiagnosed:
        c = yellow
    default:
        c = red
    }
    return c
}

func getWorkloadInstanceStatusAndCreationTime(ctx context.Context, c client.Client, ns string, wlRef runtimev1alpha1.TypedReference) (string, bool, metav1.Time, error) {
    wlUnstruct := unstructured.Unstructured{}
    wlUnstruct.SetGroupVersionKind(wlRef.GroupVersionKind())
@@ -365,8 +362,43 @@ func getWorkloadInstanceStatusAndCreationTime(ctx context.Context, c client.Clie
    return "", false, ct, nil
}

func printTrackingInitStatus(ctx context.Context, c client.Client, ioStreams cmdutil.IOStreams, compName, appName string, env *types.EnvMeta) (CompStatus, error) {
    tInit := time.Now()
    sInit := newTrackingSpinner("Initializing ...")
    sInit.Start()
TrackInitLoop:
    for {
        time.Sleep(trackingInterval)
        if time.Since(tInit) > initTimeout {
            ioStreams.Info(red.Sprintf("\n%sInitialization Timeout After %s!",
                emojiTimeout, duration.HumanDuration(time.Since(tInit))))
            ioStreams.Info(red.Sprint("Please make sure oam-core-controller is installed."))
            sInit.Stop()
            return compStatusUnknown, nil
        }
        initStatus, failMsg, err := trackInitializeStatus(ctx, c, compName, appName, env)
        if err != nil {
            return compStatusUnknown, err
        }
        switch initStatus {
        case compStatusInitializing:
            continue
        case compStatusInitialized:
            ioStreams.Info(green.Sprintf("\n%sInitialization Succeed!", emojiSucceed))
            sInit.Stop()
            break TrackInitLoop
        case compStatusInitFail:
            ioStreams.Info(red.Sprintf("\n%sInitialization Failed!", emojiFail))
            ioStreams.Info(red.Sprintf("Reason: %s", failMsg))
            sInit.Stop()
            return compStatusInitFail, nil
        }
    }
    return compStatusInitialized, nil
}

func trackInitializeStatus(ctx context.Context, c client.Client, compName, appName string, env *types.EnvMeta) (CompStatus, string, error) {
    app, appConfig, err := getAppAndApplicationConfiguration(ctx, c, compName, appName, env)
    app, appConfig, err := getApp(ctx, c, compName, appName, env)
    if err != nil {
        return compStatusUnknown, "", err
    }
@@ -390,8 +422,35 @@ func trackInitializeStatus(ctx context.Context, c client.Client, compName, appNa
    return compStatusInitializing, "", nil
}

func printTrackingDeployStatus(ctx context.Context, c client.Client, ioStreams cmdutil.IOStreams, compName, appName string, env *types.EnvMeta) (CompStatus, error) {
    sDeploy := newTrackingSpinner("Deploying ...")
    sDeploy.Start()
TrackDeployLoop:
    for {
        time.Sleep(trackingInterval)
        deployStatus, failMsg, err := trackDeployStatus(ctx, c, compName, appName, env)
        if err != nil {
            return compStatusUnknown, err
        }
        switch deployStatus {
        case compStatusDeploying:
            continue
        case compStatusDeployed:
            ioStreams.Info(green.Sprintf("\n%sDeployment Succeed!", emojiSucceed))
            sDeploy.Stop()
            break TrackDeployLoop
        case compStatusDeployFail:
            ioStreams.Info(red.Sprintf("\n%sDeployment Failed!", emojiFail))
            ioStreams.Info(red.Sprintf("Reason: %s", failMsg))
            sDeploy.Stop()
            return compStatusDeployFail, nil
        }
    }
    return compStatusDeployed, nil
}

func trackDeployStatus(ctx context.Context, c client.Client, compName, appName string, env *types.EnvMeta) (CompStatus, string, error) {
    app, appConfig, err := getAppAndApplicationConfiguration(ctx, c, compName, appName, env)
    app, appConfig, err := getApp(ctx, c, compName, appName, env)
    if err != nil {
        return compStatusUnknown, "", err
    }
@@ -407,17 +466,19 @@ func trackDeployStatus(ctx context.Context, c client.Client, compName, appName s
    }
    wlRef := wlStatus.Reference

    //TODO(roywang) temporarily use status to judge workload controller is running
    // even not every workload has `status` field
    //TODO(roywang) check whether traits are ready
    _, foundStatus, ct, err := getWorkloadInstanceStatusAndCreationTime(ctx, c, env.Namespace, wlRef)
    if err != nil {
        return compStatusUnknown, "", err
    }
    if foundStatus {
        //TODO(roywang) temporarily use status to judge workload controller is running
        // even not every workload has `status` field
        return compStatusDeployed, "", nil
    }

    // use age to judge whether the worload controller is running
    // if not found workload status in AppConfig
    // then use age to check whether the worload controller is running
    if time.Since(ct.Time) > deployTimeout {
        return compStatusDeployFail, fmt.Sprintf("The controller of [%s] is not installed or running.",
            wlStatus.Reference.GroupVersionKind().String()), nil
@@ -426,7 +487,7 @@ func trackDeployStatus(ctx context.Context, c client.Client, compName, appName s
}

func trackHealthCheckingStatus(ctx context.Context, c client.Client, compName, appName string, env *types.EnvMeta) (CompStatus, HealthStatus, string, error) {
    app, appConfig, err := getAppAndApplicationConfiguration(ctx, c, compName, appName, env)
    app, appConfig, err := getApp(ctx, c, compName, appName, env)
    if err != nil {
        return compStatusUnknown, HealthStatusNotDiagnosed, "", err
    }
@@ -488,7 +549,7 @@ func trackHealthCheckingStatus(ctx context.Context, c client.Client, compName, a
    return compStatusHealthCheckDone, healthStatus, wlhc.Diagnosis, nil
}

func getAppAndApplicationConfiguration(ctx context.Context, c client.Client, compName, appName string, env *types.EnvMeta) (*application.Application, *v1alpha2.ApplicationConfiguration, error) {
func getApp(ctx context.Context, c client.Client, compName, appName string, env *types.EnvMeta) (*application.Application, *v1alpha2.ApplicationConfiguration, error) {
    var app *application.Application
    var err error
    if appName != "" {
@@ -522,3 +583,45 @@ func getWorkloadStatusFromAppConfig(appConfig *v1alpha2.ApplicationConfiguration
    }
    return wlStatus, foundWlStatus
}

func newTrackingSpinner(suffix string) *spinner.Spinner {
    suffixColor := color.New(color.Bold, color.FgGreen)
    return spinner.New(
        spinner.CharSets[14],
        100*time.Millisecond,
        spinner.WithColor("green"),
        spinner.WithHiddenCursor(true),
        spinner.WithSuffix(suffixColor.Sprintf(" %s", suffix)))
}

func printPrefix(p string) string {
    if strings.HasSuffix(p, firstElemPrefix) {
        p = strings.Replace(p, firstElemPrefix, pipe, strings.Count(p, firstElemPrefix)-1)
    } else {
        p = strings.ReplaceAll(p, firstElemPrefix, pipe)
    }

    if strings.HasSuffix(p, lastElemPrefix) {
        p = strings.Replace(p, lastElemPrefix, strings.Repeat(" ", len([]rune(lastElemPrefix))), strings.Count(p, lastElemPrefix)-1)
    } else {
        p = strings.ReplaceAll(p, lastElemPrefix, strings.Repeat(" ", len([]rune(lastElemPrefix))))
    }
    return p
}

func getHealthStatusColor(s HealthStatus) *color.Color {
    var c *color.Color
    switch s {
    case HealthStatusHealthy:
        c = green
    case HealthStatusUnhealthy:
        c = red
    case HealthStatusUnknown:
        c = yellow
    case HealthStatusNotDiagnosed:
        c = yellow
    default:
        c = red
    }
    return c
}