Mirror of https://github.com/kubevela/kubevela.git, synced 2026-02-14 18:10:21 +00:00
Feat: refactor CLI commands related to resources (#4500)
* Feat: refactor CLI commands related to resources
* Fix: remove the old test case.
* Fix: e2e test
* Fix: optimize test cases
* Feat: rename 'vela pods' to 'vela status --pod'
* Feat: optimize the e2e test case
* Fix: sort the objects
* Fix: optimize the e2e test case
* Fix: list the pod by the labels
* Fix: order the tree resource
* Fix: set multicluster config

Signed-off-by: barnettZQG <barnett.zqg@gmail.com>
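The user-visible effect of the rename is that pod details are reached through the status command rather than a dedicated subcommand; with the e2e application defined later in this diff, the new invocation would be something like (a usage sketch inferred from the commit message, not a transcript of the CLI help):

    vela status nginx-vela --pod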
charts/vela-core/templates/velaql/component-pod.yaml (new file, 75 lines)
@@ -0,0 +1,75 @@
apiVersion: v1
data:
  template: |
    import (
      "vela/ql"
    )

    parameter: {
      appName:    string
      appNs:      string
      name?:      string
      cluster?:   string
      clusterNs?: string
    }

    result: ql.#CollectPods & {
      app: {
        name:      parameter.appName
        namespace: parameter.appNs
        filter: {
          if parameter.cluster != _|_ {
            cluster: parameter.cluster
          }
          if parameter.clusterNs != _|_ {
            clusterNamespace: parameter.clusterNs
          }
          if parameter.name != _|_ {
            components: [parameter.name]
          }
        }
      }
    }

    if result.err == _|_ {
      status: {
        podList: [ for pod in result.list if pod.object != _|_ {
          cluster:   pod.cluster
          workload:  pod.workload
          component: pod.component
          metadata: {
            name:         pod.object.metadata.name
            namespace:    pod.object.metadata.namespace
            creationTime: pod.object.metadata.creationTimestamp
            labels:       pod.object.metadata.labels
            version: {
              if pod.publishVersion != _|_ {
                publishVersion: pod.publishVersion
              }
              if pod.deployVersion != _|_ {
                deployVersion: pod.deployVersion
              }
            }
          }
          status: {
            phase: pod.object.status.phase
            // refer to https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase
            if phase != "Pending" && phase != "Unknown" {
              podIP:    pod.object.status.podIP
              hostIP:   pod.object.status.hostIP
              nodeName: pod.object.spec.nodeName
            }
          }
        }]
      }
    }

    if result.err != _|_ {
      status: {
        error: result.err
      }
    }
kind: ConfigMap
metadata:
  name: component-pod-view
  namespace: {{ include "systemDefinitionNamespace" . }}
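The view above is resolved by VelaQL at request time; a query against it, reusing the application and component names from the e2e fixtures later in this diff (the `vela ql` entry point is assumed here as illustration), would look like:

    vela ql "component-pod-view{appNs=default,appName=nginx-vela,name=nginx}"

The optional parameters name, cluster and clusterNs map one-to-one onto the filter block of the view.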
charts/vela-core/templates/velaql/component-service.yaml (new file, 48 lines)
@@ -0,0 +1,48 @@
apiVersion: v1
data:
  template: |
    import (
      "vela/ql"
    )

    parameter: {
      appName:    string
      appNs:      string
      name?:      string
      cluster?:   string
      clusterNs?: string
    }

    result: ql.#CollectServices & {
      app: {
        name:      parameter.appName
        namespace: parameter.appNs
        filter: {
          if parameter.cluster != _|_ {
            cluster: parameter.cluster
          }
          if parameter.clusterNs != _|_ {
            clusterNamespace: parameter.clusterNs
          }
          if parameter.name != _|_ {
            components: [parameter.name]
          }
        }
      }
    }

    if result.err == _|_ {
      status: {
        services: result.list
      }
    }

    if result.err != _|_ {
      status: {
        error: result.err
      }
    }
kind: ConfigMap
metadata:
  name: component-service-view
  namespace: {{ include "systemDefinitionNamespace" . }}
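The service view has the same parameter surface and can be queried analogously (same assumptions as above):

    vela ql "component-service-view{appNs=default,appName=nginx-vela}"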
@@ -42,10 +42,10 @@ var (
	applicationName = "app-basic"
	traitAlias      = "scaler"
	appNameForInit  = "initmyapp"
	jsonAppFile                 = `{"name":"nginx-vela","services":{"nginx":{"type":"webservice","image":"nginx:1.9.4","port":80}}}`
	testDeleteJsonAppFile       = `{"name":"test-vela-delete","services":{"nginx-test":{"type":"webservice","image":"nginx:1.9.4","port":80}}}`
	appbasicJsonAppFile         = `{"name":"app-basic","services":{"app-basic":{"type":"webservice","image":"nginx:1.9.4","port":80}}}`
	appbasicAddTraitJsonAppFile = `{"name":"app-basic","services":{"app-basic":{"type":"webservice","image":"nginx:1.9.4","port":80,"scaler":{"replicas":2}}}}`
	jsonAppFile                 = `{"name":"nginx-vela","services":{"nginx":{"type":"webservice","image":"nginx:1.9.4","ports":[{port: 80, expose: true}]}}}`
	testDeleteJsonAppFile       = `{"name":"test-vela-delete","services":{"nginx-test":{"type":"webservice","image":"nginx:1.9.4","ports":[{port: 80, expose: true}]}}}`
	appbasicJsonAppFile         = `{"name":"app-basic","services":{"app-basic":{"type":"webservice","image":"nginx:1.9.4","ports":[{port: 80, expose: true}]}}}`
	appbasicAddTraitJsonAppFile = `{"name":"app-basic","services":{"app-basic":{"type":"webservice","image":"nginx:1.9.4","ports":[{port: 80, expose: true}],"scaler":{"replicas":2}}}}`
	velaQL = "test-component-pod-view{appNs=default,appName=nginx-vela,name=nginx}"
)
@@ -303,7 +303,7 @@ var VelaQLPodListContext = func(context string, velaQL string) bool {
			gomega.Expect(v.Workload.ApiVersion).To(gomega.ContainSubstring("apps/v1"))
		}
		if v.Workload.Kind != "" {
			gomega.Expect(v.Workload.Kind).To(gomega.ContainSubstring("Deployment"))
			gomega.Expect(v.Workload.Kind).To(gomega.ContainSubstring("ReplicaSet"))
		}
	}
})
@@ -7,7 +7,6 @@ data:
  template: |
    import (
      "vela/ql"
      "vela/op"
    )

    parameter: {
@@ -18,7 +17,7 @@ data:
      clusterNs?: string
    }

    application: ql.#ListResourcesInApp & {
    result: ql.#CollectPods & {
      app: {
        name:      parameter.appName
        namespace: parameter.appNs
@@ -36,63 +35,41 @@ data:
        }
      }

    if application.err != _|_ {
      status: error: application.err
    }

    if application.err == _|_ {
      resources: application.list

      podsMap: op.#Steps & {
        for i, resource in resources {
          "\(i)": ql.#CollectPods & {
            value:   resource.object
            cluster: resource.cluster
          }
        }
      }

      podsWithCluster: [ for i, pods in podsMap for podObj in pods.list {
        cluster: pods.cluster
        obj:     podObj
      }]

      podStatus: op.#Steps & {
        for i, pod in podsWithCluster {
          "\(i)": op.#Steps & {
            name: pod.obj.metadata.name
            containers: {for container in pod.obj.status.containerStatuses {
              "\(container.name)": {
                image: container.image
                state: container.state
              }
            }}
            events: ql.#SearchEvents & {
              value:   pod.obj
              cluster: pod.cluster
            }
            metrics: ql.#Read & {
              cluster: pod.cluster
              value: {
                apiVersion: "metrics.k8s.io/v1beta1"
                kind:       "PodMetrics"
                metadata: {
                  name:      pod.obj.metadata.name
                  namespace: pod.obj.metadata.namespace
                }
              }
            }
          }
        }
      }

    if result.err == _|_ {
      status: {
        podList: [ for podInfo in podStatus {
          name: podInfo.name
          containers: [ for containerName, container in podInfo.containers {
            containerName
          }]
          events: podInfo.events.list
        podList: [ for pod in result.list if pod.object != _|_ {
          cluster:   pod.cluster
          workload:  pod.workload
          component: pod.component
          metadata: {
            name:         pod.object.metadata.name
            namespace:    pod.object.metadata.namespace
            creationTime: pod.object.metadata.creationTimestamp
            labels:       pod.object.metadata.labels
            version: {
              if pod.publishVersion != _|_ {
                publishVersion: pod.publishVersion
              }
              if pod.deployVersion != _|_ {
                deployVersion: pod.deployVersion
              }
            }
          }
          status: {
            phase: pod.object.status.phase
            // refer to https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase
            if phase != "Pending" && phase != "Unknown" {
              podIP:    pod.object.status.podIP
              hostIP:   pod.object.status.hostIP
              nodeName: pod.object.spec.nodeName
            }
          }
        }]
      }
    }

    if result.err != _|_ {
      status: {
        error: result.err
      }
    }
@@ -58,10 +58,40 @@
}

#CollectPods: {
	#do:       "collectPods"
	#do:       "collectResources"
	#provider: "query"
	value: {...}
	cluster: string
	app: {
		name:      string
		namespace: string
		filter?: {
			cluster?:          string
			clusterNamespace?: string
			components?: [...string]
			kind:       "Pod"
			apiVersion: "v1"
		}
		withTree: true
	}
	list: [...{...}]
	...
}

#CollectServices: {
	#do:       "collectResources"
	#provider: "query"
	app: {
		name:      string
		namespace: string
		filter?: {
			cluster?:          string
			clusterNamespace?: string
			components?: [...string]
			kind:       "Service"
			apiVersion: "v1"
		}
		withTree: true
	}
	list: [...{...}]
	...
}
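Both definitions now funnel into the single collectResources provider and differ only in the kind/apiVersion fixed inside the filter, so further collectors need no new Go code; a hypothetical ConfigMap collector following the same pattern (name and use are illustrative, not part of this change) would read:

    #CollectConfigMaps: {
    	#do:       "collectResources"
    	#provider: "query"
    	app: {
    		name:      string
    		namespace: string
    		filter?: {
    			cluster?:          string
    			clusterNamespace?: string
    			components?: [...string]
    			kind:       "ConfigMap"
    			apiVersion: "v1"
    		}
    		withTree: true
    	}
    	list: [...{...}]
    	...
    }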
@@ -111,6 +141,7 @@
		clusterNamespace?: string
		components?: [...string]
	}
	withTree: true
}
list?: [...{
	endpoint: {
@@ -130,7 +161,7 @@
}

#GetApplicationTree: {
	#do:       "getApplicationTree"
	#do:       "listAppliedResources"
	#provider: "query"
	app: {
		name: string
@@ -140,6 +171,7 @@
		clusterNamespace?: string
		components?: [...string]
	}
	withTree: true
}
list?: [...{
	name: string
@@ -10,6 +10,8 @@
#CollectPods: query.#CollectPods

#CollectServices: query.#CollectServices

#SearchEvents: query.#SearchEvents

#CollectLogsInPod: query.#CollectLogsInPod
@@ -41,7 +41,6 @@ import (
	"github.com/oam-dev/terraform-config-inspect/tfconfig"
	kruise "github.com/openkruise/kruise-api/apps/v1alpha1"
	kruisev1alpha1 "github.com/openkruise/rollouts/api/v1alpha1"
	errors2 "github.com/pkg/errors"
	certmanager "github.com/wonderflow/cert-manager-api/pkg/apis/certmanager/v1"
	yamlv3 "gopkg.in/yaml.v3"
	istioclientv1beta1 "istio.io/client-go/pkg/apis/networking/v1beta1"
@@ -68,7 +67,6 @@ import (
	oamcore "github.com/oam-dev/kubevela/apis/core.oam.dev"
	"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
	"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
	oamstandard "github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
	"github.com/oam-dev/kubevela/apis/types"
	velacue "github.com/oam-dev/kubevela/pkg/cue"
@@ -382,19 +380,6 @@ type ResourceLocation struct {
type clusterObjectReferenceFilter func(common.ClusterObjectReference) bool

func clusterObjectReferenceTypeFilterGenerator(allowedKinds ...string) clusterObjectReferenceFilter {
	allowedKindMap := map[string]bool{}
	for _, allowedKind := range allowedKinds {
		allowedKindMap[allowedKind] = true
	}
	return func(item common.ClusterObjectReference) bool {
		_, exists := allowedKindMap[item.Kind]
		return exists
	}
}

var isWorkloadClusterObjectReferenceFilter = clusterObjectReferenceTypeFilterGenerator("Deployment", "StatefulSet", "CloneSet", "Job", "Configuration")

var resourceNameClusterObjectReferenceFilter = func(resourceName []string) clusterObjectReferenceFilter {
	return func(reference common.ClusterObjectReference) bool {
		if len(resourceName) == 0 {
@@ -425,44 +410,6 @@ func filterResource(inputs []common.ClusterObjectReference, filters ...clusterOb
	return
}

func askToChooseOneResource(app *v1beta1.Application, filters ...clusterObjectReferenceFilter) (*common.ClusterObjectReference, error) {
	resources := app.Status.AppliedResources
	if len(resources) == 0 {
		return nil, fmt.Errorf("no resources in the application deployed yet")
	}
	resources = filterResource(resources, filters...)
	if app.Name == AddonObservabilityApplication {
		resources = filterClusterObjectRefFromAddonObservability(resources)
	}
	// filter locations
	if len(resources) == 0 {
		return nil, fmt.Errorf("no supported resources detected in deployed resources")
	}
	if len(resources) == 1 {
		return &resources[0], nil
	}
	opMap := ClusterObject2Map(resources)
	var ops []string
	for _, r := range opMap {
		ops = append(ops, r)
	}
	prompt := &survey.Select{
		Message: fmt.Sprintf("You have %d deployed resources in your app. Please choose one:", len(ops)),
		Options: ops,
	}
	var selectedRsc string
	err := survey.AskOne(prompt, &selectedRsc)
	if err != nil {
		return nil, fmt.Errorf("choosing resource err %w", err)
	}
	for k, resource := range ops {
		if selectedRsc == resource {
			return &resources[k], nil
		}
	}
	return nil, fmt.Errorf("choosing resource err %w", err)
}

// AskToChooseOneNamespace ask for choose one namespace as env
func AskToChooseOneNamespace(c client.Client, envMeta *types.EnvMeta) error {
	var nsList v1.NamespaceList
@@ -522,43 +469,6 @@ func removeEmptyString(items []string) []string {
	return r
}

// AskToChooseOneEnvResource will ask users to select one applied resource of the application if more than one
// resource is a map for component to applied resources
// return the selected ClusterObjectReference
func AskToChooseOneEnvResource(app *v1beta1.Application, resourceName ...string) (*common.ClusterObjectReference, error) {
	filters := []clusterObjectReferenceFilter{isWorkloadClusterObjectReferenceFilter}
	_resourceName := removeEmptyString(resourceName)
	filters = append(filters, resourceNameClusterObjectReferenceFilter(_resourceName))
	return askToChooseOneResource(app, filters...)
}

func askToChooseOneInApplication(category string, options []string) (decision string, err error) {
	if len(options) == 0 {
		return "", fmt.Errorf("no %s exists in the application", category)
	}
	if len(options) == 1 {
		return options[0], nil
	}
	prompt := &survey.Select{
		Message: fmt.Sprintf("You have multiple %ss in your app. Please choose one %s: ", category, category),
		Options: options,
	}
	if err = survey.AskOne(prompt, &decision); err != nil {
		return "", errors2.Wrapf(err, "choosing %s failed", category)
	}
	return
}

// AskToChooseOneService will ask users to select one service of the application if more than one
func AskToChooseOneService(svcNames []string) (string, error) {
	return askToChooseOneInApplication("service", svcNames)
}

// AskToChooseOnePods will ask users to select one pods of the resource if more than one
func AskToChooseOnePods(podNames []string) (string, error) {
	return askToChooseOneInApplication("pod", podNames)
}

// ReadYamlToObject will read a yaml K8s object to runtime.Object
func ReadYamlToObject(path string, object k8sruntime.Object) error {
	data, err := os.ReadFile(filepath.Clean(path))
@@ -18,21 +18,13 @@ package query
import (
	"context"
	"reflect"
	"sync"

	"github.com/hashicorp/go-version"
	kruise "github.com/openkruise/kruise-api/apps/v1alpha1"
	"github.com/pkg/errors"
	appsv1 "k8s.io/api/apps/v1"
	batchv1 "k8s.io/api/batch/v1"
	batchv1beta1 "k8s.io/api/batch/v1beta1"
	corev1 "k8s.io/api/core/v1"
	kerrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/klog/v2"
	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -40,7 +32,6 @@ import (
	"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
	"github.com/oam-dev/kubevela/pkg/multicluster"
	"github.com/oam-dev/kubevela/pkg/oam"
	oamutil "github.com/oam-dev/kubevela/pkg/oam/util"
	"github.com/oam-dev/kubevela/pkg/resourcetracker"
	"github.com/oam-dev/kubevela/pkg/velaql/providers/query/types"
)
@@ -102,9 +93,14 @@ func (c *AppCollector) ListApplicationResources(app *v1beta1.Application, queryT
	for _, managedResource := range rt.Spec.ManagedResources {
		if isResourceInTargetCluster(c.opt.Filter, managedResource.ClusterObjectReference) &&
			isResourceInTargetComponent(c.opt.Filter, managedResource.Component) &&
			isResourceMatchKindAndVersion(c.opt.Filter, managedResource.Kind, managedResource.APIVersion) {
			(queryTree || isResourceMatchKindAndVersion(c.opt.Filter, managedResource.Kind, managedResource.APIVersion)) {
			managedResources = append(managedResources, &types.AppliedResource{
				Cluster: managedResource.Cluster,
				Cluster: func() string {
					if managedResource.Cluster != "" {
						return managedResource.Cluster
					}
					return "local"
				}(),
				Kind:      managedResource.Kind,
				Component: managedResource.Component,
				Trait:     managedResource.Trait,
@@ -139,8 +135,13 @@ func (c *AppCollector) ListApplicationResources(app *v1beta1.Application, queryT
		return managedResources, err
	}

	filter := func(node types.ResourceTreeNode) bool {
		return isResourceMatchKindAndVersion(c.opt.Filter, node.Kind, node.APIVersion)
	}
	var matchedResources []*types.AppliedResource
	// error from leaf nodes won't block the results
	for _, resource := range managedResources {
	for i := range managedResources {
		resource := managedResources[i]
		root := types.ResourceTreeNode{
			Cluster:    resource.Cluster,
			APIVersion: resource.APIVersion,
@@ -149,7 +150,7 @@ func (c *AppCollector) ListApplicationResources(app *v1beta1.Application, queryT
			Name: resource.Name,
			UID:  resource.UID,
		}
		root.LeafNodes, err = iteratorChildResources(ctx, resource.Cluster, c.k8sClient, root, 1)
		root.LeafNodes, err = iteratorChildResources(ctx, resource.Cluster, c.k8sClient, root, 1, filter)
		if err != nil {
			// if the resource has been deleted, continue access next appliedResource don't break the whole request
			if kerrors.IsNotFound(err) {
@@ -158,6 +159,9 @@ func (c *AppCollector) ListApplicationResources(app *v1beta1.Application, queryT
			klog.Errorf("query leaf node resource apiVersion=%s kind=%s namespace=%s name=%s failure %s, skip this resource", root.APIVersion, root.Kind, root.Namespace, root.Name, err.Error())
			continue
		}
		if !filter(root) && len(root.LeafNodes) == 0 {
			continue
		}
		rootObject, err := fetchObjectWithResourceTreeNode(ctx, resource.Cluster, c.k8sClient, root)
		if err != nil {
			// if the resource has been deleted, continue access next appliedResource don't break the whole request
@@ -183,9 +187,11 @@ func (c *AppCollector) ListApplicationResources(app *v1beta1.Application, queryT
		if !rootObject.GetDeletionTimestamp().IsZero() {
			root.DeletionTimestamp = rootObject.GetDeletionTimestamp().Time
		}
		root.Object = *rootObject
		resource.ResourceTree = &root
		matchedResources = append(matchedResources, resource)
	}
	return managedResources, nil
	return matchedResources, nil
}

// FindResourceFromResourceTrackerSpec find resources from ResourceTracker spec
@@ -281,275 +287,6 @@ func getObjectCreatedByComponent(cli client.Client, objRef corev1.ObjectReferenc
	return componentName, obj, nil
}

var standardWorkloads = []schema.GroupVersionKind{
	appsv1.SchemeGroupVersion.WithKind(reflect.TypeOf(appsv1.Deployment{}).Name()),
	appsv1.SchemeGroupVersion.WithKind(reflect.TypeOf(appsv1.ReplicaSet{}).Name()),
	appsv1.SchemeGroupVersion.WithKind(reflect.TypeOf(appsv1.StatefulSet{}).Name()),
	appsv1.SchemeGroupVersion.WithKind(reflect.TypeOf(appsv1.DaemonSet{}).Name()),
	batchv1.SchemeGroupVersion.WithKind(reflect.TypeOf(batchv1.Job{}).Name()),
	kruise.SchemeGroupVersion.WithKind(reflect.TypeOf(kruise.CloneSet{}).Name()),
}

var podCollectorMap = map[schema.GroupVersionKind]PodCollector{
	batchv1.SchemeGroupVersion.WithKind(reflect.TypeOf(batchv1.CronJob{}).Name()):           cronJobPodCollector,
	batchv1beta1.SchemeGroupVersion.WithKind(reflect.TypeOf(batchv1beta1.CronJob{}).Name()): cronJobPodCollector,
}

// PodCollector collector pod created by workload
type PodCollector func(cli client.Client, obj *unstructured.Unstructured, cluster string) ([]*unstructured.Unstructured, error)

// NewPodCollector create a PodCollector
func NewPodCollector(gvk schema.GroupVersionKind) PodCollector {
	for _, workload := range standardWorkloads {
		if gvk == workload {
			return standardWorkloadPodCollector
		}
	}
	if collector, ok := podCollectorMap[gvk]; ok {
		return collector
	}
	return velaComponentPodCollector
}

// standardWorkloadPodCollector collect pods created by standard workload
func standardWorkloadPodCollector(cli client.Client, obj *unstructured.Unstructured, cluster string) ([]*unstructured.Unstructured, error) {
	ctx := multicluster.ContextWithClusterName(context.Background(), cluster)
	selectorPath := []string{"spec", "selector", "matchLabels"}
	labels, found, err := unstructured.NestedStringMap(obj.UnstructuredContent(), selectorPath...)

	if err != nil {
		return nil, err
	}
	if !found {
		return nil, errors.Errorf("fail to find matchLabels from %s %s", obj.GroupVersionKind().String(), klog.KObj(obj))
	}

	listOpts := []client.ListOption{
		client.MatchingLabels(labels),
		client.InNamespace(obj.GetNamespace()),
	}

	podList := corev1.PodList{}
	if err := cli.List(ctx, &podList, listOpts...); err != nil {
		return nil, err
	}

	pods := make([]*unstructured.Unstructured, len(podList.Items))
	for i := range podList.Items {
		pod, err := oamutil.Object2Unstructured(podList.Items[i])
		if err != nil {
			return nil, err
		}
		pod.SetGroupVersionKind(
			corev1.SchemeGroupVersion.WithKind(
				reflect.TypeOf(corev1.Pod{}).Name(),
			),
		)
		pods[i] = pod
	}
	return pods, nil
}

// cronJobPodCollector collect pods created by cronjob
func cronJobPodCollector(cli client.Client, obj *unstructured.Unstructured, cluster string) ([]*unstructured.Unstructured, error) {
	ctx := multicluster.ContextWithClusterName(context.Background(), cluster)

	jobList := new(batchv1.JobList)
	if err := cli.List(ctx, jobList, client.InNamespace(obj.GetNamespace())); err != nil {
		return nil, err
	}

	uid := obj.GetUID()
	var jobs []batchv1.Job
	for _, job := range jobList.Items {
		for _, owner := range job.GetOwnerReferences() {
			if owner.Kind == reflect.TypeOf(batchv1.CronJob{}).Name() && owner.UID == uid {
				jobs = append(jobs, job)
			}
		}
	}
	var pods []*unstructured.Unstructured
	podGVK := corev1.SchemeGroupVersion.WithKind(reflect.TypeOf(corev1.Pod{}).Name())
	for _, job := range jobs {
		labels := job.Spec.Selector.MatchLabels
		listOpts := []client.ListOption{
			client.MatchingLabels(labels),
			client.InNamespace(job.GetNamespace()),
		}
		podList := corev1.PodList{}
		if err := cli.List(ctx, &podList, listOpts...); err != nil {
			return nil, err
		}

		items := make([]*unstructured.Unstructured, len(podList.Items))
		for i := range podList.Items {
			pod, err := oamutil.Object2Unstructured(podList.Items[i])
			if err != nil {
				return nil, err
			}
			pod.SetGroupVersionKind(podGVK)
			items[i] = pod
		}
		pods = append(pods, items...)
	}
	return pods, nil
}

// HelmReleaseCollector HelmRelease resources collector
type HelmReleaseCollector struct {
	matchLabels  map[string]string
	workloadsGVK []schema.GroupVersionKind
	cli          client.Client
}

// NewHelmReleaseCollector create a HelmRelease collector
func NewHelmReleaseCollector(cli client.Client, hr *unstructured.Unstructured) *HelmReleaseCollector {
	return &HelmReleaseCollector{
		// matchLabels for resources created by HelmRelease refer to
		// https://github.com/fluxcd/helm-controller/blob/main/internal/runner/post_renderer_origin_labels.go#L31
		matchLabels: map[string]string{
			"helm.toolkit.fluxcd.io/name":      hr.GetName(),
			"helm.toolkit.fluxcd.io/namespace": hr.GetNamespace(),
		},
		workloadsGVK: []schema.GroupVersionKind{
			appsv1.SchemeGroupVersion.WithKind(reflect.TypeOf(appsv1.Deployment{}).Name()),
			appsv1.SchemeGroupVersion.WithKind(reflect.TypeOf(appsv1.StatefulSet{}).Name()),
			batchv1.SchemeGroupVersion.WithKind(reflect.TypeOf(batchv1.Job{}).Name()),
		},
		cli: cli,
	}
}

// CollectWorkloads collect workloads of HelmRelease
func (c *HelmReleaseCollector) CollectWorkloads(cluster string) ([]unstructured.Unstructured, error) {
	ctx := multicluster.ContextWithClusterName(context.Background(), cluster)
	listOptions := []client.ListOption{
		client.MatchingLabels(c.matchLabels),
	}
	workloadsList := make([][]unstructured.Unstructured, len(c.workloadsGVK))
	wg := sync.WaitGroup{}
	wg.Add(len(c.workloadsGVK))

	for i, workloadGVK := range c.workloadsGVK {
		go func(index int, gvk schema.GroupVersionKind) {
			defer wg.Done()
			unstructuredObjList := &unstructured.UnstructuredList{}
			unstructuredObjList.SetGroupVersionKind(gvk)
			if err := c.cli.List(ctx, unstructuredObjList, listOptions...); err != nil {
				return
			}
			workloadsList[index] = unstructuredObjList.Items
		}(i, workloadGVK)
	}
	wg.Wait()

	var workloads []unstructured.Unstructured
	for i := range workloadsList {
		workloads = append(workloads, workloadsList[i]...)
	}
	return workloads, nil
}

// CollectServices collect service of HelmRelease
func (c *HelmReleaseCollector) CollectServices(ctx context.Context, cluster string) ([]corev1.Service, error) {
	cctx := multicluster.ContextWithClusterName(ctx, cluster)
	listOptions := []client.ListOption{
		client.MatchingLabels(c.matchLabels),
	}
	var services corev1.ServiceList
	if err := c.cli.List(cctx, &services, listOptions...); err != nil {
		return nil, err
	}
	return services.Items, nil
}

// CollectIngress collect ingress of HelmRelease
func (c *HelmReleaseCollector) CollectIngress(ctx context.Context, cluster string) ([]unstructured.Unstructured, error) {
	clusterCTX := multicluster.ContextWithClusterName(ctx, cluster)
	listOptions := []client.ListOption{
		client.MatchingLabels(c.matchLabels),
	}
	var ingresses = new(unstructured.UnstructuredList)
	ingresses.SetGroupVersionKind(schema.GroupVersionKind{
		Group:   "networking.k8s.io",
		Version: "v1beta1",
		Kind:    "IngressList",
	})
	if err := c.cli.List(clusterCTX, ingresses, listOptions...); err != nil {
		if meta.IsNoMatchError(err) {
			ingresses.SetGroupVersionKind(schema.GroupVersionKind{
				Group:   "networking.k8s.io",
				Version: "v1",
				Kind:    "IngressList",
			})
			if err := c.cli.List(clusterCTX, ingresses, listOptions...); err != nil {
				return nil, err
			}
		} else {
			return nil, err
		}
	}
	return ingresses.Items, nil
}

// helmReleasePodCollector collect pods created by helmRelease
func helmReleasePodCollector(cli client.Client, obj *unstructured.Unstructured, cluster string) ([]*unstructured.Unstructured, error) {
	hc := NewHelmReleaseCollector(cli, obj)
	workloads, err := hc.CollectWorkloads(cluster)
	if err != nil {
		return nil, err
	}
	podsList := make([][]*unstructured.Unstructured, len(workloads))
	wg := sync.WaitGroup{}
	wg.Add(len(workloads))
	for i := range workloads {
		go func(index int) {
			defer wg.Done()
			collector := NewPodCollector(workloads[index].GroupVersionKind())
			pods, err := collector(cli, &workloads[index], cluster)
			if err != nil {
				return
			}
			podsList[index] = pods
		}(i)
	}
	wg.Wait()
	var collectedPods []*unstructured.Unstructured
	for i := range podsList {
		collectedPods = append(collectedPods, podsList[i]...)
	}
	return collectedPods, nil
}

func velaComponentPodCollector(cli client.Client, obj *unstructured.Unstructured, cluster string) ([]*unstructured.Unstructured, error) {
	ctx := multicluster.ContextWithClusterName(context.Background(), cluster)

	listOpts := []client.ListOption{
		client.MatchingLabels(map[string]string{"app.oam.dev/component": obj.GetName()}),
		client.InNamespace(obj.GetNamespace()),
	}

	podList := corev1.PodList{}
	if err := cli.List(ctx, &podList, listOpts...); err != nil {
		return nil, err
	}

	pods := make([]*unstructured.Unstructured, len(podList.Items))
	for i := range podList.Items {
		pod, err := oamutil.Object2Unstructured(podList.Items[i])
		if err != nil {
			return nil, err
		}
		pod.SetGroupVersionKind(
			corev1.SchemeGroupVersion.WithKind(
				reflect.TypeOf(corev1.Pod{}).Name(),
			),
		)
		pods[i] = pod
	}
	return pods, nil
}

func getEventFieldSelector(obj *unstructured.Unstructured) fields.Selector {
	field := fields.Set{}
	field["involvedObject.name"] = obj.GetName()
@@ -27,7 +27,6 @@ import (
	networkv1beta1 "k8s.io/api/networking/v1beta1"
	kerrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/klog"
	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -36,7 +35,6 @@ import (
	"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
	apis "github.com/oam-dev/kubevela/apis/types"
	"github.com/oam-dev/kubevela/pkg/apiserver/utils/log"
	helmapi "github.com/oam-dev/kubevela/pkg/appfile/helm/flux2apis"
	"github.com/oam-dev/kubevela/pkg/cue/model/value"
	"github.com/oam-dev/kubevela/pkg/multicluster"
	"github.com/oam-dev/kubevela/pkg/utils"
@@ -67,7 +65,7 @@ func (h *provider) GeneratorServiceEndpoints(wfctx wfContext.Context, v *value.V
	serviceEndpoints := make([]querytypes.ServiceEndpoint, 0)
	var clusterGatewayNodeIP = make(map[string]string)
	collector := NewAppCollector(h.cli, opt)
	resources, err := collector.ListApplicationResources(app, true)
	resources, err := collector.ListApplicationResources(app, opt.WithTree)
	if err != nil {
		return err
	}
@@ -88,6 +86,7 @@ func (h *provider) GeneratorServiceEndpoints(wfctx wfContext.Context, v *value.V
		} else {
			serviceEndpoints = append(serviceEndpoints, getServiceEndpoints(ctx, h.cli, resource.GroupVersionKind(), resource.Name, resource.Namespace, resource.Cluster, resource.Component, cachedSelectorNodeIP)...)
		}

	}
	return fillQueryResult(v, serviceEndpoints, "list")
}
@@ -127,32 +126,6 @@ func getServiceEndpoints(ctx context.Context, cli client.Client, gvk schema.Grou
			return nil
		}
		serviceEndpoints = append(serviceEndpoints, generatorFromService(service, cachedSelectorNodeIP, cluster, component, "")...)
	case helmapi.HelmReleaseGVK.Kind:
		obj := new(unstructured.Unstructured)
		obj.SetNamespace(namespace)
		obj.SetName(name)
		hc := NewHelmReleaseCollector(cli, obj)
		services, err := hc.CollectServices(ctx, cluster)
		if err != nil {
			klog.Error(err, "collect service by helm release failure", "helmRelease", name, "namespace", namespace, "cluster", cluster)
			return nil
		}
		for _, service := range services {
			serviceEndpoints = append(serviceEndpoints, generatorFromService(service, cachedSelectorNodeIP, cluster, component, "")...)
		}
		ingresses, err := hc.CollectIngress(ctx, cluster)
		if err != nil {
			klog.Error(err, "collect ingres by helm release failure", "helmRelease", name, "namespace", namespace, "cluster", cluster)
			return nil
		}
		for _, uns := range ingresses {
			var ingress networkv1beta1.Ingress
			if err := runtime.DefaultUnstructuredConverter.FromUnstructured(uns.UnstructuredContent(), &ingress); err != nil {
				klog.Errorf("fail to convert unstructured to ingress %s", err.Error())
				continue
			}
			serviceEndpoints = append(serviceEndpoints, generatorFromIngress(ingress, cluster, component)...)
		}
	case "SeldonDeployment":
		obj := new(unstructured.Unstructured)
		obj.SetGroupVersionKind(gvk)
@@ -221,6 +221,7 @@ var _ = Describe("Test Query Provider", func() {
			cluster: "",
			clusterNamespace: "default",
		}
		withTree: true
	}`
v, err := value.NewValue(opt, nil, "")
Expect(err).Should(BeNil())
@@ -29,13 +29,14 @@ import (
	corev1 "k8s.io/api/core/v1"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	apimachinerytypes "k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/klog"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
	"github.com/oam-dev/kubevela/pkg/apiserver/utils/log"
	"github.com/oam-dev/kubevela/pkg/cue/model/value"
	"github.com/oam-dev/kubevela/pkg/multicluster"
	querytypes "github.com/oam-dev/kubevela/pkg/velaql/providers/query/types"
@@ -54,8 +55,6 @@ const (
	annoAmbassadorServiceNamespace = "ambassador.service/namespace"
)

var fluxcdGroupVersion = schema.GroupVersion{Group: "helm.toolkit.fluxcd.io", Version: "v2beta1"}

type provider struct {
	cli client.Client
	cfg *rest.Config
@@ -77,6 +76,9 @@ type Option struct {
	// WithStatus means query the object from the cluster and get the latest status
	// This field only suitable for ListResourcesInApp
	WithStatus bool `json:"withStatus,omitempty"`

	// WithTree means recursively query the resource tree.
	WithTree bool `json:"withTree,omitempty"`
}

// FilterOption filter resource created by component
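On the CUE side, Option is populated from the `app` value of a query; a value that opts into tree traversal, shaped after the test options elsewhere in this diff (the concrete name and namespace here are illustrative), looks like:

    app: {
    	name:      "app"
    	namespace: "default"
    	filter: {
    		cluster:          ""
    		clusterNamespace: "default"
    	}
    	withTree: true
    }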
@@ -125,7 +127,7 @@ func (h *provider) ListAppliedResources(ctx wfContext.Context, v *value.Value, a
	if err = h.cli.Get(context.Background(), appKey, app); err != nil {
		return v.FillObject(err.Error(), "err")
	}
	appResList, err := collector.ListApplicationResources(app, false)
	appResList, err := collector.ListApplicationResources(app, opt.WithTree)
	if err != nil {
		return v.FillObject(err.Error(), "err")
	}
@@ -135,11 +137,10 @@ func (h *provider) ListAppliedResources(ctx wfContext.Context, v *value.Value, a
	return fillQueryResult(v, appResList, "list")
}

// GetApplicationResourceTree get resource tree of application
func (h *provider) GetApplicationResourceTree(ctx wfContext.Context, v *value.Value, act types.Action) error {
func (h *provider) CollectResources(ctx wfContext.Context, v *value.Value, act types.Action) error {
	val, err := v.LookupValue("app")
	if err != nil {
		return v.FillObject(err.Error(), "err")
		return err
	}
	opt := Option{}
	if err = val.UnmarshalTo(&opt); err != nil {
@@ -151,45 +152,31 @@ func (h *provider) GetApplicationResourceTree(ctx wfContext.Context, v *value.Va
	if err = h.cli.Get(context.Background(), appKey, app); err != nil {
		return v.FillObject(err.Error(), "err")
	}
	appResList, err := collector.ListApplicationResources(app, true)
	appResList, err := collector.ListApplicationResources(app, opt.WithTree)
	if err != nil {
		return v.FillObject(err.Error(), "err")
	}
	if appResList == nil {
		appResList = []*querytypes.AppliedResource{}
	var resources = make([]querytypes.ResourceItem, 0)
	for _, res := range appResList {
		if res.ResourceTree != nil {
			resources = append(resources, buildResourceArray(*res, res.ResourceTree, res.ResourceTree, opt.Filter.Kind, opt.Filter.APIVersion)...)
		} else if res.Kind == opt.Filter.Kind && res.APIVersion == opt.Filter.APIVersion {
			var object unstructured.Unstructured
			object.SetAPIVersion(opt.Filter.APIVersion)
			object.SetKind(opt.Filter.Kind)
			if err := h.cli.Get(context.Background(), apimachinerytypes.NamespacedName{Namespace: res.Namespace, Name: res.Name}, &object); err == nil {
				resources = append(resources, buildResourceItem(*res, querytypes.Workload{
					APIVersion: app.APIVersion,
					Kind:       app.Kind,
					Name:       app.Name,
					Namespace:  app.Namespace,
				}, object))
			} else {
				log.Logger.Errorf("failed to get the service:%s", err.Error())
			}
		}
	}
	return fillQueryResult(v, appResList, "list")
}

func (h *provider) CollectPods(ctx wfContext.Context, v *value.Value, act types.Action) error {
	val, err := v.LookupValue("value")
	if err != nil {
		return err
	}
	cluster, err := v.GetString("cluster")
	if err != nil {
		return err
	}
	obj := new(unstructured.Unstructured)
	if err = val.UnmarshalTo(obj); err != nil {
		return err
	}

	var pods []*unstructured.Unstructured
	var collector PodCollector

	switch obj.GroupVersionKind() {
	case fluxcdGroupVersion.WithKind(HelmReleaseKind):
		collector = helmReleasePodCollector
	default:
		collector = NewPodCollector(obj.GroupVersionKind())
	}

	pods, err = collector(h.cli, obj, cluster)
	if err != nil {
		return v.FillObject(err.Error(), "err")
	}
	return fillQueryResult(v, pods, "list")
	return fillQueryResult(v, resources, "list")
}

func (h *provider) SearchEvents(ctx wfContext.Context, v *value.Value, act types.Action) error {
@@ -315,10 +302,9 @@ func Install(p providers.Providers, cli client.Client, cfg *rest.Config) {
	p.Register(ProviderName, map[string]providers.Handler{
		"listResourcesInApp":      prd.ListResourcesInApp,
		"listAppliedResources":    prd.ListAppliedResources,
		"collectPods":             prd.CollectPods,
		"collectResources":        prd.CollectResources,
		"searchEvents":            prd.SearchEvents,
		"collectLogsInPod":        prd.CollectLogsInPod,
		"collectServiceEndpoints": prd.GeneratorServiceEndpoints,
		"getApplicationTree":      prd.GetApplicationResourceTree,
	})
}
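Every key in this map is addressable from CUE through a matching #provider/#do pair, which is how the ql definitions earlier in this diff bind to their Go handlers; the re-pointed #GetApplicationTree, for instance, keeps its CUE name while dispatching to the surviving handler:

    #GetApplicationTree: {
    	#do:       "listAppliedResources"
    	#provider: "query"
    	...
    }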
@@ -18,7 +18,6 @@ package query
import (
	"context"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"time"
@@ -385,80 +384,6 @@ var _ = Describe("Test Query Provider", func() {
	})
})

Context("Test CollectPods", func() {
	It("Test collect pod from workload deployment", func() {
		deploy := baseDeploy.DeepCopy()
		deploy.SetName("test-collect-pod")
		deploy.Spec.Selector = &metav1.LabelSelector{
			MatchLabels: map[string]string{
				oam.LabelAppComponent: "test",
			},
		}
		deploy.Spec.Template.ObjectMeta.SetLabels(map[string]string{
			oam.LabelAppComponent: "test",
		})
		Expect(k8sClient.Create(ctx, deploy)).Should(BeNil())
		for i := 1; i <= 5; i++ {
			pod := basePod.DeepCopy()
			pod.SetName(fmt.Sprintf("test-collect-pod-%d", i))
			pod.SetLabels(map[string]string{
				oam.LabelAppComponent: "test",
			})
			Expect(k8sClient.Create(ctx, pod)).Should(BeNil())
		}

		prd := provider{cli: k8sClient}
		unstructuredDeploy, err := util.Object2Unstructured(deploy)
		Expect(err).Should(BeNil())
		unstructuredDeploy.SetGroupVersionKind((&corev1.ObjectReference{
			APIVersion: "apps/v1",
			Kind:       "Deployment",
		}).GroupVersionKind())

		deployJson, err := json.Marshal(unstructuredDeploy)
		Expect(err).Should(BeNil())
		opt := fmt.Sprintf(`value: %s
cluster: ""`, deployJson)
		v, err := value.NewValue(opt, nil, "")
		Expect(err).Should(BeNil())
		Expect(prd.CollectPods(nil, v, nil)).Should(BeNil())

		podList := new(PodList)
		Expect(v.UnmarshalTo(podList)).Should(BeNil())
		Expect(len(podList.List)).Should(Equal(5))
		for _, pod := range podList.List {
			Expect(pod.GroupVersionKind()).Should(Equal((&corev1.ObjectReference{
				APIVersion: "v1",
				Kind:       "Pod",
			}).GroupVersionKind()))
		}
	})

	It("Test collect pod with incomplete parameter", func() {
		emptyOpt := ""
		prd := provider{cli: k8sClient}
		v, err := value.NewValue(emptyOpt, nil, "")
		Expect(err).Should(BeNil())
		err = prd.CollectPods(nil, v, nil)
		Expect(err).ShouldNot(BeNil())
		Expect(err.Error()).Should(Equal("var(path=value) not exist"))

		optWithoutCluster := `value: {}`
		v, err = value.NewValue(optWithoutCluster, nil, "")
		Expect(err).Should(BeNil())
		err = prd.CollectPods(nil, v, nil)
		Expect(err).ShouldNot(BeNil())
		Expect(err.Error()).Should(Equal("var(path=cluster) not exist"))

		optWithWrongValue := `value: {test: 1}
cluster: "test"`
		v, err = value.NewValue(optWithWrongValue, nil, "")
		Expect(err).Should(BeNil())
		err = prd.CollectPods(nil, v, nil)
		Expect(err).ShouldNot(BeNil())
	})
})

Context("Test search event from k8s object", func() {
	It("Test search event with incomplete parameter", func() {
		emptyOpt := ""
@@ -555,9 +480,12 @@ options: {
h, ok := p.GetHandler("query", "listResourcesInApp")
Expect(h).ShouldNot(BeNil())
Expect(ok).Should(Equal(true))
h, ok = p.GetHandler("query", "collectPods")
h, ok = p.GetHandler("query", "collectResources")
Expect(h).ShouldNot(BeNil())
Expect(ok).Should(Equal(true))
l, ok := p.GetHandler("query", "listAppliedResources")
Expect(l).ShouldNot(BeNil())
Expect(ok).Should(Equal(true))
h, ok = p.GetHandler("query", "searchEvents")
Expect(ok).Should(Equal(true))
Expect(h).ShouldNot(BeNil())
@@ -623,7 +551,7 @@ options: {
		APIVersion: "helm.toolkit.fluxcd.io/v2beta1",
		Kind:       helmapi.HelmReleaseGVK.Kind,
		Namespace:  "default",
		Name:       "helmRelease",
		Name:       "helm-release",
	},
},
{
@@ -702,6 +630,13 @@ options: {
err = k8sClient.Create(context.TODO(), rt)
Expect(err).Should(BeNil())

helmRelease := &unstructured.Unstructured{}
helmRelease.SetName("helm-release")
helmRelease.SetNamespace("default")
helmRelease.SetGroupVersionKind(helmapi.HelmReleaseGVK)
err = k8sClient.Create(context.TODO(), helmRelease)
Expect(err).Should(BeNil())

testServiceList := []map[string]interface{}{
	{
		"name": "clusterip",
@@ -745,7 +680,7 @@ options: {
	},
	"type": corev1.ServiceTypeNodePort,
	"labels": map[string]string{
		"helm.toolkit.fluxcd.io/name":      "helmRelease",
		"helm.toolkit.fluxcd.io/name":      "helm-release",
		"helm.toolkit.fluxcd.io/namespace": "default",
	},
},
@@ -799,6 +734,7 @@ options: {
		Expect(err).Should(BeNil())
	}
}

var prefixbeta = networkv1beta1.PathTypePrefix
testIngress := []client.Object{
	&networkv1beta1.Ingress{
@@ -908,7 +844,7 @@ options: {
	Name:      "ingress-helm",
	Namespace: "default",
	Labels: map[string]string{
		"helm.toolkit.fluxcd.io/name":      "helmRelease",
		"helm.toolkit.fluxcd.io/name":      "helm-release",
		"helm.toolkit.fluxcd.io/namespace": "default",
	},
},
@@ -985,6 +921,7 @@ options: {
		cluster: "",
		clusterNamespace: "default",
	}
	withTree: true
}`
v, err := value.NewValue(opt, nil, "")
Expect(err).Should(BeNil())
@@ -1016,11 +953,11 @@ options: {
var endpoints []querytypes.ServiceEndpoint
err = endValue.Decode(&endpoints)
Expect(err).Should(BeNil())
var edps []string
for _, e := range endpoints {
	edps = append(edps, e.String())
Expect(len(urls)).Should(Equal(len(endpoints)))
for i, e := range endpoints {
	fmt.Println(e.String())
	Expect(urls[i]).Should(Equal(e.String()))
}
Expect(edps).Should(BeEquivalentTo(urls))
})
})
@@ -48,6 +48,7 @@ var _ = BeforeSuite(func(done Done) {
		"./testdata/gateway/crds",
		"../../../../charts/vela-core/crds",
		"./testdata/machinelearning.seldon.io_seldondeployments.yaml",
		"./testdata/helm-release-crd.yaml",
	},
}
pkg/velaql/providers/query/testdata/helm-release-crd.yaml (new vendored file, 736 lines)
@@ -0,0 +1,736 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: helmreleases.helm.toolkit.fluxcd.io
spec:
  conversion:
    strategy: None
  group: helm.toolkit.fluxcd.io
  names:
    kind: HelmRelease
    listKind: HelmReleaseList
    plural: helmreleases
    shortNames:
    - hr
    singular: helmrelease
  scope: Namespaced
  versions:
  - additionalPrinterColumns:
    - jsonPath: .status.conditions[?(@.type=="Ready")].status
      name: Ready
      type: string
    - jsonPath: .status.conditions[?(@.type=="Ready")].message
      name: Status
      type: string
    - jsonPath: .metadata.creationTimestamp
      name: Age
      type: date
    name: v2beta1
    schema:
      openAPIV3Schema:
        description: HelmRelease is the Schema for the helmreleases API
        properties:
          apiVersion:
            description: 'APIVersion defines the versioned schema of this representation
              of an object. Servers should convert recognized schemas to the latest
              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
            type: string
          kind:
            description: 'Kind is a string value representing the REST resource this
              object represents. Servers may infer this from the endpoint the client
              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
            type: string
          metadata:
            type: object
          spec:
            description: HelmReleaseSpec defines the desired state of a Helm release.
            properties:
              chart:
                description: Chart defines the template of the v1beta1.HelmChart that
                  should be created for this HelmRelease.
                properties:
                  spec:
                    description: Spec holds the template for the v1beta1.HelmChartSpec
                      for this HelmRelease.
                    properties:
                      chart:
                        description: The name or path the Helm chart is available
                          at in the SourceRef.
                        type: string
                      interval:
                        description: Interval at which to check the v1beta1.Source
                          for updates. Defaults to 'HelmReleaseSpec.Interval'.
                        type: string
                      sourceRef:
                        description: The name and namespace of the v1beta1.Source
                          the chart is available at.
                        properties:
                          apiVersion:
                            description: APIVersion of the referent.
                            type: string
                          kind:
                            description: Kind of the referent.
                            enum:
                            - HelmRepository
                            - GitRepository
                            - Bucket
                            type: string
                          name:
                            description: Name of the referent.
                            maxLength: 253
                            minLength: 1
                            type: string
                          namespace:
                            description: Namespace of the referent.
                            maxLength: 63
                            minLength: 1
                            type: string
                        required:
                        - name
                        type: object
                      valuesFile:
                        description: Alternative values file to use as the default
                          chart values, expected to be a relative path in the SourceRef.
                          Deprecated in favor of ValuesFiles, for backwards compatibility
                          the file defined here is merged before the ValuesFiles items.
                          Ignored when omitted.
                        type: string
                      valuesFiles:
                        description: Alternative list of values files to use as the
                          chart values (values.yaml is not included by default), expected
                          to be a relative path in the SourceRef. Values files are
                          merged in the order of this list with the last file overriding
                          the first. Ignored when omitted.
                        items:
                          type: string
                        type: array
                      version:
                        default: '*'
                        description: Version semver expression, ignored for charts
                          from v1beta1.GitRepository and v1beta1.Bucket sources. Defaults
                          to latest when omitted.
                        type: string
                    required:
                    - chart
                    - sourceRef
                    type: object
                required:
                - spec
                type: object
              dependsOn:
                description: DependsOn may contain a dependency.CrossNamespaceDependencyReference
                  slice with references to HelmRelease resources that must be ready
                  before this HelmRelease can be reconciled.
                items:
                  description: CrossNamespaceDependencyReference holds the reference
                    to a dependency.
                  properties:
                    name:
                      description: Name holds the name reference of a dependency.
                      type: string
                    namespace:
                      description: Namespace holds the namespace reference of a dependency.
                      type: string
                  required:
                  - name
                  type: object
                type: array
              install:
                description: Install holds the configuration for Helm install actions
                  for this HelmRelease.
                properties:
                  crds:
                    description: "CRDs upgrade CRDs from the Helm Chart's crds directory
                      according to the CRD upgrade policy provided here. Valid values
                      are `Skip`, `Create` or `CreateReplace`. Default is `Create`
                      and if omitted CRDs are installed but not updated. \n Skip:
                      do neither install nor replace (update) any CRDs. \n Create:
                      new CRDs are created, existing CRDs are neither updated nor
                      deleted. \n CreateReplace: new CRDs are created, existing CRDs
                      are updated (replaced) but not deleted. \n By default, CRDs
                      are applied (installed) during Helm install action. With this
                      option users can opt-in to CRD replace existing CRDs on Helm
                      install actions, which is not (yet) natively supported by Helm.
                      https://helm.sh/docs/chart_best_practices/custom_resource_definitions."
                    enum:
                    - Skip
                    - Create
                    - CreateReplace
                    type: string
                  createNamespace:
                    description: CreateNamespace tells the Helm install action to
                      create the HelmReleaseSpec.TargetNamespace if it does not exist
                      yet. On uninstall, the namespace will not be garbage collected.
                    type: boolean
                  disableHooks:
                    description: DisableHooks prevents hooks from running during the
                      Helm install action.
                    type: boolean
                  disableOpenAPIValidation:
                    description: DisableOpenAPIValidation prevents the Helm install
                      action from validating rendered templates against the Kubernetes
                      OpenAPI Schema.
                    type: boolean
                  disableWait:
                    description: DisableWait disables the waiting for resources to
                      be ready after a Helm install has been performed.
                    type: boolean
                  disableWaitForJobs:
                    description: DisableWaitForJobs disables waiting for jobs to complete
                      after a Helm install has been performed.
                    type: boolean
                  remediation:
                    description: Remediation holds the remediation configuration for
                      when the Helm install action for the HelmRelease fails. The
                      default is to not perform any action.
                    properties:
                      ignoreTestFailures:
                        description: IgnoreTestFailures tells the controller to skip
                          remediation when the Helm tests are run after an install
                          action but fail. Defaults to 'Test.IgnoreFailures'.
                        type: boolean
                      remediateLastFailure:
                        description: RemediateLastFailure tells the controller to
                          remediate the last failure, when no retries remain. Defaults
                          to 'false'.
                        type: boolean
                      retries:
                        description: Retries is the number of retries that should
                          be attempted on failures before bailing. Remediation, using
                          an uninstall, is performed between each attempt. Defaults
                          to '0', a negative integer equals to unlimited retries.
                        type: integer
                    type: object
                  replace:
                    description: Replace tells the Helm install action to re-use the
                      'ReleaseName', but only if that name is a deleted release which
                      remains in the history.
                    type: boolean
                  skipCRDs:
                    description: "SkipCRDs tells the Helm install action to not install
                      any CRDs. By default, CRDs are installed if not already present.
                      \n Deprecated use CRD policy (`crds`) attribute with value `Skip`
                      instead."
                    type: boolean
                  timeout:
                    description: Timeout is the time to wait for any individual Kubernetes
                      operation (like Jobs for hooks) during the performance of a
                      Helm install action. Defaults to 'HelmReleaseSpec.Timeout'.
                    type: string
                type: object
              interval:
                description: Interval at which to reconcile the Helm release.
                type: string
              kubeConfig:
                description: KubeConfig for reconciling the HelmRelease on a remote
                  cluster. When specified, KubeConfig takes precedence over ServiceAccountName.
                properties:
                  secretRef:
                    description: SecretRef holds the name to a secret that contains
                      a 'value' key with the kubeconfig file as the value. It must
                      be in the same namespace as the HelmRelease. It is recommended
                      that the kubeconfig is self-contained, and the secret is regularly
                      updated if credentials such as a cloud-access-token expire.
                      Cloud specific `cmd-path` auth helpers will not function without
                      adding binaries and credentials to the Pod that is responsible
                      for reconciling the HelmRelease.
                    properties:
                      name:
                        description: Name of the referent
                        type: string
                    required:
                    - name
                    type: object
                type: object
              maxHistory:
                description: MaxHistory is the number of revisions saved by Helm for
                  this HelmRelease. Use '0' for an unlimited number of revisions;
                  defaults to '10'.
                type: integer
              postRenderers:
                description: PostRenderers holds an array of Helm PostRenderers, which
                  will be applied in order of their definition.
                items:
                  description: PostRenderer contains a Helm PostRenderer specification.
                  properties:
                    kustomize:
                      description: Kustomization to apply as PostRenderer.
                      properties:
                        images:
                          description: Images is a list of (image name, new name,
                            new tag or digest) for changing image names, tags or digests.
                            This can also be achieved with a patch, but this operator
                            is simpler to specify.
                          items:
                            description: Image contains an image name, a new name,
                              a new tag or digest, which will replace the original
                              name and tag.
                            properties:
                              digest:
                                description: Digest is the value used to replace the
                                  original image tag. If digest is present NewTag
                                  value is ignored.
                                type: string
                              name:
                                description: Name is a tag-less image name.
                                type: string
                              newName:
                                description: NewName is the value used to replace
                                  the original name.
                                type: string
                              newTag:
                                description: NewTag is the value used to replace the
                                  original tag.
                                type: string
                            required:
                            - name
                            type: object
                          type: array
                        patchesJson6902:
                          description: JSON 6902 patches, defined as inline YAML objects.
                          items:
                            description: JSON6902Patch contains a JSON6902 patch and
                              the target the patch should be applied to.
                            properties:
                              patch:
                                description: Patch contains the JSON6902 patch document
                                  with an array of operation objects.
                                items:
                                  description: JSON6902 is a JSON6902 operation object.
                                    https://tools.ietf.org/html/rfc6902#section-4
                                  properties:
                                    from:
                                      type: string
                                    op:
                                      enum:
                                      - test
                                      - remove
                                      - add
                                      - replace
                                      - move
                                      - copy
                                      type: string
                                    path:
                                      type: string
                                    value:
                                      x-kubernetes-preserve-unknown-fields: true
                                  required:
                                  - op
                                  - path
                                  type: object
                                type: array
                              target:
                                description: Target points to the resources that the
                                  patch document should be applied to.
                                properties:
                                  annotationSelector:
                                    description: AnnotationSelector is a string that
                                      follows the label selection expression https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#api
                                      It matches with the resource annotations.
                                    type: string
                                  group:
                                    description: Group is the API group to select
                                      resources from. Together with Version and Kind
                                      it is capable of unambiguously identifying and/or
|
||||
selecting resources. https://github.com/kubernetes/community/blob/master/contributors/design-proposals/api-machinery/api-group.md
|
||||
type: string
|
||||
kind:
|
||||
description: Kind of the API Group to select resources
|
||||
from. Together with Group and Version it is
|
||||
capable of unambiguously identifying and/or
|
||||
selecting resources. https://github.com/kubernetes/community/blob/master/contributors/design-proposals/api-machinery/api-group.md
|
||||
type: string
|
||||
labelSelector:
|
||||
description: LabelSelector is a string that follows
|
||||
the label selection expression https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#api
|
||||
It matches with the resource labels.
|
||||
type: string
|
||||
name:
|
||||
description: Name to match resources with.
|
||||
type: string
|
||||
namespace:
|
||||
description: Namespace to select resources from.
|
||||
type: string
|
||||
version:
|
||||
description: Version of the API Group to select
|
||||
resources from. Together with Group and Kind
|
||||
it is capable of unambiguously identifying and/or
|
||||
selecting resources. https://github.com/kubernetes/community/blob/master/contributors/design-proposals/api-machinery/api-group.md
|
||||
type: string
|
||||
type: object
|
||||
required:
|
||||
- patch
|
||||
- target
|
||||
type: object
|
||||
type: array
|
||||
patchesStrategicMerge:
|
||||
description: Strategic merge patches, defined as inline
|
||||
YAML objects.
|
||||
items:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
type: array
|
||||
type: object
|
||||
type: object
|
||||
type: array
|
||||
releaseName:
|
||||
description: ReleaseName used for the Helm release. Defaults to a
|
||||
composition of '[TargetNamespace-]Name'.
|
||||
maxLength: 53
|
||||
minLength: 1
|
||||
type: string
|
||||
rollback:
|
||||
description: Rollback holds the configuration for Helm rollback actions
|
||||
for this HelmRelease.
|
||||
properties:
|
||||
cleanupOnFail:
|
||||
description: CleanupOnFail allows deletion of new resources created
|
||||
during the Helm rollback action when it fails.
|
||||
type: boolean
|
||||
disableHooks:
|
||||
description: DisableHooks prevents hooks from running during the
|
||||
Helm rollback action.
|
||||
type: boolean
|
||||
disableWait:
|
||||
description: DisableWait disables the waiting for resources to
|
||||
be ready after a Helm rollback has been performed.
|
||||
type: boolean
|
||||
disableWaitForJobs:
|
||||
description: DisableWaitForJobs disables waiting for jobs to complete
|
||||
after a Helm rollback has been performed.
|
||||
type: boolean
|
||||
force:
|
||||
description: Force forces resource updates through a replacement
|
||||
strategy.
|
||||
type: boolean
|
||||
recreate:
|
||||
description: Recreate performs pod restarts for the resource if
|
||||
applicable.
|
||||
type: boolean
|
||||
timeout:
|
||||
description: Timeout is the time to wait for any individual Kubernetes
|
||||
operation (like Jobs for hooks) during the performance of a
|
||||
Helm rollback action. Defaults to 'HelmReleaseSpec.Timeout'.
|
||||
type: string
|
||||
type: object
|
||||
serviceAccountName:
|
||||
description: The name of the Kubernetes service account to impersonate
|
||||
when reconciling this HelmRelease.
|
||||
type: string
|
||||
storageNamespace:
|
||||
description: StorageNamespace used for the Helm storage. Defaults
|
||||
to the namespace of the HelmRelease.
|
||||
maxLength: 63
|
||||
minLength: 1
|
||||
type: string
|
||||
suspend:
|
||||
description: Suspend tells the controller to suspend reconciliation
|
||||
for this HelmRelease, it does not apply to already started reconciliations.
|
||||
Defaults to false.
|
||||
type: boolean
|
||||
targetNamespace:
|
||||
description: TargetNamespace to target when performing operations
|
||||
for the HelmRelease. Defaults to the namespace of the HelmRelease.
|
||||
maxLength: 63
|
||||
minLength: 1
|
||||
type: string
|
||||
test:
|
||||
description: Test holds the configuration for Helm test actions for
|
||||
this HelmRelease.
|
||||
properties:
|
||||
enable:
|
||||
description: Enable enables Helm test actions for this HelmRelease
|
||||
after an Helm install or upgrade action has been performed.
|
||||
type: boolean
|
||||
ignoreFailures:
|
||||
description: IgnoreFailures tells the controller to skip remediation
|
||||
when the Helm tests are run but fail. Can be overwritten for
|
||||
tests run after install or upgrade actions in 'Install.IgnoreTestFailures'
|
||||
and 'Upgrade.IgnoreTestFailures'.
|
||||
type: boolean
|
||||
timeout:
|
||||
description: Timeout is the time to wait for any individual Kubernetes
|
||||
operation during the performance of a Helm test action. Defaults
|
||||
to 'HelmReleaseSpec.Timeout'.
|
||||
type: string
|
||||
type: object
|
||||
timeout:
|
||||
description: Timeout is the time to wait for any individual Kubernetes
|
||||
operation (like Jobs for hooks) during the performance of a Helm
|
||||
action. Defaults to '5m0s'.
|
||||
type: string
|
||||
uninstall:
|
||||
description: Uninstall holds the configuration for Helm uninstall
|
||||
actions for this HelmRelease.
|
||||
properties:
|
||||
disableHooks:
|
||||
description: DisableHooks prevents hooks from running during the
|
||||
Helm rollback action.
|
||||
type: boolean
|
||||
keepHistory:
|
||||
description: KeepHistory tells Helm to remove all associated resources
|
||||
and mark the release as deleted, but retain the release history.
|
||||
type: boolean
|
||||
timeout:
|
||||
description: Timeout is the time to wait for any individual Kubernetes
|
||||
operation (like Jobs for hooks) during the performance of a
|
||||
Helm uninstall action. Defaults to 'HelmReleaseSpec.Timeout'.
|
||||
type: string
|
||||
type: object
|
||||
upgrade:
|
||||
description: Upgrade holds the configuration for Helm upgrade actions
|
||||
for this HelmRelease.
|
||||
properties:
|
||||
cleanupOnFail:
|
||||
description: CleanupOnFail allows deletion of new resources created
|
||||
during the Helm upgrade action when it fails.
|
||||
type: boolean
|
||||
crds:
|
||||
description: "CRDs upgrade CRDs from the Helm Chart's crds directory
|
||||
according to the CRD upgrade policy provided here. Valid values
|
||||
are `Skip`, `Create` or `CreateReplace`. Default is `Skip` and
|
||||
if omitted CRDs are neither installed nor upgraded. \n Skip:
|
||||
do neither install nor replace (update) any CRDs. \n Create:
|
||||
new CRDs are created, existing CRDs are neither updated nor
|
||||
deleted. \n CreateReplace: new CRDs are created, existing CRDs
|
||||
are updated (replaced) but not deleted. \n By default, CRDs
|
||||
are not applied during Helm upgrade action. With this option
|
||||
users can opt-in to CRD upgrade, which is not (yet) natively
|
||||
supported by Helm. https://helm.sh/docs/chart_best_practices/custom_resource_definitions."
|
||||
enum:
|
||||
- Skip
|
||||
- Create
|
||||
- CreateReplace
|
||||
type: string
|
||||
disableHooks:
|
||||
description: DisableHooks prevents hooks from running during the
|
||||
Helm upgrade action.
|
||||
type: boolean
|
||||
disableOpenAPIValidation:
|
||||
description: DisableOpenAPIValidation prevents the Helm upgrade
|
||||
action from validating rendered templates against the Kubernetes
|
||||
OpenAPI Schema.
|
||||
type: boolean
|
||||
disableWait:
|
||||
description: DisableWait disables the waiting for resources to
|
||||
be ready after a Helm upgrade has been performed.
|
||||
type: boolean
|
||||
disableWaitForJobs:
|
||||
description: DisableWaitForJobs disables waiting for jobs to complete
|
||||
after a Helm upgrade has been performed.
|
||||
type: boolean
|
||||
force:
|
||||
description: Force forces resource updates through a replacement
|
||||
strategy.
|
||||
type: boolean
|
||||
preserveValues:
|
||||
description: PreserveValues will make Helm reuse the last release's
|
||||
values and merge in overrides from 'Values'. Setting this flag
|
||||
makes the HelmRelease non-declarative.
|
||||
type: boolean
|
||||
remediation:
|
||||
description: Remediation holds the remediation configuration for
|
||||
when the Helm upgrade action for the HelmRelease fails. The
|
||||
default is to not perform any action.
|
||||
properties:
|
||||
ignoreTestFailures:
|
||||
description: IgnoreTestFailures tells the controller to skip
|
||||
remediation when the Helm tests are run after an upgrade
|
||||
action but fail. Defaults to 'Test.IgnoreFailures'.
|
||||
type: boolean
|
||||
remediateLastFailure:
|
||||
description: RemediateLastFailure tells the controller to
|
||||
remediate the last failure, when no retries remain. Defaults
|
||||
to 'false' unless 'Retries' is greater than 0.
|
||||
type: boolean
|
||||
retries:
|
||||
description: Retries is the number of retries that should
|
||||
be attempted on failures before bailing. Remediation, using
|
||||
'Strategy', is performed between each attempt. Defaults
|
||||
to '0', a negative integer equals to unlimited retries.
|
||||
type: integer
|
||||
strategy:
|
||||
description: Strategy to use for failure remediation. Defaults
|
||||
to 'rollback'.
|
||||
enum:
|
||||
- rollback
|
||||
- uninstall
|
||||
type: string
|
||||
type: object
|
||||
timeout:
|
||||
description: Timeout is the time to wait for any individual Kubernetes
|
||||
operation (like Jobs for hooks) during the performance of a
|
||||
Helm upgrade action. Defaults to 'HelmReleaseSpec.Timeout'.
|
||||
type: string
|
||||
type: object
|
||||
values:
|
||||
description: Values holds the values for this Helm release.
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
valuesFrom:
|
||||
description: ValuesFrom holds references to resources containing Helm
|
||||
values for this HelmRelease, and information about how they should
|
||||
be merged.
|
||||
items:
|
||||
description: ValuesReference contains a reference to a resource
|
||||
containing Helm values, and optionally the key they can be found
|
||||
at.
|
||||
properties:
|
||||
kind:
|
||||
description: Kind of the values referent, valid values are ('Secret',
|
||||
'ConfigMap').
|
||||
enum:
|
||||
- Secret
|
||||
- ConfigMap
|
||||
type: string
|
||||
name:
|
||||
description: Name of the values referent. Should reside in the
|
||||
same namespace as the referring resource.
|
||||
maxLength: 253
|
||||
minLength: 1
|
||||
type: string
|
||||
optional:
|
||||
description: Optional marks this ValuesReference as optional.
|
||||
When set, a not found error for the values reference is ignored,
|
||||
but any ValuesKey, TargetPath or transient error will still
|
||||
result in a reconciliation failure.
|
||||
type: boolean
|
||||
targetPath:
|
||||
description: TargetPath is the YAML dot notation path the value
|
||||
should be merged at. When set, the ValuesKey is expected to
|
||||
be a single flat value. Defaults to 'None', which results
|
||||
in the values getting merged at the root.
|
||||
type: string
|
||||
valuesKey:
|
||||
description: ValuesKey is the data key where the values.yaml
|
||||
or a specific value can be found at. Defaults to 'values.yaml'.
|
||||
type: string
|
||||
required:
|
||||
- kind
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
required:
|
||||
- chart
|
||||
- interval
|
||||
type: object
|
||||
status:
|
||||
description: HelmReleaseStatus defines the observed state of a HelmRelease.
|
||||
properties:
|
||||
conditions:
|
||||
description: Conditions holds the conditions for the HelmRelease.
|
||||
items:
|
||||
description: "Condition contains details for one aspect of the current
|
||||
state of this API Resource. --- This struct is intended for direct
|
||||
use as an array at the field path .status.conditions. For example,
|
||||
type FooStatus struct{ // Represents the observations of a
|
||||
foo's current state. // Known .status.conditions.type are:
|
||||
\"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type
|
||||
\ // +patchStrategy=merge // +listType=map // +listMapKey=type
|
||||
\ Conditions []metav1.Condition `json:\"conditions,omitempty\"
|
||||
patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`
|
||||
\n // other fields }"
|
||||
properties:
|
||||
lastTransitionTime:
|
||||
description: lastTransitionTime is the last time the condition
|
||||
transitioned from one status to another. This should be when
|
||||
the underlying condition changed. If that is not known, then
|
||||
using the time when the API field changed is acceptable.
|
||||
format: date-time
|
||||
type: string
|
||||
message:
|
||||
description: message is a human readable message indicating
|
||||
details about the transition. This may be an empty string.
|
||||
maxLength: 32768
|
||||
type: string
|
||||
observedGeneration:
|
||||
description: observedGeneration represents the .metadata.generation
|
||||
that the condition was set based upon. For instance, if .metadata.generation
|
||||
is currently 12, but the .status.conditions[x].observedGeneration
|
||||
is 9, the condition is out of date with respect to the current
|
||||
state of the instance.
|
||||
format: int64
|
||||
minimum: 0
|
||||
type: integer
|
||||
reason:
|
||||
description: reason contains a programmatic identifier indicating
|
||||
the reason for the condition's last transition. Producers
|
||||
of specific condition types may define expected values and
|
||||
meanings for this field, and whether the values are considered
|
||||
a guaranteed API. The value should be a CamelCase string.
|
||||
This field may not be empty.
|
||||
maxLength: 1024
|
||||
minLength: 1
|
||||
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
|
||||
type: string
|
||||
status:
|
||||
description: status of the condition, one of True, False, Unknown.
|
||||
enum:
|
||||
- "True"
|
||||
- "False"
|
||||
- Unknown
|
||||
type: string
|
||||
type:
|
||||
description: type of condition in CamelCase or in foo.example.com/CamelCase.
|
||||
--- Many .condition.type values are consistent across resources
|
||||
like Available, but because arbitrary conditions can be useful
|
||||
(see .node.status.conditions), the ability to deconflict is
|
||||
important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
|
||||
maxLength: 316
|
||||
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
|
||||
type: string
|
||||
required:
|
||||
- lastTransitionTime
|
||||
- message
|
||||
- reason
|
||||
- status
|
||||
- type
|
||||
type: object
|
||||
type: array
|
||||
failures:
|
||||
description: Failures is the reconciliation failure count against
|
||||
the latest desired state. It is reset after a successful reconciliation.
|
||||
format: int64
|
||||
type: integer
|
||||
helmChart:
|
||||
description: HelmChart is the namespaced name of the HelmChart resource
|
||||
created by the controller for the HelmRelease.
|
||||
type: string
|
||||
installFailures:
|
||||
description: InstallFailures is the install failure count against
|
||||
the latest desired state. It is reset after a successful reconciliation.
|
||||
format: int64
|
||||
type: integer
|
||||
lastAppliedRevision:
|
||||
description: LastAppliedRevision is the revision of the last successfully
|
||||
applied source.
|
||||
type: string
|
||||
lastAttemptedRevision:
|
||||
description: LastAttemptedRevision is the revision of the last reconciliation
|
||||
attempt.
|
||||
type: string
|
||||
lastAttemptedValuesChecksum:
|
||||
description: LastAttemptedValuesChecksum is the SHA1 checksum of the
|
||||
values of the last reconciliation attempt.
|
||||
type: string
|
||||
lastHandledReconcileAt:
|
||||
description: LastHandledReconcileAt holds the value of the most recent
|
||||
reconcile request value, so a change can be detected.
|
||||
type: string
|
||||
lastReleaseRevision:
|
||||
description: LastReleaseRevision is the revision of the last successful
|
||||
Helm release.
|
||||
type: integer
|
||||
observedGeneration:
|
||||
description: ObservedGeneration is the last observed generation.
|
||||
format: int64
|
||||
type: integer
|
||||
upgradeFailures:
|
||||
description: UpgradeFailures is the upgrade failure count against
|
||||
the latest desired state. It is reset after a successful reconciliation.
|
||||
format: int64
|
||||
type: integer
|
||||
type: object
|
||||
type: object
|
||||
served: true
|
||||
storage: true
|
||||
subresources:
|
||||
status: {}
|
||||
@@ -20,6 +20,7 @@ import (
"context"
"encoding/json"
"fmt"
"sort"
"time"

appsv1 "k8s.io/api/apps/v1"
@@ -52,48 +53,107 @@ var relationshipKey = "rules"
// the iterator max depth is 5
var maxDepth = 5

// RuleList is the list of children resource rules
type RuleList []ChildrenResourcesRule

// GetRule gets the rule by the resource type
func (rl *RuleList) GetRule(grt GroupResourceType) (*ChildrenResourcesRule, bool) {
for i, r := range *rl {
if r.GroupResourceType == grt {
return &(*rl)[i], true
}
}
return nil, false
}

// globalRule defines the whole relationship rule set
var globalRule map[GroupResourceType]ChildrenResourcesRule
var globalRule RuleList

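// init registers the built-in parent-to-children rules. Because globalRule is
// now an ordered slice rather than a map, rules are matched and iterated in
// registration order, which keeps the rendered resource tree stable.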
func init() {
globalRule = make(map[GroupResourceType]ChildrenResourcesRule)
globalRule[GroupResourceType{Group: "apps", Kind: "Deployment"}] = ChildrenResourcesRule{
CareResource: map[ResourceType]genListOptionFunc{
{APIVersion: "apps/v1", Kind: "ReplicaSet"}: deploy2RsLabelListOption,
globalRule = append(globalRule,
ChildrenResourcesRule{
GroupResourceType: GroupResourceType{Group: "apps", Kind: "Deployment"},
CareResources: buildCareResources([]*CareResource{
{
ResourceType: ResourceType{APIVersion: "apps/v1", Kind: "ReplicaSet"},
listOptions: deploy2RsLabelListOption,
},
}),
},
}
globalRule[GroupResourceType{Group: "apps", Kind: "ReplicaSet"}] = ChildrenResourcesRule{
CareResource: map[ResourceType]genListOptionFunc{
{APIVersion: "v1", Kind: "Pod"}: rs2PodLabelListOption,
ChildrenResourcesRule{
CareResources: buildCareResources([]*CareResource{
{
ResourceType: ResourceType{APIVersion: "v1", Kind: "Pod"},
listOptions: rs2PodLabelListOption,
},
}),
GroupResourceType: GroupResourceType{Group: "apps", Kind: "ReplicaSet"},
},
}
globalRule[GroupResourceType{Group: "apps", Kind: "StatefulSet"}] = ChildrenResourcesRule{
CareResource: map[ResourceType]genListOptionFunc{
{APIVersion: "v1", Kind: "Pod"}: statefulSet2PodListOption,
ChildrenResourcesRule{
CareResources: buildCareResources([]*CareResource{
{
ResourceType: ResourceType{APIVersion: "v1", Kind: "Pod"},
listOptions: statefulSet2PodListOption,
},
}),
GroupResourceType: GroupResourceType{Group: "apps", Kind: "StatefulSet"},
},
}
globalRule[GroupResourceType{Group: "", Kind: "Service"}] = ChildrenResourcesRule{
CareResource: map[ResourceType]genListOptionFunc{
{APIVersion: "discovery.k8s.io/v1beta1", Kind: "EndpointSlice"}: nil,
{APIVersion: "v1", Kind: "Endpoints"}: service2EndpointListOption,
ChildrenResourcesRule{
GroupResourceType: GroupResourceType{Group: "", Kind: "Service"},
CareResources: buildCareResources([]*CareResource{
{
ResourceType: ResourceType{APIVersion: "discovery.k8s.io/v1beta1", Kind: "EndpointSlice"},
},
{
ResourceType: ResourceType{APIVersion: "v1", Kind: "Endpoints"},
listOptions: service2EndpointListOption,
},
}),
},
}
globalRule[GroupResourceType{Group: "helm.toolkit.fluxcd.io", Kind: "HelmRelease"}] = ChildrenResourcesRule{
CareResource: map[ResourceType]genListOptionFunc{
{APIVersion: "apps/v1", Kind: "Deployment"}: nil,
{APIVersion: "apps/v1", Kind: "StatefulSet"}: nil,
{APIVersion: "v1", Kind: "ConfigMap"}: nil,
{APIVersion: "v1", Kind: "Secret"}: nil,
{APIVersion: "v1", Kind: "Service"}: nil,
{APIVersion: "v1", Kind: "PersistentVolumeClaim"}: nil,
{APIVersion: "networking.k8s.io/v1", Kind: "Ingress"}: nil,
{APIVersion: "v1", Kind: "ServiceAccount"}: nil,
{APIVersion: "rbac.authorization.k8s.io/v1", Kind: "Role"}: nil,
{APIVersion: "rbac.authorization.k8s.io/v1", Kind: "RoleBinding"}: nil,
ChildrenResourcesRule{
GroupResourceType: GroupResourceType{Group: "helm.toolkit.fluxcd.io", Kind: "HelmRelease"},
CareResources: buildCareResources([]*CareResource{
{
ResourceType: ResourceType{APIVersion: "apps/v1", Kind: "Deployment"},
},
{
ResourceType: ResourceType{APIVersion: "apps/v1", Kind: "StatefulSet"},
},
{
ResourceType: ResourceType{APIVersion: "v1", Kind: "ConfigMap"},
},
{
ResourceType: ResourceType{APIVersion: "v1", Kind: "Secret"},
},
{
ResourceType: ResourceType{APIVersion: "v1", Kind: "Service"},
},
{
ResourceType: ResourceType{APIVersion: "v1", Kind: "PersistentVolumeClaim"},
},
{
ResourceType: ResourceType{APIVersion: "networking.k8s.io/v1", Kind: "Ingress"},
},
{
ResourceType: ResourceType{APIVersion: "gateway.networking.k8s.io/v1alpha2", Kind: "HTTPRoute"},
},
{
ResourceType: ResourceType{APIVersion: "gateway.networking.k8s.io/v1alpha2", Kind: "Gateway"},
},
{
ResourceType: ResourceType{APIVersion: "v1", Kind: "ServiceAccount"},
},
{
ResourceType: ResourceType{APIVersion: "rbac.authorization.k8s.io/v1", Kind: "Role"},
},
{
ResourceType: ResourceType{APIVersion: "rbac.authorization.k8s.io/v1", Kind: "RoleBinding"},
},
}),
DefaultGenListOptionFunc: helmRelease2AnyListOption,
DisableFilterByOwnerReference: true,
},
DefaultGenListOptionFunc: helmRelease2AnyListOption,
DisableFilterByOwnerReference: true,
}
)
}

// GroupResourceType defines the parent resource type
@@ -116,14 +176,45 @@ type customRule struct {

// ChildrenResourcesRule defines the relationship between the parent object and its children resources
type ChildrenResourcesRule struct {
// GroupResourceType is the root resource type
GroupResourceType GroupResourceType
// every subResourceType can have a specified genListOptionFunc.
CareResource map[ResourceType]genListOptionFunc
CareResources *CareResources
// if the specified genListOptionFunc is nil, the default genListOptionFunc is used to generate the listOption.
DefaultGenListOptionFunc genListOptionFunc
// DisableFilterByOwnerReference means don't use the parent resource's UID to filter the result.
DisableFilterByOwnerReference bool
}

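// buildCareResources wraps a slice of care resources into a *CareResources.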
func buildCareResources(crs []*CareResource) *CareResources {
var cr CareResources = crs
return &cr
}

// CareResources is the list of care resource definitions
type CareResources []*CareResource

// Get gets the care resource by the resource type
func (c *CareResources) Get(rt ResourceType) *CareResource {
for _, r := range *c {
if r.ResourceType == rt {
return r
}
}
return nil
}

// Put adds a care resource to the list
func (c *CareResources) Put(cr *CareResource) {
*c = append(*c, cr)
}

// CareResource is a resource type and its list options
type CareResource struct {
ResourceType
listOptions genListOptionFunc
}

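// genListOptionFunc builds the client.ListOptions used to list one kind of
// child resource under a given parent object.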
type genListOptionFunc func(unstructured.Unstructured) (client.ListOptions, error)

var deploy2RsLabelListOption = func(obj unstructured.Unstructured) (client.ListOptions, error) {
@@ -643,6 +734,9 @@ func listItemByRule(clusterCTX context.Context, k8sClient client.Client, resourc
}
}
}
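// sort the matched children by name so the resource tree is rendered in a
// deterministic order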
sort.Slice(res, func(i, j int) bool {
return res[i].GetName() < res[j].GetName()
})
return res, nil
}
var listOptions client.ListOptions
@@ -676,10 +770,13 @@ func listItemByRule(clusterCTX context.Context, k8sClient client.Client, resourc
}
return res, nil
}
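// likewise, order the listed items by name for a stable result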
sort.Slice(itemList.Items, func(i, j int) bool {
return itemList.Items[i].GetName() < itemList.Items[j].GetName()
})
return itemList.Items, nil
}

func iteratorChildResources(ctx context.Context, cluster string, k8sClient client.Client, parentResource types.ResourceTreeNode, depth int) ([]*types.ResourceTreeNode, error) {
func iteratorChildResources(ctx context.Context, cluster string, k8sClient client.Client, parentResource types.ResourceTreeNode, depth int, filter func(node types.ResourceTreeNode) bool) ([]*types.ResourceTreeNode, error) {
if depth > maxDepth {
log.Logger.Warnf("listing application resource tree has reached the max-depth %d parentObject is %v", depth, parentResource)
return nil, nil
@@ -691,11 +788,14 @@ func iteratorChildResources(ctx context.Context, cluster string, k8sClient clien
group := parentObject.GetObjectKind().GroupVersionKind().Group
kind := parentObject.GetObjectKind().GroupVersionKind().Kind

if rules, ok := globalRule[GroupResourceType{Group: group, Kind: kind}]; ok {
if rule, ok := globalRule.GetRule(GroupResourceType{Group: group, Kind: kind}); ok {
var resList []*types.ResourceTreeNode
for resource, specifiedFunc := range rules.CareResource {
for i := range *rule.CareResources {
resource := (*rule.CareResources)[i].ResourceType
specifiedFunc := (*rule.CareResources)[i].listOptions

clusterCTX := multicluster.ContextWithClusterName(ctx, cluster)
items, err := listItemByRule(clusterCTX, k8sClient, resource, *parentObject, specifiedFunc, rules.DefaultGenListOptionFunc, rules.DisableFilterByOwnerReference)
items, err := listItemByRule(clusterCTX, k8sClient, resource, *parentObject, specifiedFunc, rule.DefaultGenListOptionFunc, rule.DisableFilterByOwnerReference)
if err != nil {
if meta.IsNoMatchError(err) || runtime.IsNotRegisteredError(err) {
klog.Errorf("error to list sub-resources: %s err: %v", resource.Kind, err)
@@ -703,7 +803,7 @@ func iteratorChildResources(ctx context.Context, cluster string, k8sClient clien
}
return nil, err
}
for _, item := range items {
for i, item := range items {
rtn := types.ResourceTreeNode{
APIVersion: item.GetAPIVersion(),
Kind: item.GroupVersionKind().Kind,
@@ -711,14 +811,18 @@ func iteratorChildResources(ctx context.Context, cluster string, k8sClient clien
Name: item.GetName(),
UID: item.GetUID(),
Cluster: cluster,
Object: items[i],
}
if _, ok := globalRule[GroupResourceType{Group: item.GetObjectKind().GroupVersionKind().Group, Kind: item.GetObjectKind().GroupVersionKind().Kind}]; ok {
childrenRes, err := iteratorChildResources(ctx, cluster, k8sClient, rtn, depth+1)
if _, ok := globalRule.GetRule(GroupResourceType{Group: item.GetObjectKind().GroupVersionKind().Group, Kind: item.GetObjectKind().GroupVersionKind().Kind}); ok {
childrenRes, err := iteratorChildResources(ctx, cluster, k8sClient, rtn, depth+1, filter)
if err != nil {
return nil, err
}
rtn.LeafNodes = childrenRes
}
if !filter(rtn) && len(rtn.LeafNodes) == 0 {
continue
}
healthStatus, err := checkResourceStatus(item)
if err != nil {
return nil, err
@@ -769,18 +873,21 @@ func mergeCustomRules(ctx context.Context, k8sClient client.Client) error {
continue
}
for _, rule := range customRules {
if cResource, ok := globalRule[*rule.ParentResourceType]; ok {
for _, resourceType := range rule.ChildrenResourceType {
if _, ok := cResource.CareResource[resourceType]; !ok {
cResource.CareResource[resourceType] = nil
if cResource, ok := globalRule.GetRule(*rule.ParentResourceType); ok {
for i, resourceType := range rule.ChildrenResourceType {
if cResource.CareResources.Get(resourceType) == nil {
cResource.CareResources.Put(&CareResource{ResourceType: rule.ChildrenResourceType[i]})
}
}
} else {
caredResources := map[ResourceType]genListOptionFunc{}
for _, resourceType := range rule.ChildrenResourceType {
caredResources[resourceType] = nil
var caredResources []*CareResource
for i := range rule.ChildrenResourceType {
caredResources = append(caredResources, &CareResource{ResourceType: rule.ChildrenResourceType[i]})
}
globalRule[*rule.ParentResourceType] = ChildrenResourcesRule{DefaultGenListOptionFunc: nil, CareResource: caredResources}
globalRule = append(globalRule, ChildrenResourcesRule{
GroupResourceType: *rule.ParentResourceType,
DefaultGenListOptionFunc: nil,
CareResources: buildCareResources(caredResources)})
}
}
}

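Why the map-to-slice change above matters: Go randomizes map iteration order, so a rule table stored as a map can emit children in a different order on every traversal, while a slice-backed RuleList iterates in registration order. A minimal, self-contained Go illustration (the keys are hypothetical, not taken from the code above):

package main

import "fmt"

func main() {
	m := map[string]int{"a": 1, "b": 2, "c": 3}
	// Map iteration order is unspecified and randomized between runs, so
	// building a resource tree from a map can reorder children every time.
	for k := range m {
		fmt.Print(k, " ")
	}
	fmt.Println()

	s := []string{"a", "b", "c"}
	// Slice iteration always follows element order, so the output is stable.
	for _, k := range s {
		fmt.Print(k, " ")
	}
	fmt.Println()
}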
@@ -17,6 +17,7 @@ limitations under the License.
package query

import (
"context"
"testing"
"time"

@@ -1293,7 +1294,9 @@ var _ = Describe("unit-test to e2e test", func() {
Name: "deploy1",
APIVersion: "apps/v1",
Kind: "Deployment",
}, 1)
}, 1, func(node types.ResourceTreeNode) bool {
return true
})
Expect(err).Should(BeNil())
Expect(len(tn)).Should(BeEquivalentTo(2))
Expect(len(tn[0].LeafNodes)).Should(BeEquivalentTo(1))
@@ -1364,10 +1367,11 @@ var _ = Describe("unit-test to e2e test", func() {
opt := `app: {
name: "app"
namespace: "test-namespace"
withTree: true
}`
v, err := value.NewValue(opt, nil, "")
Expect(err).Should(BeNil())
Expect(prd.GetApplicationResourceTree(nil, v, nil)).Should(BeNil())
Expect(prd.ListAppliedResources(nil, v, nil)).Should(BeNil())
type Res struct {
List []types.AppliedResource `json:"list"`
}
@@ -1401,21 +1405,28 @@ var _ = Describe("unit-test to e2e test", func() {
}
Expect(k8sClient.Create(ctx, &badRuleConfigMap)).Should(BeNil())

// clear after test
objectList = append(objectList, &badRuleConfigMap)

notExistParentConfigMap := v1.ConfigMap{TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "ConfigMap"},
ObjectMeta: metav1.ObjectMeta{Namespace: types3.DefaultKubeVelaNS, Name: "not-exist-parent", Labels: map[string]string{oam.LabelResourceRules: "true"}},
Data: map[string]string{relationshipKey: notExistParentResourceStr},
}
Expect(k8sClient.Create(ctx, &notExistParentConfigMap)).Should(BeNil())

// clear after test
objectList = append(objectList, &notExistParentConfigMap)

prd := provider{cli: k8sClient}
opt := `app: {
name: "app"
namespace: "test-namespace"
withTree: true
}`
v, err := value.NewValue(opt, nil, "")

Expect(err).Should(BeNil())
Expect(prd.GetApplicationResourceTree(nil, v, nil)).Should(BeNil())
Expect(prd.ListAppliedResources(nil, v, nil)).Should(BeNil())
type Res struct {
List []types.AppliedResource `json:"list"`
}
@@ -1495,31 +1506,40 @@ childrenResourceType:
Expect(k8sClient.Create(ctx, &missConfigedCm)).Should(BeNil())

Expect(mergeCustomRules(ctx, k8sClient)).Should(BeNil())
childrenResources, ok := globalRule[GroupResourceType{Group: "apps.kruise.io", Kind: "CloneSet"}]
childrenResources, ok := globalRule.GetRule(GroupResourceType{Group: "apps.kruise.io", Kind: "CloneSet"})
Expect(ok).Should(BeTrue())
Expect(childrenResources.DefaultGenListOptionFunc).Should(BeNil())
Expect(len(childrenResources.CareResource)).Should(BeEquivalentTo(2))
specifyFunc, ok := childrenResources.CareResource[ResourceType{APIVersion: "v1", Kind: "Pod"}]
Expect(ok).Should(BeTrue())
Expect(specifyFunc).Should(BeNil())
Expect(len(*childrenResources.CareResources)).Should(BeEquivalentTo(2))

dsChildrenResources, ok := globalRule[GroupResourceType{Group: "apps", Kind: "DaemonSet"}]
crPod := childrenResources.CareResources.Get(ResourceType{APIVersion: "v1", Kind: "Pod"})
Expect(crPod).ShouldNot(BeNil())
Expect(crPod.listOptions).Should(BeNil())

dsChildrenResources, ok := globalRule.GetRule(GroupResourceType{Group: "apps", Kind: "DaemonSet"})
Expect(ok).Should(BeTrue())
Expect(dsChildrenResources.DefaultGenListOptionFunc).Should(BeNil())
Expect(len(dsChildrenResources.CareResource)).Should(BeEquivalentTo(2))
dsSpecifyFunc, ok := dsChildrenResources.CareResource[ResourceType{APIVersion: "v1", Kind: "Pod"}]
Expect(ok).Should(BeTrue())
Expect(dsSpecifyFunc).Should(BeNil())
crSpecifyFunc, ok := dsChildrenResources.CareResource[ResourceType{APIVersion: "apps/v1", Kind: "ControllerRevision"}]
Expect(ok).Should(BeTrue())
Expect(crSpecifyFunc).Should(BeNil())
Expect(len(*dsChildrenResources.CareResources)).Should(BeEquivalentTo(2))

stsChildrenResources, ok := globalRule[GroupResourceType{Group: "apps", Kind: "StatefulSet"}]
crPod2 := dsChildrenResources.CareResources.Get(ResourceType{APIVersion: "v1", Kind: "Pod"})
Expect(crPod2).ShouldNot(BeNil())
Expect(crPod2.listOptions).Should(BeNil())

cr := dsChildrenResources.CareResources.Get(ResourceType{APIVersion: "apps/v1", Kind: "ControllerRevision"})
Expect(cr).ShouldNot(BeNil())
Expect(cr.listOptions).Should(BeNil())

stsChildrenResources, ok := globalRule.GetRule(GroupResourceType{Group: "apps", Kind: "StatefulSet"})
Expect(ok).Should(BeTrue())
Expect(stsChildrenResources.DefaultGenListOptionFunc).Should(BeNil())
Expect(len(stsChildrenResources.CareResource)).Should(BeEquivalentTo(2))
stsCrSpecifyFunc, ok := stsChildrenResources.CareResource[ResourceType{APIVersion: "apps/v1", Kind: "ControllerRevision"}]
Expect(ok).Should(BeTrue())
Expect(stsCrSpecifyFunc).Should(BeNil())
Expect(len(*stsChildrenResources.CareResources)).Should(BeEquivalentTo(2))
revisionCR := stsChildrenResources.CareResources.Get(ResourceType{APIVersion: "apps/v1", Kind: "ControllerRevision"})
Expect(revisionCR).ShouldNot(BeNil())
Expect(revisionCR.listOptions).Should(BeNil())

// clear data
Expect(k8sClient.Delete(context.TODO(), &missConfigedCm)).Should(BeNil())
Expect(k8sClient.Delete(context.TODO(), &stsConfigMap)).Should(BeNil())
Expect(k8sClient.Delete(context.TODO(), &daemonSetConfigMap)).Should(BeNil())
Expect(k8sClient.Delete(context.TODO(), &cloneSetConfigMap)).Should(BeNil())
})
})

@@ -22,6 +22,7 @@ import (
"time"

corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
)
@@ -115,17 +116,18 @@ type AppliedResource struct {

// ResourceTreeNode is the tree node of every resource
type ResourceTreeNode struct {
Cluster string `json:"cluster"`
APIVersion string `json:"apiVersion,omitempty"`
Kind string `json:"kind"`
Namespace string `json:"namespace,omitempty"`
Name string `json:"name,omitempty"`
UID types.UID `json:"uid,omitempty"`
HealthStatus HealthStatus `json:"healthStatus,omitempty"`
DeletionTimestamp time.Time `json:"deletionTimestamp,omitempty"`
CreationTimestamp time.Time `json:"creationTimestamp,omitempty"`
LeafNodes []*ResourceTreeNode `json:"leafNodes,omitempty"`
AdditionalInfo map[string]interface{} `json:"additionalInfo,omitempty"`
Cluster string `json:"cluster"`
APIVersion string `json:"apiVersion,omitempty"`
Kind string `json:"kind"`
Namespace string `json:"namespace,omitempty"`
Name string `json:"name,omitempty"`
UID types.UID `json:"uid,omitempty"`
HealthStatus HealthStatus `json:"healthStatus,omitempty"`
DeletionTimestamp time.Time `json:"deletionTimestamp,omitempty"`
CreationTimestamp time.Time `json:"creationTimestamp,omitempty"`
LeafNodes []*ResourceTreeNode `json:"leafNodes,omitempty"`
AdditionalInfo map[string]interface{} `json:"additionalInfo,omitempty"`
Object unstructured.Unstructured `json:"-"`
}

// GroupVersionKind returns the stored group, version, and kind from AppliedResource
@@ -137,3 +139,44 @@ func (obj *AppliedResource) GroupVersionKind() schema.GroupVersionKind {
func (rtn *ResourceTreeNode) GroupVersionKind() schema.GroupVersionKind {
return schema.FromAPIVersionAndKind(rtn.APIVersion, rtn.Kind)
}

// ResourceItem is the resource base info struct
type ResourceItem struct {
Cluster string `json:"cluster"`
Workload Workload `json:"workload"`
Component string `json:"component"`
Object unstructured.Unstructured `json:"object"`
PublishVersion string `json:"publishVersion"`
DeployVersion string `json:"deployVersion"`
}

// Workload is the workload resource base info
type Workload struct {
APIVersion string `json:"apiVersion"`
Kind string `json:"kind"`
Name string `json:"name"`
Namespace string `json:"namespace"`
}

// PodBase is the base struct of a pod list entry
type PodBase struct {
Cluster string `json:"cluster"`
Workload Workload `json:"workload"`
Component string `json:"component"`
Metadata struct {
CreationTime string `json:"creationTime"`
Name string `json:"name"`
Namespace string `json:"namespace"`
Version struct {
PublishVersion string `json:"publishVersion"`
DeployVersion string `json:"deployVersion"`
} `json:"version"`
Labels map[string]string `json:"labels"`
} `json:"metadata"`
Status struct {
HostIP string `json:"hostIP"`
NodeName string `json:"nodeName"`
Phase string `json:"phase"`
PodIP string `json:"podIP"`
} `json:"status"`
}

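PodBase appears designed to decode the pod entries that the pod-collecting VelaQL query returns as JSON; the field names line up with that payload. A minimal sketch of decoding such a payload, assuming a trimmed copy of the struct and an illustrative sample literal:

package main

import (
	"encoding/json"
	"fmt"
)

// podBase is trimmed to the fields used in this sketch; see the full
// PodBase struct above for the complete shape.
type podBase struct {
	Cluster  string `json:"cluster"`
	Metadata struct {
		Name      string `json:"name"`
		Namespace string `json:"namespace"`
	} `json:"metadata"`
	Status struct {
		Phase string `json:"phase"`
	} `json:"status"`
}

func main() {
	// Illustrative payload, not captured from a real cluster.
	raw := `[{"cluster":"local","metadata":{"name":"web-0","namespace":"default"},"status":{"phase":"Running"}}]`
	var pods []podBase
	if err := json.Unmarshal([]byte(raw), &pods); err != nil {
		panic(err)
	}
	fmt.Println(pods[0].Cluster, pods[0].Metadata.Name, pods[0].Status.Phase)
}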
@@ -20,8 +20,10 @@ import (
"encoding/json"

cuejson "cuelang.org/go/pkg/encoding/json"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

"github.com/oam-dev/kubevela/pkg/cue/model/value"
querytypes "github.com/oam-dev/kubevela/pkg/velaql/providers/query/types"
)

// fillQueryResult helps fill a query result which contains a k8s object into a *value.Value
@@ -40,3 +42,30 @@ func fillQueryResult(v *value.Value, res interface{}, paths ...string) error {
}
return v.Error()
}

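// buildResourceArray walks the resource tree depth-first; nodes without leaf
// nodes that match the given kind/apiVersion are collected as ResourceItems,
// with the node's direct parent recorded as the workload.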
func buildResourceArray(res querytypes.AppliedResource, parent, node *querytypes.ResourceTreeNode, kind string, apiVersion string) (pods []querytypes.ResourceItem) {
if node.LeafNodes != nil {
for _, subNode := range node.LeafNodes {
pods = append(pods, buildResourceArray(res, node, subNode, kind, apiVersion)...)
}
} else if node.Kind == kind && node.APIVersion == apiVersion {
pods = append(pods, buildResourceItem(res, querytypes.Workload{
APIVersion: parent.APIVersion,
Kind: parent.Kind,
Name: parent.Name,
Namespace: parent.Namespace,
}, node.Object))
}
return
}

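// buildResourceItem assembles a ResourceItem from the applied resource
// context, the owning workload and the object itself.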
func buildResourceItem(res querytypes.AppliedResource, workload querytypes.Workload, object unstructured.Unstructured) querytypes.ResourceItem {
return querytypes.ResourceItem{
Cluster: res.Cluster,
Workload: workload,
Component: res.Component,
Object: object,
PublishVersion: res.PublishVersion,
DeployVersion: res.DeployVersion,
}
}

@@ -79,7 +79,7 @@ func LoadApplication(namespace, appName string, c common.Args) (*v1beta1.Applica
}
app := &v1beta1.Application{}
if err := newClient.Get(context.TODO(), client.ObjectKey{Namespace: namespace, Name: appName}, app); err != nil {
return nil, err
return nil, fmt.Errorf("failed to load application %s from namespace %s: %w", appName, namespace, err)
}
return app, nil
}

@@ -87,7 +87,7 @@ func NewAddonCommand(c common.Args, order string, ioStreams cmdutil.IOStreams) *
Long: "Manage addons for extension.",
Annotations: map[string]string{
types.TagCommandOrder: order,
types.TagCommandType: types.TypeExtension,
types.TagCommandType: types.TypeApp,
},
}
cmd.AddCommand(

@@ -86,14 +86,14 @@ func NewCommandWithIOStreams(ioStream util.IOStreams) *cobra.Command {
NewCapabilityShowCommand(commandArgs, ioStream),

// Manage Apps
NewQlCommand(commandArgs, "10", ioStream),
NewListCommand(commandArgs, "9", ioStream),
NewAppStatusCommand(commandArgs, "8", ioStream),
NewListCommand(commandArgs, "10", ioStream),
NewAppStatusCommand(commandArgs, "9", ioStream),
NewDeleteCommand(commandArgs, "7", ioStream),
NewExecCommand(commandArgs, "6", ioStream),
NewPortForwardCommand(commandArgs, "5", ioStream),
NewLogsCommand(commandArgs, "4", ioStream),
NewLiveDiffCommand(commandArgs, "3", ioStream),
NewQlCommand(commandArgs, "3", ioStream),
NewLiveDiffCommand(commandArgs, "2", ioStream),
NewDryRunCommand(commandArgs, ioStream),
RevisionCommandGroup(commandArgs),


@@ -18,6 +18,7 @@ package cli

import (
"context"
"fmt"
"time"

"github.com/spf13/cobra"
@@ -32,6 +33,7 @@ import (
"github.com/oam-dev/kubevela/pkg/multicluster"
"github.com/oam-dev/kubevela/pkg/utils/common"
"github.com/oam-dev/kubevela/pkg/utils/util"
querytypes "github.com/oam-dev/kubevela/pkg/velaql/providers/query/types"
"github.com/oam-dev/kubevela/references/appfile"
)

@@ -49,17 +51,22 @@ type VelaExecOptions struct {
Stdin bool
TTY bool

ComponentName string
PodName string
ClusterName string
ContainerName string

Ctx context.Context
VelaC common.Args
Env *types.EnvMeta
App *v1beta1.Application

namespace string
resourceName string
resourceNamespace string
f k8scmdutil.Factory
kcExecOptions *cmdexec.ExecOptions
ClientSet kubernetes.Interface
namespace string
podName string
podNamespace string
f k8scmdutil.Factory
kcExecOptions *cmdexec.ExecOptions
ClientSet kubernetes.Interface
}

// NewExecCommand creates `exec` command
@@ -132,6 +139,10 @@ func NewExecCommand(c common.Args, order string, ioStreams util.IOStreams) *cobr
cmd.Flags().Duration(podRunningTimeoutFlag, defaultPodExecTimeout,
"The length of time (like 5s, 2m, or 3h, higher than zero) to wait until at least one pod is running",
)
cmd.Flags().StringVarP(&o.ComponentName, "component", "c", "", "filter the pod by the component name")
cmd.Flags().StringVarP(&o.ClusterName, "cluster", "", "", "filter the pod by the cluster name")
cmd.Flags().StringVarP(&o.PodName, "pod", "p", "", "specify the pod name")
cmd.Flags().StringVarP(&o.ContainerName, "container", "", "", "specify the container name")
addNamespaceAndEnvArg(cmd)

return cmd
@@ -148,21 +159,47 @@ func (o *VelaExecOptions) Init(ctx context.Context, c *cobra.Command, argsIn []s
}
o.App = app

targetResource, err := common.AskToChooseOneEnvResource(o.App)
pods, err := GetApplicationPods(ctx, app.Name, app.Namespace, o.VelaC, Filter{
Component: o.ComponentName,
Cluster: o.ClusterName,
})
if err != nil {
return err
}
var selectPod *querytypes.PodBase
if o.PodName != "" {
for i, pod := range pods {
if pod.Metadata.Name == o.PodName {
selectPod = &pods[i]
break
}
}
if selectPod == nil {
fmt.Println("The Pod you specified does not exist, please select it from the list.")
}
}
if selectPod == nil {
selectPod, err = AskToChooseOnePod(pods)
if err != nil {
return err
}
}

if selectPod == nil {
return nil
}

cf := genericclioptions.NewConfigFlags(true)
cf.Namespace = &targetResource.Namespace
var namespace = selectPod.Metadata.Namespace
cf.Namespace = &namespace
cf.WrapConfigFn = func(cfg *rest.Config) *rest.Config {
cfg.Wrap(multicluster.NewClusterGatewayRoundTripperWrapperGenerator(targetResource.Cluster))
cfg.Wrap(multicluster.NewClusterGatewayRoundTripperWrapperGenerator(selectPod.Cluster))
return cfg
}
o.f = k8scmdutil.NewFactory(k8scmdutil.NewMatchVersionFlags(cf))
o.resourceName = targetResource.Name
o.Ctx = multicluster.ContextWithClusterName(ctx, targetResource.Cluster)
o.resourceNamespace = targetResource.Namespace
o.podName = selectPod.Metadata.Name
o.Ctx = multicluster.ContextWithClusterName(ctx, selectPod.Cluster)
o.podNamespace = namespace
config, err := o.VelaC.GetConfig()
if err != nil {
return err
@@ -182,25 +219,18 @@ func (o *VelaExecOptions) Init(ctx context.Context, c *cobra.Command, argsIn []s

// Complete loads data from the command environment
func (o *VelaExecOptions) Complete() error {
podName, err := o.getPodName(o.resourceName)
if err != nil {
return err
}
o.kcExecOptions.StreamOptions.Stdin = o.Stdin
o.kcExecOptions.StreamOptions.TTY = o.TTY
o.kcExecOptions.StreamOptions.ContainerName = o.ContainerName

args := make([]string, len(o.Args))
copy(args, o.Args)
// args for kcExecOptions MUST be in such format:
// [podName, COMMAND...]
args[0] = podName
args[0] = o.podName
return o.kcExecOptions.Complete(o.f, o.Cmd, args, 1)
}

func (o *VelaExecOptions) getPodName(resourceName string) (string, error) {
return getPodNameForResource(o.Ctx, o.ClientSet, resourceName, o.resourceNamespace)
}

// Run executes a validated remote execution against a pod
func (o *VelaExecOptions) Run() error {
return o.kcExecOptions.Run()

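The exec flow above (and the logs flow below) resolves the target pod the same way: list the application's pods, honor an explicit --pod flag when it matches, and otherwise fall back to an interactive prompt. A self-contained sketch of that selection step; podInfo is a hypothetical stand-in for PodBase:

package main

import "fmt"

type podInfo struct{ Name string }

// pickPod returns the pod matching name, or nil so the caller can fall back
// to an interactive chooser, mirroring the flow in the commands.
func pickPod(pods []podInfo, name string) *podInfo {
	if name == "" {
		return nil
	}
	for i := range pods {
		if pods[i].Name == name {
			return &pods[i]
		}
	}
	return nil
}

func main() {
	pods := []podInfo{{Name: "web-0"}, {Name: "web-1"}}
	if p := pickPod(pods, "web-1"); p != nil {
		fmt.Println("selected", p.Name)
	} else {
		fmt.Println("fall back to interactive selection")
	}
}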
@@ -29,6 +29,7 @@ import (
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/wercker/stern/stern"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/selection"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
|
||||
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
|
||||
@@ -36,6 +37,7 @@ import (
|
||||
"github.com/oam-dev/kubevela/pkg/multicluster"
|
||||
"github.com/oam-dev/kubevela/pkg/utils/common"
|
||||
"github.com/oam-dev/kubevela/pkg/utils/util"
|
||||
querytypes "github.com/oam-dev/kubevela/pkg/velaql/providers/query/types"
|
||||
"github.com/oam-dev/kubevela/references/appfile"
|
||||
)
|
||||
|
||||
@@ -47,15 +49,6 @@ func NewLogsCommand(c common.Args, order string, ioStreams util.IOStreams) *cobr
|
||||
Short: "Tail logs for application.",
|
||||
Long: "Tail logs for vela application.",
|
||||
Args: cobra.ExactArgs(1),
|
||||
PreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
config, err := c.GetConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
largs.Args = c
|
||||
config.Wrap(multicluster.NewSecretModeMultiClusterRoundTripper)
|
||||
return nil
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
var err error
|
||||
largs.Namespace, err = GetFlagNamespaceOrEnv(cmd, c)
|
||||
@@ -66,10 +59,6 @@ func NewLogsCommand(c common.Args, order string, ioStreams util.IOStreams) *cobr
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
largs.Namespace, err = GetFlagNamespaceOrEnv(cmd, c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
largs.App = app
|
||||
ctx := context.Background()
|
||||
if err := largs.Run(ctx, ioStreams); err != nil {
|
||||
@@ -84,26 +73,78 @@ func NewLogsCommand(c common.Args, order string, ioStreams util.IOStreams) *cobr
|
||||
}
|
||||
|
||||
cmd.Flags().StringVarP(&largs.Output, "output", "o", "default", "output format for logs, support: [default, raw, json]")
|
||||
cmd.Flags().StringVarP(&largs.Container, "container", "c", "", "specify container name for output")
|
||||
cmd.Flags().StringVar(&largs.Name, "name", "", "specify resource name for output")
|
||||
cmd.Flags().StringVarP(&largs.ComponentName, "component", "c", "", "filter the pod by the component name")
|
||||
cmd.Flags().StringVarP(&largs.ClusterName, "cluster", "", "", "filter the pod by the cluster name")
|
||||
cmd.Flags().StringVarP(&largs.PodName, "pod", "p", "", "specify the pod name")
|
||||
cmd.Flags().StringVarP(&largs.ContainerName, "container", "", "", "specify the container name")
|
||||
addNamespaceAndEnvArg(cmd)
|
||||
return cmd
|
||||
}
|
||||
|
||||
// Args creates arguments for `logs` command
|
||||
type Args struct {
|
||||
Output string
|
||||
Args common.Args
|
||||
Namespace string
|
||||
Container string
|
||||
Name string
|
||||
App *v1beta1.Application
|
||||
Output string
|
||||
Args common.Args
|
||||
Namespace string
|
||||
ContainerName string
|
||||
PodName string
|
||||
ClusterName string
|
||||
ComponentName string
|
||||
App *v1beta1.Application
|
||||
}
|
||||
|
||||
// Run refer to the implementation at https://github.com/oam-dev/stern/blob/master/stern/main.go
|
||||
func (l *Args) Run(ctx context.Context, ioStreams util.IOStreams) error {
|
||||
// TODO(wonderflow): we could get labels from service to narrow the pods scope selected
|
||||
labelSelector := labels.Everything()
|
||||
pods, err := GetApplicationPods(ctx, l.App.Name, l.App.Namespace, l.Args, Filter{
|
||||
Component: l.ComponentName,
|
||||
Cluster: l.ClusterName,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var selectPod *querytypes.PodBase
|
||||
if l.PodName != "" {
|
||||
for i, pod := range pods {
|
||||
if pod.Metadata.Name == l.PodName {
|
||||
selectPod = &pods[i]
|
||||
break
|
||||
}
|
||||
}
|
||||
if selectPod == nil {
|
||||
fmt.Println("The Pod you specified does not exist, please select it from the list.")
|
||||
}
|
||||
}
|
||||
if selectPod == nil {
|
||||
selectPod, err = AskToChooseOnePod(pods)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if selectPod == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if selectPod.Cluster != "" {
|
||||
ctx = multicluster.ContextWithClusterName(ctx, selectPod.Cluster)
|
||||
}
|
||||
pod, err := regexp.Compile(selectPod.Metadata.Name + ".*")
|
||||
if err != nil {
|
||||
return fmt.Errorf("fail to compile '%s' for logs query", selectPod.Metadata.Name+".*")
|
||||
}
|
||||
container := regexp.MustCompile(".*")
|
||||
if l.ContainerName != "" {
|
||||
container = regexp.MustCompile(l.ContainerName + ".*")
|
||||
}
|
||||
namespace := selectPod.Metadata.Namespace
|
||||
selector := labels.NewSelector()
|
||||
for k, v := range selectPod.Metadata.Labels {
|
||||
req, _ := labels.NewRequirement(k, selection.Equals, []string{v})
|
||||
if req != nil {
|
||||
selector = selector.Add(*req)
|
||||
}
|
||||
}
|
||||
|
||||
config, err := l.Args.GetConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -112,36 +153,14 @@ func (l *Args) Run(ctx context.Context, ioStreams util.IOStreams) error {
    if err != nil {
        return err
    }
    selectedRes, err := common.AskToChooseOneEnvResource(l.App, l.Name)
    if err != nil {
        return err
    }
    if selectedRes.Kind == "Configuration" {
        selectedRes.Cluster = "local"
        if l.App.DeletionTimestamp == nil {
            selectedRes.Name += "-apply"
            labelSelector = labels.SelectorFromSet(map[string]string{
                "job-name": selectedRes.Name,
            })
        }
        // TODO(zzxwill): We should also support showing logs when the terraform is destroying resources.
        // But currently, when deleting an application, it won't hold on its deletion. So `vela logs` misses the application
        // parameter and is unable to display any logs.
    }

    if selectedRes.Cluster != "" && selectedRes.Cluster != "local" {
        ctx = multicluster.ContextWithClusterName(ctx, selectedRes.Cluster)
    }
    pod, err := regexp.Compile(selectedRes.Name + "-.*")
    if err != nil {
        return fmt.Errorf("fail to compile '%s' for logs query", selectedRes.Name+".*")
    }
    container := regexp.MustCompile(".*")
    if l.Container != "" {
        container = regexp.MustCompile(l.Container + ".*")
    }
    namespace := selectedRes.Namespace
    added, removed, err := stern.Watch(ctx, clientSet.CoreV1().Pods(namespace), pod, container, nil, []stern.ContainerState{stern.RUNNING, stern.TERMINATED}, labelSelector)
    added, removed, err := stern.Watch(ctx,
        clientSet.CoreV1().Pods(namespace),
        pod,
        container,
        nil,
        []stern.ContainerState{stern.RUNNING, stern.TERMINATED},
        selector,
    )
    if err != nil {
        return err
    }
@@ -163,9 +182,9 @@ func (l *Args) Run(ctx context.Context, ioStreams util.IOStreams) error {
    switch l.Output {
    case "default":
        if color.NoColor {
            t = "{{.PodName}} {{.ContainerName}} {{.Message}}"
            t = "{{.ContainerName}} {{.Message}}"
        } else {
            t = "{{color .PodColor .PodName}} {{color .ContainerColor .ContainerName}} {{.Message}}"
            t = "{{color .ContainerColor .ContainerName}} {{.Message}}"
        }
    case "raw":
        t = "{{.Message}}"

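The `t` values above are plain Go text/template strings (the colored variants additionally use a stern-provided `color` helper). A minimal sketch of how the no-color default renders a log entry; the struct and sample values are hypothetical stand-ins for what stern feeds the template:

    package main

    import (
        "os"
        "text/template"
    )

    type logEntry struct {
        ContainerName string
        Message       string
    }

    func main() {
        // Same shape as the no-color default template above.
        t := template.Must(template.New("log").Parse("{{.ContainerName}} {{.Message}}\n"))
        _ = t.Execute(os.Stdout, logEntry{ContainerName: "express-server", Message: "hello world"})
    }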
69
references/cli/pods.go
Normal file
@@ -0,0 +1,69 @@
/*
Copyright 2022 The KubeVela Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cli

import (
    "context"
    "fmt"

    "github.com/gosuri/uitable"

    "github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
    "github.com/oam-dev/kubevela/pkg/utils/common"
    "github.com/oam-dev/kubevela/references/appfile"
)

func printAppPods(appName string, namespace string, f Filter, velaC common.Args) error {
    app, err := appfile.LoadApplication(namespace, appName, velaC)
    if err != nil {
        return err
    }
    var podArgs = PodArgs{
        Args:      velaC,
        Namespace: namespace,
        Filter:    f,
        App:       app,
    }

    table, err := podArgs.listPods(context.Background())
    if err != nil {
        return err
    }
    fmt.Println(table.String())
    return nil
}

// PodArgs creates arguments for `pods` command
type PodArgs struct {
    Args      common.Args
    Namespace string
    Filter    Filter
    App       *v1beta1.Application
}

func (p *PodArgs) listPods(ctx context.Context) (*uitable.Table, error) {
    pods, err := GetApplicationPods(ctx, p.App.Name, p.Namespace, p.Args, p.Filter)
    if err != nil {
        return nil, err
    }
    table := uitable.New()
    table.AddRow("CLUSTER", "COMPONENT", "POD NAME", "NAMESPACE", "PHASE", "CREATE TIME", "REVISION", "HOST")
    for _, pod := range pods {
        table.AddRow(pod.Cluster, pod.Component, pod.Metadata.Name, pod.Metadata.Namespace, pod.Status.Phase, pod.Metadata.CreationTime, pod.Metadata.Version.DeployVersion, pod.Status.NodeName)
    }
    return table, nil
}

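listPods leans on github.com/gosuri/uitable to align the columns. A minimal sketch of that rendering, with a made-up pod row for illustration:

    package main

    import (
        "fmt"

        "github.com/gosuri/uitable"
    )

    func main() {
        // uitable pads each cell so the columns line up when printed.
        table := uitable.New()
        table.AddRow("CLUSTER", "COMPONENT", "POD NAME")
        table.AddRow("local", "express-server", "express-server-b768d95b7-qnwvk")
        fmt.Println(table.String())
    }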
@@ -24,10 +24,8 @@ import (
    "strconv"
    "strings"

    "github.com/AlecAivazis/survey/v2"
    "github.com/pkg/errors"
    "github.com/spf13/cobra"
    corev1 "k8s.io/api/core/v1"
    "k8s.io/cli-runtime/pkg/genericclioptions"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
@@ -43,28 +41,37 @@ import (
    "github.com/oam-dev/kubevela/pkg/multicluster"
    "github.com/oam-dev/kubevela/pkg/utils/common"
    "github.com/oam-dev/kubevela/pkg/utils/util"
    types2 "github.com/oam-dev/kubevela/pkg/velaql/providers/query/types"
    querytypes "github.com/oam-dev/kubevela/pkg/velaql/providers/query/types"
    "github.com/oam-dev/kubevela/references/appfile"
)

// VelaPortForwardOptions for vela port-forward
type VelaPortForwardOptions struct {
    Cmd       *cobra.Command
    Args      []string
    ioStreams util.IOStreams
    Cmd           *cobra.Command
    Args          []string
    ioStreams     util.IOStreams
    ClusterName   string
    ComponentName string
    ResourceName  string
    ResourceType  string

    Ctx   context.Context
    VelaC common.Args
    Env   *types.EnvMeta
    Ctx   context.Context
    VelaC common.Args

    namespace string
    App       *v1beta1.Application
    targetResource *types2.ServiceEndpoint
    targetResource struct {
        kind      string
        name      string
        cluster   string
        namespace string
    }
    targetPort int

    f                    k8scmdutil.Factory
    kcPortForwardOptions *cmdpf.PortForwardOptions
    ClientSet            kubernetes.Interface
    Client               client.Client

    namespace string
}

// NewPortForwardCommand is vela port-forward command
@@ -89,7 +96,12 @@ func NewPortForwardCommand(c common.Args, order string, ioStreams util.IOStreams
            ioStreams.Error("Please specify application name.")
            return nil
        }

        if o.ResourceType != "pod" && o.ResourceType != "service" {
            o.ResourceType = "service"
        }
        if o.ResourceType == "pod" && len(args) < 2 {
            return errors.New("no port specified for port-forward")
        }
        var err error
        o.namespace, err = GetFlagNamespaceOrEnv(cmd, c)
        if err != nil {
@@ -122,7 +134,10 @@ func NewPortForwardCommand(c common.Args, order string, ioStreams util.IOStreams
    cmd.Flags().Duration(podRunningTimeoutFlag, defaultPodExecTimeout,
        "The length of time (like 5s, 2m, or 3h, higher than zero) to wait until at least one pod is running",
    )

    cmd.Flags().StringVarP(&o.ComponentName, "component", "c", "", "filter the pod by the component name")
    cmd.Flags().StringVarP(&o.ClusterName, "cluster", "", "", "filter the pod by the cluster name")
    cmd.Flags().StringVarP(&o.ResourceName, "resource-name", "", "", "specify the resource name")
    cmd.Flags().StringVarP(&o.ResourceType, "resource-type", "t", "", "specify the resource type, one of: service, pod")
    addNamespaceAndEnvArg(cmd)
    return cmd
}
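With the new flags, the command can target either a service or a concrete pod. A usage sketch in the same style as the `vela status` examples further down (the application name, port mapping, and pod name are hypothetical):

    # forward local port 8080 to port 8000 of a service chosen interactively
    vela port-forward first-vela-app 8080:8000

    # forward to one specific pod instead
    vela port-forward first-vela-app 8080:8000 --resource-type pod --resource-name express-server-b768d95b7-qnwvk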
@@ -139,56 +154,89 @@ func (o *VelaPortForwardOptions) Init(ctx context.Context, cmd *cobra.Command, a
    }
    o.App = app

    rawEndpoints, err := GetServiceEndpoints(o.Ctx, o.App.Name, o.namespace, o.VelaC, Filter{})
    if err != nil {
        return err
    }
    var endpoints []types2.ServiceEndpoint
    for _, ep := range rawEndpoints {
        if ep.Ref.Kind != "Service" {
            continue
    if o.ResourceType == "service" {
        var selectService *querytypes.ResourceItem
        services, err := GetApplicationServices(o.Ctx, o.App.Name, o.namespace, o.VelaC, Filter{
            Component: o.ComponentName,
            Cluster:   o.ClusterName,
        })
        if err != nil {
            return fmt.Errorf("failed to load the application services: %w", err)
        }
        endpoints = append(endpoints, ep)
    }
    if len(endpoints) == 0 {
        inSide := func(str string) bool {
            for _, s := range []string{"Deployment", "StatefulSet", "CloneSet", "Job"} {
                if str == s {
                    return true

        if o.ResourceName != "" {
            for i, service := range services {
                if service.Object.GetName() == o.ResourceName {
                    selectService = &services[i]
                    break
                }
            }
            return false
        }
        for _, ap := range app.Status.AppliedResources {
            if !inSide(ap.Kind) {
                continue
            if selectService == nil {
                fmt.Println("The Service you specified does not exist, please select it from the list.")
            }
            endpoints = append(endpoints, types2.ServiceEndpoint{
                Endpoint: types2.Endpoint{},
                Ref: corev1.ObjectReference{
                    Namespace:  ap.Namespace,
                    Name:       ap.Name,
                    Kind:       ap.Kind,
                    APIVersion: ap.APIVersion,
                },
                Cluster: ap.Cluster,
            })
        }
        if len(services) > 0 {
            if selectService == nil {
                selectService, o.targetPort, err = AskToChooseOneService(services, len(o.Args) < 2)
                if err != nil {
                    return err
                }
            }
            if selectService != nil {
                o.targetResource.cluster = selectService.Cluster
                o.targetResource.name = selectService.Object.GetName()
                o.targetResource.namespace = selectService.Object.GetNamespace()
                o.targetResource.kind = selectService.Object.GetKind()
            }
        } else if o.ResourceName == "" {
            // If users do not specify the resource name and there is no service, switch to query the pod
            o.ResourceType = "pod"
        }
    }
    targetResource, err := AskToChooseOnePortForwardEndpoint(endpoints)
    if err != nil {
        return err

    if o.ResourceType == "pod" {
        var selectPod *querytypes.PodBase
        pods, err := GetApplicationPods(o.Ctx, o.App.Name, o.namespace, o.VelaC, Filter{
            Component: o.ComponentName,
            Cluster:   o.ClusterName,
        })
        if err != nil {
            return fmt.Errorf("failed to load the application pods: %w", err)
        }

        if o.ResourceName != "" {
            for i, pod := range pods {
                if pod.Metadata.Name == o.ResourceName {
                    selectPod = &pods[i]
                    break
                }
            }
            if selectPod == nil {
                fmt.Println("The Pod you specified does not exist, please select it from the list.")
            }
        }
        if selectPod == nil {
            selectPod, err = AskToChooseOnePod(pods)
            if err != nil {
                return err
            }
        }
        if selectPod != nil {
            o.targetResource.cluster = selectPod.Cluster
            o.targetResource.name = selectPod.Metadata.Name
            o.targetResource.namespace = selectPod.Metadata.Namespace
            o.targetResource.kind = "Pod"
        }
    }

    cf := genericclioptions.NewConfigFlags(true)
    cf.Namespace = pointer.String(o.namespace)
    cf.WrapConfigFn = func(cfg *rest.Config) *rest.Config {
        cfg.Wrap(multicluster.NewClusterGatewayRoundTripperWrapperGenerator(targetResource.Cluster))
        cfg.Wrap(multicluster.NewClusterGatewayRoundTripperWrapperGenerator(o.targetResource.cluster))
        return cfg
    }
    o.f = k8scmdutil.NewFactory(k8scmdutil.NewMatchVersionFlags(cf))
    o.targetResource = &targetResource
    o.Ctx = multicluster.ContextWithClusterName(ctx, targetResource.Cluster)
    o.Ctx = multicluster.ContextWithClusterName(ctx, o.targetResource.cluster)
    config, err := o.VelaC.GetConfig()
    if err != nil {
        return err
@@ -209,54 +257,14 @@ func (o *VelaPortForwardOptions) Init(ctx context.Context, cmd *cobra.Command, a
    return nil
}

// getPortsFromApp works for compatibility
func getPortsFromApp(app *v1beta1.Application) int {
    if app == nil || len(app.Spec.Components) == 0 {
        return 0
    }
    _, configs := appfile.GetApplicationSettings(app, app.Spec.Components[0].Name)
    for k, v := range configs {
        portConv := func(v interface{}) int {
            switch pv := v.(type) {
            case int:
                return pv
            case string:
                data, err := strconv.ParseInt(pv, 10, 64)
                if err != nil {
                    return 0
                }
                return int(data)
            case float64:
                return int(pv)
            }
            return 0
        }
        if k == "port" {
            return portConv(v)
        }
        if k == "ports" {
            portArray := v.([]interface{})
            for _, p := range portArray {
                return portConv(p.(map[string]interface{})["port"])
            }
        }
    }
    return 0
}

// Complete will complete the config of port-forward
func (o *VelaPortForwardOptions) Complete() error {

    var forwardTypeName string
    switch o.targetResource.Ref.Kind {
    switch o.targetResource.kind {
    case "Service":
        forwardTypeName = "svc/" + o.targetResource.Ref.Name
    case "Deployment", "StatefulSet", "CloneSet", "Job":
        var err error
        forwardTypeName, err = getPodNameForResource(o.Ctx, o.ClientSet, o.targetResource.Ref.Name, o.targetResource.Ref.Namespace)
        if err != nil {
            return err
        }
        forwardTypeName = "svc/" + o.targetResource.name
    case "Pod":
        forwardTypeName = "pod/" + o.targetResource.name
    }

    if len(o.Args) < 2 {
@@ -269,10 +277,7 @@ func (o *VelaPortForwardOptions) Complete() error {
        }
        return val
    }
    pt := o.targetResource.Endpoint.Port
    if pt == 0 {
        pt = getPortsFromApp(o.App)
    }
    pt := o.targetPort
    if pt == 0 {
        return errors.New("no port specified for port-forward")
    }
@@ -324,35 +329,3 @@ func (f *defaultPortForwarder) ForwardPorts(method string, url *url.URL, opts cm
    }
    return fw.ForwardPorts()
}

// AskToChooseOnePortForwardEndpoint will ask user to select one applied resource as port forward endpoint
func AskToChooseOnePortForwardEndpoint(endpoints []types2.ServiceEndpoint) (types2.ServiceEndpoint, error) {
    if len(endpoints) == 0 {
        return types2.ServiceEndpoint{}, errors.New("no endpoint found in your application")
    }
    if len(endpoints) == 1 {
        return endpoints[0], nil
    }
    lines := formatEndpoints(endpoints)
    header := strings.Join(lines[0], " | ")
    var ops []string
    for i := 1; i < len(lines); i++ {
        ops = append(ops, strings.Join(lines[i], " | "))
    }
    prompt := &survey.Select{
        Message: fmt.Sprintf("You have %d endpoints in your app. Please choose one:\n%s", len(ops), header),
        Options: ops,
    }
    var selectedRsc string
    err := survey.AskOne(prompt, &selectedRsc)
    if err != nil {
        return types2.ServiceEndpoint{}, fmt.Errorf("choosing endpoint err %w", err)
    }
    for k, resource := range ops {
        if selectedRsc == resource {
            return endpoints[k], nil
        }
    }
    // it should never happen.
    return types2.ServiceEndpoint{}, errors.New("no endpoint matches your choice")
}

@@ -108,6 +108,13 @@ func NewAppStatusCommand(c common.Args, order string, ioStreams cmdutil.IOStream
        # Show detailed info in tree
        vela status first-vela-app --tree --detail --detail-format list

        # Show pod list
        vela status first-vela-app --pod
        vela status first-vela-app --pod --component express-server --cluster local

        # Show endpoint list
        vela status first-vela-app --endpoint

        # Get raw Application yaml (without managedFields)
        vela status first-vela-app -o yaml

@@ -128,6 +135,25 @@ func NewAppStatusCommand(c common.Args, order string, ioStreams cmdutil.IOStream
            if printTree, err := cmd.Flags().GetBool("tree"); err == nil && printTree {
                return printApplicationTree(c, cmd, appName, namespace)
            }
            if printPod, err := cmd.Flags().GetBool("pod"); err == nil && printPod {
                component, _ := cmd.Flags().GetString("component")
                cluster, _ := cmd.Flags().GetString("cluster")
                f := Filter{
                    Component: component,
                    Cluster:   cluster,
                }
                return printAppPods(appName, namespace, f, c)
            }
            showEndpoints, err := cmd.Flags().GetBool("endpoint")
            if showEndpoints && err == nil {
                component, _ := cmd.Flags().GetString("component")
                cluster, _ := cmd.Flags().GetString("cluster")
                f := Filter{
                    Component: component,
                    Cluster:   cluster,
                }
                return printAppEndpoints(ctx, appName, namespace, f, c, false)
            }
            newClient, err := c.GetClient()
            if err != nil {
                return err
@@ -135,14 +161,6 @@ func NewAppStatusCommand(c common.Args, order string, ioStreams cmdutil.IOStream
            if outputFormat != "" {
                return printRawApplication(context.Background(), c, outputFormat, cmd.OutOrStdout(), namespace, appName)
            }
            showEndpoints, err := cmd.Flags().GetBool("endpoint")
            if showEndpoints && err == nil {
                component, _ := cmd.Flags().GetString("component")
                f := Filter{
                    Component: component,
                }
                return printAppEndpoints(ctx, appName, namespace, f, c, false)
            }
            return printAppStatus(ctx, newClient, ioStreams, appName, namespace, cmd, c)
        },
        Annotations: map[string]string{
@@ -152,8 +170,10 @@ func NewAppStatusCommand(c common.Args, order string, ioStreams cmdutil.IOStream
    }
    cmd.Flags().StringP("svc", "s", "", "service name")
    cmd.Flags().BoolP("endpoint", "p", false, "show all service endpoints of the application")
    cmd.Flags().StringP("component", "c", "", "filter service endpoints by component name")
    cmd.Flags().StringP("component", "c", "", "filter the endpoints or pods by component name")
    cmd.Flags().StringP("cluster", "", "", "filter the endpoints or pods by cluster name")
    cmd.Flags().BoolP("tree", "t", false, "display the application resources into tree structure")
    cmd.Flags().BoolP("pod", "", false, "show pod list of the application")
    cmd.Flags().BoolP("detail", "d", false, "display the realtime details of application resources, must be used with --tree")
    cmd.Flags().StringP("detail-format", "", "inline", "the format for displaying details, must be used with --detail. Can be one of inline, wide, list, table, raw.")
    cmd.Flags().StringVarP(&outputFormat, "output", "o", "", "raw Application output format. One of: (json, yaml, jsonpath)")

@@ -19,7 +19,6 @@ package cli
import (
    "bufio"
    "bytes"
    "context"
    "encoding/json"
    "fmt"
    "io"
@@ -27,34 +26,18 @@ import (
    "os"
    "strings"

    "github.com/AlecAivazis/survey/v2"
    "github.com/pkg/errors"
    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/client-go/util/jsonpath"
    "k8s.io/kubectl/pkg/cmd/get"
    "sigs.k8s.io/yaml"

    "github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
    "github.com/oam-dev/kubevela/pkg/utils/common"
    "github.com/oam-dev/kubevela/pkg/velaql/providers/query/types"
)

func getPodNameForResource(ctx context.Context, clientSet kubernetes.Interface, resourceName string, resourceNamespace string) (string, error) {
    podList, err := clientSet.CoreV1().Pods(resourceNamespace).List(ctx, v1.ListOptions{})
    if err != nil {
        return "", err
    }
    var pods []string
    for _, p := range podList.Items {
        if strings.HasPrefix(p.Name, resourceName) {
            pods = append(pods, p.Name)
        }
    }
    if len(pods) < 1 {
        return "", fmt.Errorf("no pods found created by resource %s", resourceName)
    }
    return common.AskToChooseOnePods(pods)
}

// UserInput user input in command
type UserInput struct {
    Writer io.Writer
@@ -157,3 +140,92 @@ func formatApplicationString(format string, app *v1beta1.Application) (string, e

    return ret, nil
}

// AskToChooseOnePod will ask user to select one pod
func AskToChooseOnePod(pods []types.PodBase) (*types.PodBase, error) {
    if len(pods) == 0 {
        return nil, errors.New("no pod found in your application")
    }
    if len(pods) == 1 {
        return &pods[0], nil
    }
    var ops []string
    for i := 0; i < len(pods); i++ {
        pod := pods[i]
        ops = append(ops, fmt.Sprintf("%s | %s | %s", pod.Cluster, pod.Component, pod.Metadata.Name))
    }
    prompt := &survey.Select{
        Message: fmt.Sprintf("There are %d pods matching your filter conditions. Please choose one:\nCluster | Component | Pod", len(ops)),
        Options: ops,
    }
    var selectedRsc string
    err := survey.AskOne(prompt, &selectedRsc)
    if err != nil {
        return nil, fmt.Errorf("choosing pod err %w", err)
    }
    for k, resource := range ops {
        if selectedRsc == resource {
            return &pods[k], nil
        }
    }
    // it should never happen.
    return nil, errors.New("no pod matches your choice")
}

// AskToChooseOneService will ask user to select one service and/or port
func AskToChooseOneService(services []types.ResourceItem, selectPort bool) (*types.ResourceItem, int, error) {
    if len(services) == 0 {
        return nil, 0, errors.New("no service found in your application")
    }
    var ops []string
    var res []struct {
        item types.ResourceItem
        port int
    }
    for i := 0; i < len(services); i++ {
        obj := services[i]
        service := &corev1.Service{}
        if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object.Object, service); err == nil {
            if selectPort {
                for _, port := range service.Spec.Ports {
                    ops = append(ops, fmt.Sprintf("%s | %s | %s:%d", obj.Cluster, obj.Component, obj.Object.GetName(), port.Port))
                    res = append(res, struct {
                        item types.ResourceItem
                        port int
                    }{
                        item: obj,
                        port: int(port.Port),
                    })
                }
            } else {
                ops = append(ops, fmt.Sprintf("%s | %s | %s", obj.Cluster, obj.Component, obj.Object.GetName()))
                res = append(res, struct {
                    item types.ResourceItem
                    port int
                }{
                    item: obj,
                })
            }

        }
    }
    if len(ops) == 1 {
        return &res[0].item, res[0].port, nil
    }
    prompt := &survey.Select{
        Message: fmt.Sprintf("There are %d services matching your filter conditions. Please choose one:\nCluster | Component | Service", len(ops)),
        Options: ops,
    }
    var selectedRsc string
    err := survey.AskOne(prompt, &selectedRsc)
    if err != nil {
        return nil, 0, fmt.Errorf("choosing service err %w", err)
    }
    for k, resource := range ops {
        if selectedRsc == resource {
            return &res[k].item, res[k].port, nil
        }
    }
    // it should never happen.
    return nil, 0, errors.New("no service matches your choice")
}

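All of the choosers above follow the same survey/v2 pattern: render each candidate as a formatted row, prompt once, then map the chosen string back to its index. A minimal, runnable sketch of that pattern (the option rows are hypothetical):

    package main

    import (
        "fmt"

        "github.com/AlecAivazis/survey/v2"
    )

    func main() {
        ops := []string{
            "local | express-server | express-server-b768d95b7-qnwvk",
            "local | express-server | express-server-b768d95b7-x2ljz",
        }
        prompt := &survey.Select{
            Message: "Please choose one:\nCluster | Component | Pod",
            Options: ops,
        }
        var selected string
        if err := survey.AskOne(prompt, &selected); err != nil {
            fmt.Println("choosing err:", err)
            return
        }
        // Map the chosen string back to its index, as the CLI helpers do.
        for k, op := range ops {
            if selected == op {
                fmt.Println("chosen index:", k)
            }
        }
    }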
@@ -28,6 +28,7 @@ import (

    "github.com/oam-dev/kubevela/apis/types"
    "github.com/oam-dev/kubevela/pkg/cue/model/value"
    "github.com/oam-dev/kubevela/pkg/multicluster"
    "github.com/oam-dev/kubevela/pkg/utils"
    "github.com/oam-dev/kubevela/pkg/utils/common"
    "github.com/oam-dev/kubevela/pkg/utils/util"
@@ -285,6 +286,77 @@ func GetServiceEndpoints(ctx context.Context, appName string, namespace string,
    return response.Endpoints, nil
}

// GetApplicationPods gets the pods by velaQL
func GetApplicationPods(ctx context.Context, appName string, namespace string, velaC common.Args, f Filter) ([]querytypes.PodBase, error) {
    params := map[string]string{
        "appName": appName,
        "appNs":   namespace,
    }
    if f.Component != "" {
        params["name"] = f.Component
    }
    if f.Cluster != "" && f.ClusterNamespace != "" {
        params["cluster"] = f.Cluster
        params["clusterNs"] = f.ClusterNamespace
    }

    velaQL := MakeVelaQL("component-pod-view", params, "status")
    queryView, err := velaql.ParseVelaQL(velaQL)
    if err != nil {
        return nil, err
    }
    queryValue, err := QueryValue(ctx, velaC, &queryView)
    if err != nil {
        return nil, err
    }
    var response = struct {
        Pods  []querytypes.PodBase `json:"podList"`
        Error string               `json:"error"`
    }{}
    if err := queryValue.UnmarshalTo(&response); err != nil {
        return nil, err
    }
    if response.Error != "" {
        return nil, fmt.Errorf(response.Error)
    }
    return response.Pods, nil
}
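The e2e tests later in this change query the same views over HTTP as `view{k=v,...}.status`, which is the expression shape MakeVelaQL presumably assembles here. A small, self-contained sketch of that formatting (formatVelaQL is a hypothetical stand-in, not the real helper):

    package main

    import (
        "fmt"
        "sort"
        "strings"
    )

    // formatVelaQL renders view{k1=v1,k2=v2}.export, the query shape used by
    // the CLI and the e2e tests; keys are sorted only to keep output stable.
    func formatVelaQL(view string, params map[string]string, export string) string {
        keys := make([]string, 0, len(params))
        for k := range params {
            keys = append(keys, k)
        }
        sort.Strings(keys)
        kv := make([]string, 0, len(keys))
        for _, k := range keys {
            kv = append(kv, fmt.Sprintf("%s=%s", k, params[k]))
        }
        return fmt.Sprintf("%s{%s}.%s", view, strings.Join(kv, ","), export)
    }

    func main() {
        q := formatVelaQL("component-pod-view",
            map[string]string{"appName": "example-app", "appNs": "test-velaql"},
            "status")
        fmt.Println(q) // component-pod-view{appName=example-app,appNs=test-velaql}.status
    }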

// GetApplicationServices gets the services by velaQL
func GetApplicationServices(ctx context.Context, appName string, namespace string, velaC common.Args, f Filter) ([]querytypes.ResourceItem, error) {
    params := map[string]string{
        "appName": appName,
        "appNs":   namespace,
    }
    if f.Component != "" {
        params["name"] = f.Component
    }
    if f.Cluster != "" && f.ClusterNamespace != "" {
        params["cluster"] = f.Cluster
        params["clusterNs"] = f.ClusterNamespace
    }
    velaQL := MakeVelaQL("component-service-view", params, "status")
    queryView, err := velaql.ParseVelaQL(velaQL)
    if err != nil {
        return nil, err
    }
    queryValue, err := QueryValue(ctx, velaC, &queryView)
    if err != nil {
        return nil, err
    }
    var response = struct {
        Services []querytypes.ResourceItem `json:"services"`
        Error    string                    `json:"error"`
    }{}
    if err := queryValue.UnmarshalTo(&response); err != nil {
        return nil, err
    }
    if response.Error != "" {
        return nil, fmt.Errorf(response.Error)
    }
    return response.Services, nil
}

// QueryValue gets queryValue from velaQL
func QueryValue(ctx context.Context, velaC common.Args, queryView *velaql.QueryView) (*value.Value, error) {
    dm, err := velaC.GetDiscoveryMapper()
@@ -299,6 +371,7 @@ func QueryValue(ctx context.Context, velaC common.Args, queryView *velaql.QueryV
    if err != nil {
        return nil, err
    }
    config.Wrap(multicluster.NewSecretModeMultiClusterRoundTripper)
    client, err := velaC.GetClient()
    if err != nil {
        return nil, err

@@ -7,119 +7,45 @@ data:
  template: |
    import (
        "vela/ql"
        "vela/op"
        "strings"
    )
    )

    parameter: {
        appName: string
        appNs: string
        name?: string
        cluster?: string
        clusterNs?: string
    }
    parameter: {
        appName: string
        appNs: string
        name?: string
        cluster?: string
        clusterNs?: string
    }

    annotationDeployVersion: "app.oam.dev/deployVersion"
    annotationPublishVersion: "app.oam.dev/publishVersion"
    labelComponentName: "app.oam.dev/component"

    ignoreCollectPodKindMap: {
        "ConfigMap": true
        "Endpoints": true
        "LimitRange": true
        "Namespace": true
        "Node": true
        "PersistentVolumeClaim": true
        "PersistentVolume": true
        "ReplicationController": true
        "ResourceQuota": true
        "ServiceAccount": true
        "Service": true
        "Event": true
        "Ingress": true
        "StorageClass": true
        "NetworkPolicy": true
        "PodDisruptionBudget": true
        "PodSecurityPolicy": true
        "PriorityClass": true
        "CustomResourceDefinition": true
        "HorizontalPodAutoscaler": true
        "CertificateSigningRequest": true
        "ManagedCluster": true
        "ManagedClusterSetBinding": true
        "ManagedClusterSet": true
        "ApplicationRevision": true
        "ComponentDefinition": true
        "DefinitionRevision": true
        "EnvBinding": true
        "PolicyDefinition": true
        "ResourceTracker": true
        "ScopeDefinition": true
        "TraitDefinition": true
        "WorkflowStepDefinition": true
        "WorkloadDefinition": true
        "GitRepository": true
        "HelmRepository": true
        "ComponentStatus": true
    }

    resources: ql.#ListResourcesInApp & {
        app: {
            name: parameter.appName
            namespace: parameter.appNs
            filter: {
                if parameter.cluster != _|_ {
                    cluster: parameter.cluster
                }
                if parameter.clusterNs != _|_ {
                    clusterNamespace: parameter.clusterNs
                }
                if parameter.name != _|_ {
                    components: [parameter.name]
    result: ql.#CollectPods & {
        app: {
            name: parameter.appName
            namespace: parameter.appNs
            filter: {
                if parameter.cluster != _|_ {
                    cluster: parameter.cluster
                }
                if parameter.clusterNs != _|_ {
                    clusterNamespace: parameter.clusterNs
                }
                if parameter.name != _|_ {
                    components: [parameter.name]
                }
            }
        }
    }
    }

    if resources.err == _|_ {
        collectedPods: op.#Steps & {
            for i, resource in resources.list if ignoreCollectPodKindMap[resource.object.kind] == _|_ {
                "\(i)": ql.#CollectPods & {
                    value: resource.object
                    cluster: resource.cluster
                }
            }
        }
        podsWithCluster: [ for pods in collectedPods if pods.list != _|_ && pods.list != null for podObj in pods.list {
            cluster: pods.cluster
            obj: podObj
            workload: {
                apiVersion: pods.value.apiVersion
                kind: pods.value.kind
                name: pods.value.metadata.name
                namespace: pods.value.metadata.namespace
            }
            if pods.value.metadata.labels[labelComponentName] != _|_ {
                component: pods.value.metadata.labels[labelComponentName]
            }
            if pods.value.metadata.annotations[annotationPublishVersion] != _|_ {
                publishVersion: pods.value.metadata.annotations[annotationPublishVersion]
            }
            if pods.value.metadata.annotations[annotationDeployVersion] != _|_ {
                deployVersion: pods.value.metadata.annotations[annotationDeployVersion]
            }
        }]
        podsError: [ for pods in collectedPods if pods.err != _|_ {pods.err}]
        status: {
            if len(podsError) == 0 && podsWithCluster != _|_ {
                podList: [ for pod in podsWithCluster {
    if result.err == _|_ {
        status: {
            podList: [ for pod in result.list if pod.object != _|_ {
                cluster: pod.cluster
                workload: pod.workload
                component: pod.component
                metadata: {
                    name: pod.obj.metadata.name
                    namespace: pod.obj.metadata.namespace
                    creationTime: pod.obj.metadata.creationTimestamp
                    name: pod.object.metadata.name
                    namespace: pod.object.metadata.namespace
                    creationTime: pod.object.metadata.creationTimestamp
                    labels: pod.object.metadata.labels
                    version: {
                        if pod.publishVersion != _|_ {
                            publishVersion: pod.publishVersion
@@ -130,27 +56,20 @@ data:
                        }
                    }
                status: {
                    phase: pod.obj.status.phase
                    phase: pod.object.status.phase
                    // refer to https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase
                    if phase != "Pending" && phase != "Unknown" {
                        podIP: pod.obj.status.podIP
                        hostIP: pod.obj.status.hostIP
                        nodeName: pod.obj.spec.nodeName
                        podIP: pod.object.status.podIP
                        hostIP: pod.object.status.hostIP
                        nodeName: pod.object.spec.nodeName
                    }
                }
            }]
        }
        if len(podsError) != 0 {
            error: strings.Join(podsError, ",")
        }
        if podsWithCluster == _|_ {
            podList: []
        }
    }
    }

    if resources.err != _|_ {
        status: {
            error: resources.err
    if result.err != _|_ {
        status: {
            error: result.err
        }
    }
    }

48
test/e2e-apiserver-test/testdata/component-service-view.yaml
vendored
Normal file
@@ -0,0 +1,48 @@
apiVersion: v1
data:
  template: |
    import (
        "vela/ql"
    )

    parameter: {
        appName: string
        appNs: string
        name?: string
        cluster?: string
        clusterNs?: string
    }

    result: ql.#CollectServices & {
        app: {
            name: parameter.appName
            namespace: parameter.appNs
            filter: {
                if parameter.cluster != _|_ {
                    cluster: parameter.cluster
                }
                if parameter.clusterNs != _|_ {
                    clusterNamespace: parameter.clusterNs
                }
                if parameter.name != _|_ {
                    components: [parameter.name]
                }
            }
        }
    }

    if result.err == _|_ {
        status: {
            services: result.list
        }
    }

    if result.err != _|_ {
        status: {
            error: result.err
        }
    }
kind: ConfigMap
metadata:
  name: test-component-service-view
  namespace: vela-system
@@ -52,6 +52,11 @@ type Status struct {
    Error string `json:"error,omitempty"`
}

type Services struct {
    Services []types2.ResourceItem `json:"services,omitempty"`
    Error    string                `json:"error,omitempty"`
}

var _ = Describe("Test velaQL rest api", func() {
    namespace := "test-velaql"
    appName := "example-app"
@@ -85,7 +90,7 @@ var _ = Describe("Test velaQL rest api", func() {
                return errors.Errorf("expect the applied resources number is %d, but got %d", 3, len(oldApp.Status.AppliedResources))
            }
            return nil
        }, 3*time.Second).WithTimeout(time.Minute * 1).Should(BeNil())
        }).WithTimeout(time.Minute * 1).WithPolling(3 * time.Second).Should(BeNil())

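The test migration above moves the poll interval from Eventually's positional arguments into the builder-style WithTimeout/WithPolling calls. A minimal sketch of the two equivalent spellings under recent Gomega versions (meant to run inside a Ginkgo suite with a registered fail handler):

    package example

    import (
        "time"

        . "github.com/onsi/gomega"
    )

    func poll(check func() error) {
        // Older positional form: Eventually(fn, timeout, pollingInterval).
        Eventually(check, time.Minute, 3*time.Second).Should(BeNil())

        // Equivalent builder form used by the updated tests.
        Eventually(check).WithTimeout(time.Minute).WithPolling(3 * time.Second).Should(BeNil())
    }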
    queryRes := get(fmt.Sprintf("/query?velaql=%s{name=%s,namespace=%s}.%s", "read-view", appName, namespace, "output.value.spec"))
    var appSpec v1beta1.ApplicationSpec
@@ -102,11 +107,14 @@ var _ = Describe("Test velaQL rest api", func() {
        Expect(queryRes.StatusCode).Should(Equal(400))
    })

    It("Test query application component view", func() {
    It("Test query application pod and service view", func() {
        componentView := new(corev1.ConfigMap)
        serviceView := new(corev1.ConfigMap)
        Expect(common.ReadYamlToObject("./testdata/component-pod-view.yaml", componentView)).Should(BeNil())
        Expect(common.ReadYamlToObject("./testdata/component-service-view.yaml", serviceView)).Should(BeNil())
        Expect(k8sClient.Delete(context.Background(), componentView)).Should(SatisfyAny(BeNil(), &util.NotFoundMatcher{}))
        Expect(k8sClient.Create(context.Background(), componentView)).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
        Expect(k8sClient.Create(context.Background(), serviceView)).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))

        oldApp := new(v1beta1.Application)
        Eventually(func() error {
@@ -117,7 +125,7 @@ var _ = Describe("Test velaQL rest api", func() {
                return errors.Errorf("expect the applied resources number is %d, but got %d", 3, len(oldApp.Status.AppliedResources))
            }
            return nil
        }, 3*time.Second).WithTimeout(time.Minute * 3).Should(BeNil())
        }).WithTimeout(time.Minute * 2).WithPolling(3 * time.Second).Should(BeNil())

        Eventually(func(g Gomega) {
            queryRes := get(fmt.Sprintf("/query?velaql=%s{appName=%s,appNs=%s,name=%s}.%s", "test-component-pod-view", appName, namespace, component1Name, "status"))
@@ -125,7 +133,7 @@ var _ = Describe("Test velaQL rest api", func() {
            g.Expect(decodeResponseBody(queryRes, status)).Should(Succeed())
            g.Expect(len(status.PodList)).Should(Equal(1))
            g.Expect(status.PodList[0].Component).Should(Equal(component1Name))
        }, 3*time.Second).WithTimeout(time.Minute * 3).Should(BeNil())
        }, time.Minute*3, 3*time.Second).Should(BeNil())

        Eventually(func() error {
            queryRes1 := get(fmt.Sprintf("/query?velaql=%s{appName=%s,appNs=%s,name=%s}.%s", "test-component-pod-view", appName, namespace, component2Name, "status"))
@@ -145,7 +153,15 @@ var _ = Describe("Test velaQL rest api", func() {
                return errors.New("container name is not correct")
            }
            return nil
        }, 3*time.Second).WithTimeout(time.Minute * 1).Should(BeNil())
        }, time.Minute*1, 3*time.Second).Should(BeNil())

        Eventually(func(g Gomega) {
            queryRes := get(fmt.Sprintf("/query?velaql=%s{appName=%s,appNs=%s,name=%s}.%s", "test-component-service-view", appName, namespace, component1Name, "status"))
            status := new(Services)
            g.Expect(decodeResponseBody(queryRes, status)).Should(Succeed())
            g.Expect(len(status.Services)).Should(Equal(1))
            g.Expect(status.Services[0].Component).Should(Equal(component1Name))
        }, time.Minute*1, 3*time.Second).Should(BeNil())
    })

    It("Test collect pod from cronJob", func() {

@@ -57,12 +57,14 @@ var _ = Describe("Test multicluster CLI commands", func() {
        Expect(err).Should(Succeed())
        Eventually(func(g Gomega) {
            pods := &v1.PodList{}
            g.Expect(k8sClient.List(workerCtx, pods, client.InNamespace(namespace))).Should(Succeed())
            g.Expect(k8sClient.List(workerCtx, pods, client.InNamespace(namespace), client.MatchingLabels(map[string]string{
                "app.oam.dev/name": app.Name,
            }))).Should(Succeed())
            g.Expect(len(pods.Items)).Should(Equal(1))
            g.Expect(pods.Items[0].Status.Phase).Should(Equal(v1.PodRunning))
            g.Expect(k8sClient.Get(hubCtx, client.ObjectKeyFromObject(app), app)).Should(Succeed())
            g.Expect(len(app.Status.AppliedResources)).ShouldNot(Equal(0))
        }, 2*time.Minute).Should(Succeed())
        }, 2*time.Minute, time.Second*3).Should(Succeed())
    })

    AfterEach(func() {
@@ -76,8 +78,8 @@ var _ = Describe("Test multicluster CLI commands", func() {
    It("Test vela exec", func() {
        command := exec.Command("vela", "exec", app.Name, "-n", namespace, "-i=false", "-t=false", "--", "pwd")
        outputs, err := command.CombinedOutput()
        Expect(err).Should(Succeed())
        Expect(string(outputs)).Should(ContainSubstring("/"))
        Expect(err).Should(Succeed())
    })

    It("Test vela port-forward", func() {

@@ -9,7 +9,9 @@ spec:
      type: webservice
      properties:
        image: crccheck/hello-world
        port: 8000
        ports:
          - port: 8000
            expose: true
      traits:
        - type: scaler
          properties:

@@ -9,7 +9,9 @@ spec:
      type: webservice
      properties:
        image: crccheck/hello-world
        port: 8000
        ports:
          - port: 8000
            expose: true
      traits:
        - type: scaler
          properties:

@@ -9,20 +9,19 @@ spec:
      type: webservice
      properties:
        image: crccheck/hello-world
        port: 8000
        ports:
          - port: 8000
            expose: true
  policies:
    - name: env-policy
      type: env-binding
      type: topology
      properties:
        envs:
          - name: test
            placement:
              clusterSelector:
                name: cluster-worker
        clusters:
          - cluster-worker
  workflow:
    steps:
      - name: deploy-test
        type: deploy2env
        type: deploy
        properties:
          policy: env-policy
          env: test
          policies:
            - env-policy

@@ -9,7 +9,9 @@ spec:
      type: webservice
      properties:
        image: crccheck/hello-world
        port: 8000
        ports:
          - port: 8000
            expose: true
      type: webservice
      traits:
        - type: rollout
