diff --git a/pkg/collect/cluster_resources.go b/pkg/collect/cluster_resources.go index 1aaace13..283c8a98 100644 --- a/pkg/collect/cluster_resources.go +++ b/pkg/collect/cluster_resources.go @@ -365,6 +365,32 @@ func (c *CollectClusterResources) Collect(progressChan chan<- interface{}) (Coll } output.SaveResult(c.BundlePath, path.Join(constants.CLUSTER_RESOURCES_DIR, fmt.Sprintf("%s-errors.json", constants.CLUSTER_RESOURCES_ENDPOINTS)), marshalErrors(endpointsErrors)) + // Service Accounts + servicesAccounts, servicesAccountsErrors := serviceAccounts(ctx, client, namespaceNames) + for k, v := range servicesAccounts { + output.SaveResult(c.BundlePath, path.Join(constants.CLUSTER_RESOURCES_DIR, constants.CLUSTER_RESOURCES_SERVICE_ACCOUNTS, k), bytes.NewBuffer(v)) + } + output.SaveResult(c.BundlePath, path.Join(constants.CLUSTER_RESOURCES_DIR, fmt.Sprintf("%s-errors.json", constants.CLUSTER_RESOURCES_SERVICE_ACCOUNTS)), marshalErrors(servicesAccountsErrors)) + + // Leases + leases, leasesErrors := leases(ctx, client, namespaceNames) + for k, v := range leases { + output.SaveResult(c.BundlePath, path.Join(constants.CLUSTER_RESOURCES_DIR, constants.CLUSTER_RESOURCES_LEASES, k), bytes.NewBuffer(v)) + } + output.SaveResult(c.BundlePath, path.Join(constants.CLUSTER_RESOURCES_DIR, fmt.Sprintf("%s-errors.json", constants.CLUSTER_RESOURCES_LEASES)), marshalErrors(leasesErrors)) + + // Volume Attachments + volumeAttachments, volumeAttachmentsErrors := volumeAttachments(ctx, client) + output.SaveResult(c.BundlePath, path.Join(constants.CLUSTER_RESOURCES_DIR, fmt.Sprintf("%s.json", constants.CLUSTER_RESOURCES_VOLUME_ATTACHMENTS)), bytes.NewBuffer(volumeAttachments)) + output.SaveResult(c.BundlePath, path.Join(constants.CLUSTER_RESOURCES_DIR, fmt.Sprintf("%s-errors.json", constants.CLUSTER_RESOURCES_VOLUME_ATTACHMENTS)), marshalErrors(volumeAttachmentsErrors)) + + // ConfigMaps + configMaps, configMapsErrors := configMaps(ctx, client, namespaceNames) + for k, v := range 
configMaps { + output.SaveResult(c.BundlePath, path.Join(constants.CLUSTER_RESOURCES_DIR, constants.CLUSTER_RESOURCES_CONFIGMAPS, k), bytes.NewBuffer(v)) + } + + output.SaveResult(c.BundlePath, path.Join(constants.CLUSTER_RESOURCES_DIR, fmt.Sprintf("%s-errors.json", constants.CLUSTER_RESOURCES_CONFIGMAPS)), marshalErrors(configMapsErrors)) return output, nil } @@ -1949,3 +1975,134 @@ func endpoints(ctx context.Context, client *kubernetes.Clientset, namespaces []s return endpointsByNamespace, errorsByNamespace } + +func serviceAccounts(ctx context.Context, client kubernetes.Interface, namespaces []string) (map[string][]byte, map[string]string) { + serviceAccountsByNamespace := make(map[string][]byte) + errorsByNamespace := make(map[string]string) + + for _, namespace := range namespaces { + serviceAccounts, err := client.CoreV1().ServiceAccounts(namespace).List(ctx, metav1.ListOptions{}) + if err != nil { + errorsByNamespace[namespace] = err.Error() + continue + } + + gvk, err := apiutil.GVKForObject(serviceAccounts, scheme.Scheme) + if err == nil { + serviceAccounts.GetObjectKind().SetGroupVersionKind(gvk) + } + + for i, o := range serviceAccounts.Items { + gvk, err := apiutil.GVKForObject(&o, scheme.Scheme) + if err == nil { + serviceAccounts.Items[i].GetObjectKind().SetGroupVersionKind(gvk) + } + } + + b, err := json.MarshalIndent(serviceAccounts, "", " ") + if err != nil { + errorsByNamespace[namespace] = err.Error() + continue + } + + serviceAccountsByNamespace[namespace+".json"] = b + } + + return serviceAccountsByNamespace, errorsByNamespace +} + +func leases(ctx context.Context, client kubernetes.Interface, namespaces []string) (map[string][]byte, map[string]string) { + leasesByNamespace := make(map[string][]byte) + errorsByNamespace := make(map[string]string) + + for _, namespace := range namespaces { + leases, err := client.CoordinationV1().Leases(namespace).List(ctx, metav1.ListOptions{}) + if err != nil { + errorsByNamespace[namespace] = err.Error() + 
continue + } + + gvk, err := apiutil.GVKForObject(leases, scheme.Scheme) + if err == nil { + leases.GetObjectKind().SetGroupVersionKind(gvk) + } + + for i, o := range leases.Items { + gvk, err := apiutil.GVKForObject(&o, scheme.Scheme) + if err == nil { + leases.Items[i].GetObjectKind().SetGroupVersionKind(gvk) + } + } + + b, err := json.MarshalIndent(leases, "", " ") + if err != nil { + errorsByNamespace[namespace] = err.Error() + continue + } + + leasesByNamespace[namespace+".json"] = b + } + + return leasesByNamespace, errorsByNamespace +} + +func volumeAttachments(ctx context.Context, client kubernetes.Interface) ([]byte, []string) { + volumeAttachments, err := client.StorageV1().VolumeAttachments().List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, []string{err.Error()} + } + + gvk, err := apiutil.GVKForObject(volumeAttachments, scheme.Scheme) + if err == nil { + volumeAttachments.GetObjectKind().SetGroupVersionKind(gvk) + } + + for i, o := range volumeAttachments.Items { + gvk, err := apiutil.GVKForObject(&o, scheme.Scheme) + if err == nil { + volumeAttachments.Items[i].GetObjectKind().SetGroupVersionKind(gvk) + } + } + + b, err := json.MarshalIndent(volumeAttachments, "", " ") + if err != nil { + return nil, []string{err.Error()} + } + + return b, nil +} + +func configMaps(ctx context.Context, client kubernetes.Interface, namespaces []string) (map[string][]byte, map[string]string) { + configmapByNamespace := make(map[string][]byte) + errorsByNamespace := make(map[string]string) + + for _, namespace := range namespaces { + configmaps, err := client.CoreV1().ConfigMaps(namespace).List(ctx, metav1.ListOptions{}) + if err != nil { + errorsByNamespace[namespace] = err.Error() + continue + } + + gvk, err := apiutil.GVKForObject(configmaps, scheme.Scheme) + if err == nil { + configmaps.GetObjectKind().SetGroupVersionKind(gvk) + } + + for i, o := range configmaps.Items { + gvk, err := apiutil.GVKForObject(&o, scheme.Scheme) + if err == nil { + 
configmaps.Items[i].GetObjectKind().SetGroupVersionKind(gvk) + } + } + + b, err := json.MarshalIndent(configmaps, "", " ") + if err != nil { + errorsByNamespace[namespace] = err.Error() + continue + } + + configmapByNamespace[namespace+".json"] = b + } + + return configmapByNamespace, errorsByNamespace +} diff --git a/pkg/collect/cluster_resources_test.go b/pkg/collect/cluster_resources_test.go index 6cb31d8f..1a316337 100644 --- a/pkg/collect/cluster_resources_test.go +++ b/pkg/collect/cluster_resources_test.go @@ -1,14 +1,261 @@ package collect import ( + "context" + "encoding/json" "reflect" "testing" troubleshootv1beta2 "github.com/replicatedhq/troubleshoot/pkg/apis/troubleshoot/v1beta2" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + v1 "k8s.io/api/coordination/v1" + corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + testclient "k8s.io/client-go/kubernetes/fake" ) +func Test_ConfigMaps(t *testing.T) { + tests := []struct { + name string + configMapNames []string + namespaces []string + }{ + { + name: "single namespace", + configMapNames: []string{"default"}, + namespaces: []string{"default"}, + }, + { + name: "multiple namespaces", + configMapNames: []string{"default"}, + namespaces: []string{"default", "test"}, + }, + { + name: "multiple in different namespaces", + configMapNames: []string{"default", "test-cm"}, + namespaces: []string{"default", "test"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + client := testclient.NewSimpleClientset() + ctx := context.Background() + err := createConfigMaps(client, tt.configMapNames, tt.namespaces) + assert.NoError(t, err) + + configMaps, _ := configMaps(ctx, client, tt.namespaces) + assert.Equal(t, len(tt.namespaces), len(configMaps)) + + for _, ns := range tt.namespaces { + assert.NotEmpty(t, configMaps[ns+".json"]) + var configmapList corev1.ConfigMapList + err := 
json.Unmarshal(configMaps[ns+".json"], &configmapList) + assert.NoError(t, err) + // Ensure the ConfigMap names match those in the list + assert.Equal(t, len(configmapList.Items), len(tt.configMapNames)) + for _, cm := range configmapList.Items { + assert.Contains(t, tt.configMapNames, cm.ObjectMeta.Name) + } + } + }) + } +} + +func createConfigMaps(client kubernetes.Interface, configMapNames []string, namespaces []string) error { + for _, ns := range namespaces { + for _, cmName := range configMapNames { + _, err := client.CoreV1().ConfigMaps(ns).Create(context.Background(), &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: cmName, + }, + }, metav1.CreateOptions{}) + if err != nil { + return err + } + } + } + return nil +} + +func Test_VolumeAttachments(t *testing.T) { + tests := []struct { + name string + volumeAttachmentNames []string + }{ + { + name: "single volume attachment", + volumeAttachmentNames: []string{"default"}, + }, + + { + name: "multiple volume attachments", + volumeAttachmentNames: []string{"default", "test"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + client := testclient.NewSimpleClientset() + ctx := context.Background() + err := createTestVolumeAttachments(client, tt.volumeAttachmentNames) + assert.NoError(t, err) + + volumeAttachments, _ := volumeAttachments(ctx, client) + assert.NotEmpty(t, volumeAttachments) + var volumeAttachmentList storagev1.VolumeAttachmentList + err = json.Unmarshal(volumeAttachments, &volumeAttachmentList) + assert.NoError(t, err) + // Ensure the VolumeAttachment names match those in the list + assert.Equal(t, len(volumeAttachmentList.Items), len(tt.volumeAttachmentNames)) + for _, va := range volumeAttachmentList.Items { + assert.Contains(t, tt.volumeAttachmentNames, va.ObjectMeta.Name) + } + }) + } +} + +func createTestVolumeAttachments(client kubernetes.Interface, volumeAttachmentNames []string) error { + for _, vaName := range volumeAttachmentNames { + _, err := 
client.StorageV1().VolumeAttachments().Create(context.Background(), &storagev1.VolumeAttachment{ + ObjectMeta: metav1.ObjectMeta{ + Name: vaName, + }, + }, metav1.CreateOptions{}) + if err != nil { + return err + } + } + return nil +} + +func Test_Leases(t *testing.T) { + tests := []struct { + name string + leaseNames []string + namespaces []string + }{ + { + name: "single namespace", + leaseNames: []string{"default"}, + namespaces: []string{"default"}, + }, + { + name: "multiple namespaces", + leaseNames: []string{"default"}, + namespaces: []string{"default", "test"}, + }, + { + name: "multiple in different namespaces", + leaseNames: []string{"default", "test-lease"}, + namespaces: []string{"default", "test"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + client := testclient.NewSimpleClientset() + ctx := context.Background() + err := createTestLeases(client, tt.leaseNames, tt.namespaces) + assert.NoError(t, err) + + leases, _ := leases(ctx, client, tt.namespaces) + assert.Equal(t, len(tt.namespaces), len(leases)) + + for _, ns := range tt.namespaces { + assert.NotEmpty(t, leases[ns+".json"]) + var leaseList v1.LeaseList + err := json.Unmarshal(leases[ns+".json"], &leaseList) + assert.NoError(t, err) + // Ensure the Lease names match those in the list + assert.Equal(t, len(leaseList.Items), len(tt.leaseNames)) + for _, lease := range leaseList.Items { + assert.Contains(t, tt.leaseNames, lease.ObjectMeta.Name) + } + } + }) + } +} + +func createTestLeases(client kubernetes.Interface, leaseNames []string, namespaces []string) error { + for _, ns := range namespaces { + for _, leaseName := range leaseNames { + _, err := client.CoordinationV1().Leases(ns).Create(context.Background(), &v1.Lease{ + ObjectMeta: metav1.ObjectMeta{ + Name: leaseName, + }, + }, metav1.CreateOptions{}) + if err != nil { + return err + } + } + } + return nil +} + +func Test_ServiceAccounts(t *testing.T) { + tests := []struct { + name string + serviceAccountNames 
[]string + namespaces []string + }{ + { + name: "single namespace", + serviceAccountNames: []string{"default"}, + namespaces: []string{"default"}, + }, + { + name: "multiple namespaces", + serviceAccountNames: []string{"default"}, + namespaces: []string{"default", "test"}, + }, + { + name: "multiple in different namespaces", + serviceAccountNames: []string{"default", "test-sa"}, + namespaces: []string{"default", "test"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + client := testclient.NewSimpleClientset() + ctx := context.Background() + err := createTestServiceAccounts(client, tt.serviceAccountNames, tt.namespaces) + assert.NoError(t, err) + + servicesAccounts, _ := serviceAccounts(ctx, client, tt.namespaces) + assert.Equal(t, len(tt.namespaces), len(servicesAccounts)) + + for _, ns := range tt.namespaces { + assert.NotEmpty(t, servicesAccounts[ns+".json"]) + var serviceAccountList corev1.ServiceAccountList + err := json.Unmarshal(servicesAccounts[ns+".json"], &serviceAccountList) + assert.NoError(t, err) + // Ensure the ServiceAccount names match those in the list + assert.Equal(t, len(serviceAccountList.Items), len(tt.serviceAccountNames)) + for _, sa := range serviceAccountList.Items { + assert.Contains(t, tt.serviceAccountNames, sa.ObjectMeta.Name) + } + } + }) + } +} + +func createTestServiceAccounts(client kubernetes.Interface, serviceAccountNames []string, namespaces []string) error { + for _, ns := range namespaces { + for _, saName := range serviceAccountNames { + _, err := client.CoreV1().ServiceAccounts(ns).Create(context.Background(), &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: saName, + }, + }, metav1.CreateOptions{}) + if err != nil { + return err + } + } + } + return nil +} + func Test_SelectCRDVersionByPriority(t *testing.T) { assert.Equal(t, "v1alpha3", selectCRDVersionByPriority([]string{"v1alpha2", "v1alpha3"})) assert.Equal(t, "v1alpha3", selectCRDVersionByPriority([]string{"v1alpha3", 
"v1alpha2"}))
diff --git a/pkg/constants/constants.go b/pkg/constants/constants.go
index d166020a..d49b81b0 100644
--- a/pkg/constants/constants.go
+++ b/pkg/constants/constants.go
@@ -55,6 +55,10 @@ const (
 	CLUSTER_RESOURCES_CLUSTER_ROLE_BINDINGS = "clusterrolebindings"
 	CLUSTER_RESOURCES_PRIORITY_CLASS        = "priorityclasses"
 	CLUSTER_RESOURCES_ENDPOINTS             = "endpoints"
+	CLUSTER_RESOURCES_SERVICE_ACCOUNTS      = "serviceaccounts"
+	CLUSTER_RESOURCES_LEASES                = "leases"
+	CLUSTER_RESOURCES_VOLUME_ATTACHMENTS    = "volumeattachments"
+	CLUSTER_RESOURCES_CONFIGMAPS            = "configmaps"
 
 	// SelfSubjectRulesReview evaluation responses
 	SELFSUBJECTRULESREVIEW_ERROR_AUTHORIZATION_WEBHOOK_UNSUPPORTED = "webhook authorizer does not support user rule resolution"
diff --git a/test/validate-support-bundle-e2e.sh b/test/validate-support-bundle-e2e.sh
index 4c96107f..d2c934d4 100755
--- a/test/validate-support-bundle-e2e.sh
+++ b/test/validate-support-bundle-e2e.sh
@@ -38,6 +38,36 @@ if grep -q "No matching files" "$tmpdir/$bundle_directory_name/analysis.json"; t
   exit 1
 fi
+base_path="$tmpdir/$bundle_directory_name/cluster-resources"
+folders=("auth-cani-list" "configmaps" "daemonsets" "endpoints" "events" "deployments" "leases" "services" "pvcs" "jobs" "roles" "statefulsets" "network-policy" "pods" "resource-quota" "rolebindings" "serviceaccounts")
+
+files=("namespaces" "volumeattachments" "pvs" "groups" "nodes" "priorityclasses" "resources")
+
+for folder in "${folders[@]}"; do
+  if [ -d "$base_path/$folder" ]; then
+    echo "$folder directory was collected"
+    if [ "$(ls -A $base_path/$folder)" ]; then
+      echo "$folder directory is not empty"
+    else
+      echo "$folder directory is empty"
+      exit 1
+    fi
+  else
+    echo "The $folder folder does not exist in $base_path path."
+    exit 1
+  fi
+done
+
+for file in "${files[@]}"; do
+  if [ -e "$base_path/$file.json" ]
+  then
+    echo "$file.json file was collected"
+  else
+    echo "The $file.json file does not exist in $base_path path."
+ exit 1 + fi +done + EXIT_STATUS=0 jq -r '.[].insight.severity' "$tmpdir/$bundle_directory_name/analysis.json" | while read i; do if [ $i == "error" ]; then