Merge pull request #23 from replicatedhq/windows

Windows binaries
This commit is contained in:
Marc Campbell
2019-07-23 16:53:11 -07:00
committed by GitHub
13 changed files with 460 additions and 50 deletions

View File

@@ -87,7 +87,7 @@ release:
.PHONY: local-release
local-release:
curl -sL https://git.io/goreleaser | bash -s -- --rm-dist --snapshot --config deploy/.goreleaser.snapshot.yml
curl -sL https://git.io/goreleaser | bash -s -- --rm-dist --snapshot --config deploy/.goreleaser.local.yml
docker tag replicated/troubleshoot:alpha localhost:32000/troubleshoot:alpha
docker tag replicated/preflight:alpha localhost:32000/preflight:alpha
docker tag replicated/troubleshoot-manager:alpha localhost:32000/troubleshoot-manager:alpha

View File

@@ -40,6 +40,8 @@ func Run() *cobra.Command {
cmd.Flags().String("collector-image", "", "the full name of the collector image to use")
cmd.Flags().String("collector-pullpolicy", "", "the pull policy of the collector image")
cmd.Flags().String("serviceaccount", "", "name of the service account to use. if not provided, one will be created")
viper.BindPFlags(cmd.Flags())
return cmd

View File

@@ -125,6 +125,17 @@ func runCollectors(v *viper.Viper, preflight troubleshootv1beta1.Preflight) (map
}
restClient := clientset.CoreV1().RESTClient()
serviceAccountName := v.GetString("serviceaccount")
if serviceAccountName == "" {
generatedServiceAccountName, err := createServiceAccount(preflight, v.GetString("namespace"), clientset)
if err != nil {
return nil, err
}
defer removeServiceAccount(generatedServiceAccountName, v.GetString("namespace"), clientset)
serviceAccountName = generatedServiceAccountName
}
// deploy an object that "owns" everything to aid in cleanup
owner := corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
@@ -186,6 +197,11 @@ func runCollectors(v *viper.Viper, preflight troubleshootv1beta1.Preflight) (map
return
}
if newPod.Status.Phase == corev1.PodFailed {
podsDeleted = append(podsDeleted, newPod)
return
}
if newPod.Status.Phase != corev1.PodSucceeded {
return
}
@@ -229,7 +245,7 @@ func runCollectors(v *viper.Viper, preflight troubleshootv1beta1.Preflight) (map
s := runtime.NewScheme()
s.AddKnownTypes(schema.GroupVersion{Group: "", Version: "v1"}, &corev1.ConfigMap{})
for _, collector := range desiredCollectors {
_, pod, err := collectrunner.CreateCollector(client, s, &owner, preflight.Name, v.GetString("namespace"), "preflight", collector, v.GetString("image"), v.GetString("pullpolicy"))
_, pod, err := collectrunner.CreateCollector(client, s, &owner, preflight.Name, v.GetString("namespace"), serviceAccountName, "preflight", collector, v.GetString("image"), v.GetString("pullpolicy"))
if err != nil {
return nil, err
}

View File

@@ -0,0 +1,131 @@
package cli
import (
"fmt"
troubleshootv1beta1 "github.com/replicatedhq/troubleshoot/pkg/apis/troubleshoot/v1beta1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
// createServiceAccount creates a ServiceAccount in the given namespace plus a
// ClusterRole (read-only "list" access to the resources preflight collectors
// inspect) and a ClusterRoleBinding tying the two together. All three objects
// share the name "preflight-<preflight name>", which is returned so the caller
// can hand it to collector pods and later clean up with removeServiceAccount.
func createServiceAccount(preflight troubleshootv1beta1.Preflight, namespace string, clientset *kubernetes.Clientset) (string, error) {
	name := fmt.Sprintf("preflight-%s", preflight.Name)

	serviceAccount := corev1.ServiceAccount{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "v1",
			Kind:       "ServiceAccount",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		Secrets: []corev1.ObjectReference{
			{
				APIVersion: "v1",
				Kind:       "Secret",
				Name:       name,
				Namespace:  namespace,
			},
		},
	}
	if _, err := clientset.CoreV1().ServiceAccounts(namespace).Create(&serviceAccount); err != nil {
		return "", err
	}

	role := rbacv1.ClusterRole{
		TypeMeta: metav1.TypeMeta{
			// RBAC types belong to the rbac.authorization.k8s.io group,
			// not the core "v1" group.
			APIVersion: "rbac.authorization.k8s.io/v1",
			Kind:       "ClusterRole",
		},
		ObjectMeta: metav1.ObjectMeta{
			// ClusterRoles are cluster-scoped, so no namespace is set.
			Name: name,
		},
		Rules: []rbacv1.PolicyRule{
			{
				APIGroups: []string{""},
				Resources: []string{
					"namespaces",
					"pods",
					"services",
					"secrets",
				},
				Verbs: metav1.Verbs{"list"},
			},
			{
				APIGroups: []string{"apps"},
				Resources: []string{"deployments"},
				Verbs:     metav1.Verbs{"list"},
			},
			{
				APIGroups: []string{"extensions"},
				Resources: []string{"ingresses"},
				Verbs:     metav1.Verbs{"list"},
			},
			{
				APIGroups: []string{"storage.k8s.io"},
				Resources: []string{"storageclasses"},
				Verbs:     metav1.Verbs{"list"},
			},
			{
				APIGroups: []string{"apiextensions.k8s.io"},
				Resources: []string{"customresourcedefinitions"},
				Verbs:     metav1.Verbs{"list"},
			},
		},
	}
	if _, err := clientset.RbacV1().ClusterRoles().Create(&role); err != nil {
		return "", err
	}

	roleBinding := rbacv1.ClusterRoleBinding{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "rbac.authorization.k8s.io/v1",
			Kind:       "ClusterRoleBinding",
		},
		ObjectMeta: metav1.ObjectMeta{
			// ClusterRoleBindings are cluster-scoped, so no namespace is set.
			Name: name,
		},
		Subjects: []rbacv1.Subject{
			{
				Kind:      "ServiceAccount",
				Name:      name,
				Namespace: namespace,
			},
		},
		RoleRef: rbacv1.RoleRef{
			APIGroup: "rbac.authorization.k8s.io",
			Kind:     "ClusterRole",
			Name:     name,
		},
	}
	if _, err := clientset.RbacV1().ClusterRoleBindings().Create(&roleBinding); err != nil {
		return "", err
	}

	return name, nil
}
// removeServiceAccount deletes the ClusterRoleBinding, ClusterRole, and
// ServiceAccount created by createServiceAccount. Because this runs as a
// deferred best-effort cleanup, all three deletions are attempted even if an
// earlier one fails (the original early-return would leak the remaining
// objects); the first error encountered, if any, is returned.
func removeServiceAccount(name string, namespace string, clientset *kubernetes.Clientset) error {
	var firstErr error
	if err := clientset.RbacV1().ClusterRoleBindings().Delete(name, &metav1.DeleteOptions{}); err != nil && firstErr == nil {
		firstErr = err
	}
	if err := clientset.RbacV1().ClusterRoles().Delete(name, &metav1.DeleteOptions{}); err != nil && firstErr == nil {
		firstErr = err
	}
	if err := clientset.CoreV1().ServiceAccounts(namespace).Delete(name, &metav1.DeleteOptions{}); err != nil && firstErr == nil {
		firstErr = err
	}
	return firstErr
}

View File

@@ -19,12 +19,7 @@ For example:
troubleshoot run --collectors application --wait
`,
PreRun: func(cmd *cobra.Command, args []string) {
viper.BindPFlag("collectors", cmd.Flags().Lookup("collectors"))
viper.BindPFlag("namespace", cmd.Flags().Lookup("namespace"))
viper.BindPFlag("kubecontext", cmd.Flags().Lookup("kubecontext"))
viper.BindPFlag("image", cmd.Flags().Lookup("image"))
viper.BindPFlag("pullpolicy", cmd.Flags().Lookup("pullpolicy"))
viper.BindPFlag("redact", cmd.Flags().Lookup("redact"))
viper.BindPFlags(cmd.Flags())
},
RunE: func(cmd *cobra.Command, args []string) error {
v := viper.GetViper()
@@ -46,6 +41,7 @@ troubleshoot run --collectors application --wait
cmd.Flags().String("pullpolicy", "", "the pull policy of the collector image")
cmd.Flags().Bool("redact", true, "enable/disable default redactions")
cmd.Flags().String("serviceaccount", "", "name of the service account to use. if not provided, one will be created")
viper.BindPFlags(cmd.Flags())
return cmd

View File

@@ -88,6 +88,17 @@ func runCollectors(v *viper.Viper, collector troubleshootv1beta1.Collector) (str
}
restClient := clientset.CoreV1().RESTClient()
serviceAccountName := v.GetString("serviceaccount")
if serviceAccountName == "" {
generatedServiceAccountName, err := createServiceAccount(collector, v.GetString("namespace"), clientset)
if err != nil {
return "", err
}
defer removeServiceAccount(generatedServiceAccountName, v.GetString("namespace"), clientset)
serviceAccountName = generatedServiceAccountName
}
// deploy an object that "owns" everything to aid in cleanup
owner := corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
@@ -126,7 +137,7 @@ func runCollectors(v *viper.Viper, collector troubleshootv1beta1.Collector) (str
if err != nil {
return "", err
}
// defer os.RemoveAll(bundlePath)
defer os.RemoveAll(bundlePath)
resyncPeriod := time.Second
ctx := context.Background()
@@ -158,6 +169,11 @@ func runCollectors(v *viper.Viper, collector troubleshootv1beta1.Collector) (str
return
}
if newPod.Status.Phase == corev1.PodFailed {
podsDeleted = append(podsDeleted, newPod)
return
}
if newPod.Status.Phase != corev1.PodSucceeded {
return
}
@@ -206,7 +222,8 @@ func runCollectors(v *viper.Viper, collector troubleshootv1beta1.Collector) (str
s := runtime.NewScheme()
s.AddKnownTypes(schema.GroupVersion{Group: "", Version: "v1"}, &corev1.ConfigMap{})
for _, collect := range desiredCollectors {
_, pod, err := collectrunner.CreateCollector(client, s, &owner, collector.Name, v.GetString("namespace"), "troubleshoot", collect, v.GetString("image"), v.GetString("pullpolicy"))
fmt.Printf("creating collector\n")
_, pod, err := collectrunner.CreateCollector(client, s, &owner, collector.Name, v.GetString("namespace"), serviceAccountName, "troubleshoot", collect, v.GetString("image"), v.GetString("pullpolicy"))
if err != nil {
return "", err
}

View File

@@ -0,0 +1,122 @@
package cli
import (
"fmt"
troubleshootv1beta1 "github.com/replicatedhq/troubleshoot/pkg/apis/troubleshoot/v1beta1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
// createServiceAccount creates a ServiceAccount in the given namespace plus a
// ClusterRole (read-only "list" access to the resources troubleshoot
// collectors inspect) and a ClusterRoleBinding tying the two together. All
// three objects share the name "troubleshoot-<collector name>", which is
// returned so the caller can hand it to collector pods and later clean up
// with removeServiceAccount.
func createServiceAccount(collector troubleshootv1beta1.Collector, namespace string, clientset *kubernetes.Clientset) (string, error) {
	name := fmt.Sprintf("troubleshoot-%s", collector.Name)

	serviceAccount := corev1.ServiceAccount{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "v1",
			Kind:       "ServiceAccount",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
	}
	if _, err := clientset.CoreV1().ServiceAccounts(namespace).Create(&serviceAccount); err != nil {
		return "", err
	}

	role := rbacv1.ClusterRole{
		TypeMeta: metav1.TypeMeta{
			// RBAC types belong to the rbac.authorization.k8s.io group,
			// not the core "v1" group.
			APIVersion: "rbac.authorization.k8s.io/v1",
			Kind:       "ClusterRole",
		},
		ObjectMeta: metav1.ObjectMeta{
			// ClusterRoles are cluster-scoped, so no namespace is set.
			Name: name,
		},
		Rules: []rbacv1.PolicyRule{
			{
				APIGroups: []string{""},
				Resources: []string{
					"namespaces",
					"pods",
					"services",
					"secrets",
				},
				Verbs: metav1.Verbs{"list"},
			},
			{
				APIGroups: []string{"apps"},
				Resources: []string{"deployments"},
				Verbs:     metav1.Verbs{"list"},
			},
			{
				APIGroups: []string{"extensions"},
				Resources: []string{"ingresses"},
				Verbs:     metav1.Verbs{"list"},
			},
			{
				APIGroups: []string{"storage.k8s.io"},
				Resources: []string{"storageclasses"},
				Verbs:     metav1.Verbs{"list"},
			},
			{
				APIGroups: []string{"apiextensions.k8s.io"},
				Resources: []string{"customresourcedefinitions"},
				Verbs:     metav1.Verbs{"list"},
			},
		},
	}
	if _, err := clientset.RbacV1().ClusterRoles().Create(&role); err != nil {
		return "", err
	}

	roleBinding := rbacv1.ClusterRoleBinding{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "rbac.authorization.k8s.io/v1",
			Kind:       "ClusterRoleBinding",
		},
		ObjectMeta: metav1.ObjectMeta{
			// ClusterRoleBindings are cluster-scoped, so no namespace is set.
			Name: name,
		},
		Subjects: []rbacv1.Subject{
			{
				Kind:      "ServiceAccount",
				Name:      name,
				Namespace: namespace,
			},
		},
		RoleRef: rbacv1.RoleRef{
			APIGroup: "rbac.authorization.k8s.io",
			Kind:     "ClusterRole",
			Name:     name,
		},
	}
	if _, err := clientset.RbacV1().ClusterRoleBindings().Create(&roleBinding); err != nil {
		return "", err
	}

	return name, nil
}
// removeServiceAccount deletes the ClusterRoleBinding, ClusterRole, and
// ServiceAccount created by createServiceAccount. Because this runs as a
// deferred best-effort cleanup, all three deletions are attempted even if an
// earlier one fails (the original early-return would leak the remaining
// objects); the first error encountered, if any, is returned.
func removeServiceAccount(name string, namespace string, clientset *kubernetes.Clientset) error {
	var firstErr error
	if err := clientset.RbacV1().ClusterRoleBindings().Delete(name, &metav1.DeleteOptions{}); err != nil && firstErr == nil {
		firstErr = err
	}
	if err := clientset.RbacV1().ClusterRoles().Delete(name, &metav1.DeleteOptions{}); err != nil && firstErr == nil {
		firstErr = err
	}
	if err := clientset.CoreV1().ServiceAccounts(namespace).Delete(name, &metav1.DeleteOptions{}); err != nil && firstErr == nil {
		firstErr = err
	}
	return firstErr
}

View File

@@ -13,38 +13,38 @@ spec:
message: Cannot pull from quay.io
- pass:
message: Found credentials to pull from quay.io
# - clusterVersion:
# outcomes:
# - fail:
# when: "< 1.13.0"
# message: Sorry, support.io requires at least Kubernetes 1.14.0. Please update your Kubernetes cluster before installing.
# uri: https://enterprise.support.io/install/requirements/kubernetes
# - warn:
# when: "< 1.15.0"
# message: The version of Kubernetes you are running meets the minimum requirements to run support.io. It's recommended to run Kubernetes 1.15.0 or later.
# uri: https://enterprise.support.io/install/requirements/kubernetes
# - pass:
# message: The version of Kubernetes you have installed meets the required and recommended versions.
# - storageClass:
# checkName: Required storage classes
# storageClassName: "microk8s-hostpath"
# outcomes:
# - fail:
# message: The required storage class was not found in the cluster.
# - pass:
# message: The required storage class was found in the cluster.
# - ingress:
# namespace: default
# ingressName: my-app-ingress
# outcomes:
# - fail:
# message: Expected to find an ingress named "my-app-ingress".
# - pass:
# message: Expected ingress was found.
# - customResourceDefinitionName:
# customResourceDefinitionName: rook
# outcomes:
# - fail:
# message: Rook is required for Support.io. Rook was not found in the cluster.
# - pass:
# message: Found a supported version of Rook installed and running in the cluster.
- clusterVersion:
outcomes:
- fail:
when: "< 1.13.0"
message: Sorry, support.io requires at least Kubernetes 1.14.0. Please update your Kubernetes cluster before installing.
uri: https://enterprise.support.io/install/requirements/kubernetes
- warn:
when: "< 1.15.0"
message: The version of Kubernetes you are running meets the minimum requirements to run support.io. It's recommended to run Kubernetes 1.15.0 or later.
uri: https://enterprise.support.io/install/requirements/kubernetes
- pass:
message: The version of Kubernetes you have installed meets the required and recommended versions.
- storageClass:
checkName: Required storage classes
storageClassName: "microk8s-hostpath"
outcomes:
- fail:
message: The required storage class was not found in the cluster.
- pass:
message: The required storage class was found in the cluster.
- ingress:
namespace: default
ingressName: my-app-ingress
outcomes:
- fail:
message: Expected to find an ingress named "my-app-ingress".
- pass:
message: Expected ingress was found.
- customResourceDefinitionName:
customResourceDefinitionName: rook
outcomes:
- fail:
message: Rook is required for Support.io. Rook was not found in the cluster.
- pass:
message: Found a supported version of Rook installed and running in the cluster.

View File

@@ -0,0 +1,105 @@
# goreleaser configuration for snapshot builds of the troubleshoot project.
# Builds four linux/amd64 static binaries (collector, preflight, troubleshoot,
# manager), each with version metadata injected via -ldflags, then packages
# them into tarballs and :alpha-tagged docker images.
# NOTE(review): indentation in this rendering appears flattened — confirm
# nesting against the original file before reuse.
project_name: troubleshoot
release:
github:
owner: replicatedhq
name: troubleshoot
# One build stanza per binary; all are CGO-disabled static linux/amd64 builds.
builds:
- id: collector
goos:
- linux
goarch:
- amd64
env:
- CGO_ENABLED=0
main: cmd/collector/main.go
# -X flags stamp version/commit/date into pkg/version at link time.
ldflags: -s -w
-X github.com/replicatedhq/troubleshoot/pkg/version.version={{.Version}}
-X github.com/replicatedhq/troubleshoot/pkg/version.gitSHA={{.Commit}}
-X github.com/replicatedhq/troubleshoot/pkg/version.buildTime={{.Date}}
-extldflags "-static"
flags: -tags netgo -installsuffix netgo
binary: collector
hooks: {}
- id: preflight
goos:
- linux
goarch:
- amd64
env:
- CGO_ENABLED=0
main: cmd/preflight/main.go
ldflags: -s -w
-X github.com/replicatedhq/troubleshoot/pkg/version.version={{.Version}}
-X github.com/replicatedhq/troubleshoot/pkg/version.gitSHA={{.Commit}}
-X github.com/replicatedhq/troubleshoot/pkg/version.buildTime={{.Date}}
-extldflags "-static"
flags: -tags netgo -installsuffix netgo
binary: preflight
hooks: {}
- id: troubleshoot
goos:
- linux
goarch:
- amd64
env:
- CGO_ENABLED=0
main: cmd/troubleshoot/main.go
ldflags: -s -w
-X github.com/replicatedhq/troubleshoot/pkg/version.version={{.Version}}
-X github.com/replicatedhq/troubleshoot/pkg/version.gitSHA={{.Commit}}
-X github.com/replicatedhq/troubleshoot/pkg/version.buildTime={{.Date}}
-extldflags "-static"
flags: -tags netgo -installsuffix netgo
binary: troubleshoot
hooks: {}
- id: manager
goos:
- linux
goarch:
- amd64
env:
- CGO_ENABLED=0
main: cmd/manager/main.go
ldflags: -s -w
-X github.com/replicatedhq/troubleshoot/pkg/version.version={{.Version}}
-X github.com/replicatedhq/troubleshoot/pkg/version.gitSHA={{.Commit}}
-X github.com/replicatedhq/troubleshoot/pkg/version.buildTime={{.Date}}
-extldflags "-static"
flags: -tags netgo -installsuffix netgo
binary: manager
hooks: {}
# Tarball archives; '-alpha' suffix marks these as snapshot artifacts.
archives:
- id: tar
format: tar.gz
name_template: '{{ .Binary }}_{{.Version}}_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{.Arm }}{{ end }}-alpha'
files:
- licence*
- LICENCE*
- license*
- LICENSE*
- readme*
- README*
- changelog*
- CHANGELOG*
# Docker images built from the snapshot binaries.
# NOTE(review): the preflight image reuses Dockerfile.troubleshoot and bundles
# all three CLI binaries — presumably intentional; confirm.
dockers:
- dockerfile: ./deploy/Dockerfile.troubleshoot
image_templates:
- "replicated/troubleshoot:alpha"
binaries:
- collector
- troubleshoot
- preflight
- dockerfile: ./deploy/Dockerfile.troubleshoot
image_templates:
- "replicated/preflight:alpha"
binaries:
- collector
- troubleshoot
- preflight
- dockerfile: ./deploy/Dockerfile.manager
image_templates:
- "replicated/troubleshoot-manager:alpha"
binaries:
- manager
snapshot:
name_template: SNAPSHOT-{{ .Commit }}

View File

@@ -23,6 +23,8 @@ builds:
- id: preflight
goos:
- linux
- windows
- darwin
goarch:
- amd64
env:
@@ -39,6 +41,8 @@ builds:
- id: troubleshoot
goos:
- linux
- windows
- darwin
goarch:
- amd64
env:

View File

@@ -0,0 +1,12 @@
# Minimal ClusterRole for running preflight with a pre-created service
# account (supplied via the --serviceaccount flag) instead of letting the
# CLI generate one: grants get/list on namespaces and pods only.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
# NOTE(review): ClusterRoles are cluster-scoped; this namespace field is
# ignored by the API server — confirm whether it should be removed.
namespace: default
name: preflight
rules:
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list"]

View File

@@ -15,13 +15,13 @@ import (
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)
func CreateCollector(client client.Client, scheme *runtime.Scheme, ownerRef metav1.Object, jobName string, jobNamespace string, jobType string, collect *troubleshootv1beta1.Collect, image string, pullPolicy string) (*corev1.ConfigMap, *corev1.Pod, error) {
func CreateCollector(client client.Client, scheme *runtime.Scheme, ownerRef metav1.Object, jobName string, jobNamespace string, serviceAccountName string, jobType string, collect *troubleshootv1beta1.Collect, image string, pullPolicy string) (*corev1.ConfigMap, *corev1.Pod, error) {
configMap, err := createCollectorSpecConfigMap(client, scheme, ownerRef, jobName, jobNamespace, collect)
if err != nil {
return nil, nil, err
}
pod, err := createCollectorPod(client, scheme, ownerRef, jobName, jobNamespace, jobType, collect, configMap, image, pullPolicy)
pod, err := createCollectorPod(client, scheme, ownerRef, jobName, jobNamespace, serviceAccountName, jobType, collect, configMap, image, pullPolicy)
if err != nil {
return nil, nil, err
}
@@ -75,9 +75,13 @@ func createCollectorSpecConfigMap(client client.Client, scheme *runtime.Scheme,
return &configMap, nil
}
func createCollectorPod(client client.Client, scheme *runtime.Scheme, ownerRef metav1.Object, jobName string, jobNamespace string, jobType string, collect *troubleshootv1beta1.Collect, configMap *corev1.ConfigMap, image string, pullPolicy string) (*corev1.Pod, error) {
func createCollectorPod(client client.Client, scheme *runtime.Scheme, ownerRef metav1.Object, jobName string, jobNamespace string, serviceAccountName string, jobType string, collect *troubleshootv1beta1.Collect, configMap *corev1.ConfigMap, image string, pullPolicy string) (*corev1.Pod, error) {
name := fmt.Sprintf("%s-%s", jobName, DeterministicIDForCollector(collect))
if serviceAccountName == "" {
serviceAccountName = "default"
}
namespacedName := types.NamespacedName{
Name: name,
Namespace: jobNamespace,
@@ -115,7 +119,8 @@ func createCollectorPod(client client.Client, scheme *runtime.Scheme, ownerRef m
Kind: "Pod",
},
Spec: corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyNever,
ServiceAccountName: serviceAccountName,
RestartPolicy: corev1.RestartPolicyNever,
Containers: []corev1.Container{
{
Image: imageName,

View File

@@ -57,7 +57,7 @@ func (r *ReconcilePreflightJob) reconcileOnePreflightCollector(instance *trouble
return nil
}
_, _, err := collectrunner.CreateCollector(r.Client, r.scheme, instance, instance.Name, instance.Namespace, "preflight", collect, instance.Spec.Image, instance.Spec.ImagePullPolicy)
_, _, err := collectrunner.CreateCollector(r.Client, r.scheme, instance, instance.Name, instance.Namespace, "", "preflight", collect, instance.Spec.Image, instance.Spec.ImagePullPolicy)
if err != nil {
return err
}