diff --git a/README.md b/README.md index b45de386..4c7955e1 100644 --- a/README.md +++ b/README.md @@ -23,7 +23,7 @@ Polaris can be run in three different modes: * As a [validating webhook](#webhook), so you can automatically reject workloads that don't adhere to your organization's policies. * As a [command-line tool](#cli), so you can test local YAML files, e.g. as part of a CI/CD process. -**Want to learn more?** Fairwinds holds [office hours on Zoom](https://zoom.us/j/242508205) the first Friday of every month, at 12pm Eastern. You can also reach out via email at `opensource@fairwinds.com` +**Want to learn more?** Reach out on [the Slack channel](https://fairwindscommunity.slack.com/messages/polaris), send an email to `opensource@fairwinds.com`, or join us for [office hours on Zoom](https://zoom.us/j/242508205) # Dashboard Quickstart @@ -49,10 +49,10 @@ The Polaris dashboard is a way to get a simple visual overview of the current st Our default standards in Polaris are rather high, so don’t be surprised if your score is lower than you might expect. A key goal for Polaris was to set a high standard and aim for great configuration by default. If the defaults we’ve included are too strict, it’s easy to adjust the configuration as part of the deployment configuration to better suit your workloads. -## Webhook +## Admission Controller: Validating Webhook > [View installation instructions](docs/usage.md#webhook) -Polaris includes an optional validating webhook. This accepts the same configuration as the dashboard, and can run the same validations. This webhook will reject any workloads that trigger a validation error. This is indicative of the greater goal of Polaris, not just to encourage better configuration through dashboard visibility, but to actually enforce it with this webhook. +Polaris can be run as an admission controller that acts as a validating webhook. 
This accepts the same configuration as the dashboard, and can run the same validations. This webhook will reject any workloads that trigger a validation error. This is indicative of the greater goal of Polaris, not just to encourage better configuration through dashboard visibility, but to actually enforce it with this webhook. Polaris will not fix your workloads, only block them. Unfortunately we have not found a way to display warnings as part of `kubectl` output unless we are rejecting a workload altogether. That means that any checks with a severity of `warning` will still pass webhook validation, and the only evidence of that warning will either be in the Polaris dashboard or the Polaris webhook logs. diff --git a/cmd/polaris/webhook.go b/cmd/polaris/webhook.go index a7bbbb89..be9fa5ea 100644 --- a/cmd/polaris/webhook.go +++ b/cmd/polaris/webhook.go @@ -106,7 +106,9 @@ var webhookCmd = &cobra.Command{ for innerIndex, supportedAPIType := range controllerToScan.ListSupportedAPIVersions() { webhookName := strings.ToLower(fmt.Sprintf("%s-%d-%d", controllerToScan, index, innerIndex)) hook := fwebhook.NewWebhook(webhookName, mgr, fwebhook.Validator{Config: config}, supportedAPIType) - webhooks = append(webhooks, hook) + if hook != nil { + webhooks = append(webhooks, hook) + } } } diff --git a/deploy/dashboard.yaml b/deploy/dashboard.yaml index 915381e9..ee514972 100644 --- a/deploy/dashboard.yaml +++ b/deploy/dashboard.yaml @@ -251,7 +251,7 @@ spec: containers: - command: - polaris - - dashboard + - --dashboard - --config - /opt/app/config.yaml image: 'quay.io/fairwinds/polaris:0.6' diff --git a/deploy/webhook.yaml b/deploy/webhook.yaml index 5db77c98..f288d958 100644 --- a/deploy/webhook.yaml +++ b/deploy/webhook.yaml @@ -311,7 +311,7 @@ spec: - name: webhook command: - polaris - - webhook + - --webhook - --config - /opt/app/config.yaml image: 'quay.io/fairwinds/polaris:0.6' diff --git a/docs/usage.md b/docs/usage.md index da46f83d..0983be81 100644 --- 
a/docs/usage.md +++ b/docs/usage.md @@ -95,7 +95,7 @@ kubectl port-forward --namespace polaris svc/polaris-dashboard 8080:80 ``` ### Helm ```bash -helm repo add fairwinds-stable https://charts.fairwindsops.com/stable +helm repo add fairwinds-stable https://charts.fairwinds.com/stable helm upgrade --install polaris fairwinds-stable/polaris --namespace polaris kubectl port-forward --namespace polaris svc/polaris-dashboard 8080:80 ``` diff --git a/pkg/config/supportedcontrollers.go b/pkg/config/supportedcontrollers.go index 1bca5c86..2562859b 100644 --- a/pkg/config/supportedcontrollers.go +++ b/pkg/config/supportedcontrollers.go @@ -7,8 +7,11 @@ import ( "strings" appsv1 "k8s.io/api/apps/v1" + appsv1beta1 "k8s.io/api/apps/v1beta1" + appsv1beta2 "k8s.io/api/apps/v1beta2" batchv1 "k8s.io/api/batch/v1" batchv1beta1 "k8s.io/api/batch/v1beta1" + batchv2alpha1 "k8s.io/api/batch/v2alpha1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" ) @@ -103,14 +106,19 @@ func (s SupportedController) ListSupportedAPIVersions() []runtime.Object { case Deployments: supportedVersions = []runtime.Object{ &appsv1.Deployment{}, + &appsv1beta1.Deployment{}, + &appsv1beta2.Deployment{}, } case StatefulSets: supportedVersions = []runtime.Object{ &appsv1.StatefulSet{}, + &appsv1beta1.StatefulSet{}, + &appsv1beta2.StatefulSet{}, } case DaemonSets: supportedVersions = []runtime.Object{ &appsv1.DaemonSet{}, + &appsv1beta2.DaemonSet{}, } case Jobs: supportedVersions = []runtime.Object{ @@ -119,6 +127,7 @@ func (s SupportedController) ListSupportedAPIVersions() []runtime.Object { case CronJobs: supportedVersions = []runtime.Object{ &batchv1beta1.CronJob{}, + &batchv2alpha1.CronJob{}, } case ReplicationControllers: supportedVersions = []runtime.Object{ diff --git a/pkg/kube/resources.go b/pkg/kube/resources.go index 6b867517..67a3be66 100644 --- a/pkg/kube/resources.go +++ b/pkg/kube/resources.go @@ -2,6 +2,7 @@ package kube import ( "bytes" + "encoding/json" "io/ioutil" "os" 
"path/filepath" @@ -77,7 +78,7 @@ func CreateResourceProviderFromPath(directory string) (*ResourceProvider, error) } contents, err := ioutil.ReadFile(path) if err != nil { - logrus.Errorf("Error reading file %v", path) + logrus.Errorf("Error reading file: %v", path) return err } specs := regexp.MustCompile("\n-+\n").Split(string(contents), -1) @@ -105,12 +106,12 @@ func CreateResourceProviderFromPath(directory string) (*ResourceProvider, error) func CreateResourceProviderFromCluster() (*ResourceProvider, error) { kubeConf, configError := config.GetConfig() if configError != nil { - logrus.Errorf("Error fetching KubeConfig %v", configError) + logrus.Errorf("Error fetching KubeConfig: %v", configError) return nil, configError } api, err := kubernetes.NewForConfig(kubeConf) if err != nil { - logrus.Errorf("Error creating Kubernetes client %v", err) + logrus.Errorf("Error creating Kubernetes client: %v", err) return nil, err } return CreateResourceProviderFromAPI(api, kubeConf.Host) @@ -121,52 +122,49 @@ func CreateResourceProviderFromAPI(kube kubernetes.Interface, clusterName string listOpts := metav1.ListOptions{} serverVersion, err := kube.Discovery().ServerVersion() if err != nil { - logrus.Errorf("Error fetching Cluster API version %v", err) + logrus.Errorf("Error fetching Cluster API version: %v", err) return nil, err } - deploys, err := kube.AppsV1().Deployments("").List(listOpts) + deploys, err := getDeployments(kube) if err != nil { - logrus.Errorf("Error fetching Deployments %v", err) return nil, err } - statefulSets, err := kube.AppsV1().StatefulSets("").List(listOpts) + statefulSets, err := getStatefulSets(kube) if err != nil { - logrus.Errorf("Error fetching StatefulSets%v", err) return nil, err } - daemonSets, err := kube.AppsV1().DaemonSets("").List(listOpts) + cronJobs, err := getCronJobs(kube) if err != nil { - logrus.Errorf("Error fetching DaemonSets %v", err) return nil, err } + daemonSets, err := getDaemonSets(kube) + if err != nil { + return nil, 
err + } + jobs, err := kube.BatchV1().Jobs("").List(listOpts) if err != nil { - logrus.Errorf("Error fetching Jobs %v", err) - return nil, err - } - cronJobs, err := kube.BatchV1beta1().CronJobs("").List(listOpts) - if err != nil { - logrus.Errorf("Error fetching CronJobs %v", err) + logrus.Errorf("Error fetching Jobs: %v", err) return nil, err } replicationControllers, err := kube.CoreV1().ReplicationControllers("").List(listOpts) if err != nil { - logrus.Errorf("Error fetching ReplicationControllers %v", err) + logrus.Errorf("Error fetching ReplicationControllers: %v", err) return nil, err } nodes, err := kube.CoreV1().Nodes().List(listOpts) if err != nil { - logrus.Errorf("Error fetching Nodes %v", err) + logrus.Errorf("Error fetching Nodes: %v", err) return nil, err } namespaces, err := kube.CoreV1().Namespaces().List(listOpts) if err != nil { - logrus.Errorf("Error fetching Namespaces %v", err) + logrus.Errorf("Error fetching Namespaces: %v", err) return nil, err } pods, err := kube.CoreV1().Pods("").List(listOpts) if err != nil { - logrus.Errorf("Error fetching Pods %v", err) + logrus.Errorf("Error fetching Pods: %v", err) return nil, err } @@ -175,11 +173,11 @@ func CreateResourceProviderFromAPI(kube kubernetes.Interface, clusterName string SourceType: "Cluster", SourceName: clusterName, CreationTime: time.Now(), - Deployments: deploys.Items, - StatefulSets: statefulSets.Items, - DaemonSets: daemonSets.Items, + Deployments: deploys, + StatefulSets: statefulSets, + DaemonSets: daemonSets, + CronJobs: cronJobs, Jobs: jobs.Items, - CronJobs: cronJobs.Items, ReplicationControllers: replicationControllers.Items, Nodes: nodes.Items, Namespaces: namespaces.Items, @@ -237,3 +235,155 @@ func addResourceFromString(contents string, resources *ResourceProvider) error { } return nil } + +func getDeployments(kube kubernetes.Interface) ([]appsv1.Deployment, error) { + listOpts := metav1.ListOptions{} + deployList, err := kube.AppsV1().Deployments("").List(listOpts) + if 
err != nil { + logrus.Errorf("Error fetching Deployments: %v", err) + return nil, err + } + deploys := deployList.Items + + oldDeploys := make([]interface{}, 0) + deploysV1B1, err := kube.AppsV1beta1().Deployments("").List(listOpts) + if err != nil { + logrus.Errorf("Error fetching Deployments v1beta1: %v", err) + return nil, err + } + for _, oldDeploy := range deploysV1B1.Items { + oldDeploys = append(oldDeploys, oldDeploy) + } + deploysV1B2, err := kube.AppsV1beta2().Deployments("").List(listOpts) + if err != nil { + logrus.Errorf("Error fetching Deployments v1beta2: %v", err) + return nil, err + } + for _, oldDeploy := range deploysV1B2.Items { + oldDeploys = append(oldDeploys, oldDeploy) + } + + for _, oldDeploy := range oldDeploys { + str, err := json.Marshal(oldDeploy) + if err != nil { + logrus.Errorf("Error marshaling old deployment version: %v", err) + return nil, err + } + deploy := appsv1.Deployment{} + err = json.Unmarshal(str, &deploy) + if err != nil { + logrus.Errorf("Error unmarshaling old deployment version: %v", err) + return nil, err + } + deploys = append(deploys, deploy) + } + return deploys, nil +} + +func getStatefulSets(kube kubernetes.Interface) ([]appsv1.StatefulSet, error) { + listOpts := metav1.ListOptions{} + controllerList, err := kube.AppsV1().StatefulSets("").List(listOpts) + if err != nil { + logrus.Errorf("Error fetching StatefulSets: %v", err) + return nil, err + } + controllers := controllerList.Items + + oldControllers := make([]interface{}, 0) + controllersV1B1, err := kube.AppsV1beta1().StatefulSets("").List(listOpts) + if err != nil { + logrus.Errorf("Error fetching StatefulSets v1beta1: %v", err) + return nil, err + } + for _, oldController := range controllersV1B1.Items { + oldControllers = append(oldControllers, oldController) + } + controllersV1B2, err := kube.AppsV1beta2().StatefulSets("").List(listOpts) + if err != nil { + logrus.Errorf("Error fetching StatefulSets v1beta2: %v", err) + return nil, err + } + for _, 
oldController := range controllersV1B2.Items { + oldControllers = append(oldControllers, oldController) + } + + for _, oldController := range oldControllers { + str, err := json.Marshal(oldController) + if err != nil { + logrus.Errorf("Error marshaling old StatefulSet version: %v", err) + return nil, err + } + controller := appsv1.StatefulSet{} + err = json.Unmarshal(str, &controller) + if err != nil { + logrus.Errorf("Error unmarshaling old StatefulSet version: %v", err) + return nil, err + } + controllers = append(controllers, controller) + } + return controllers, nil +} + +func getDaemonSets(kube kubernetes.Interface) ([]appsv1.DaemonSet, error) { + listOpts := metav1.ListOptions{} + controllerList, err := kube.AppsV1().DaemonSets("").List(listOpts) + if err != nil { + logrus.Errorf("Error fetching DaemonSets: %v", err) + return nil, err + } + controllers := controllerList.Items + + controllersV1B2, err := kube.AppsV1beta2().DaemonSets("").List(listOpts) + if err != nil { + logrus.Errorf("Error fetching DaemonSets v1beta2: %v", err) + return nil, err + } + + for _, oldController := range controllersV1B2.Items { + str, err := json.Marshal(oldController) + if err != nil { + logrus.Errorf("Error marshaling old DaemonSet version: %v", err) + return nil, err + } + controller := appsv1.DaemonSet{} + err = json.Unmarshal(str, &controller) + if err != nil { + logrus.Errorf("Error unmarshaling old DaemonSet version: %v", err) + return nil, err + } + controllers = append(controllers, controller) + } + return controllers, nil +} + +func getCronJobs(kube kubernetes.Interface) ([]batchv1beta1.CronJob, error) { + listOpts := metav1.ListOptions{} + controllerList, err := kube.BatchV1beta1().CronJobs("").List(listOpts) + if err != nil { + logrus.Errorf("Error fetching CronJobs: %v", err) + return nil, err + } + controllers := controllerList.Items + + controllersV2A1, err := kube.BatchV2alpha1().CronJobs("").List(listOpts) + if err != nil { + logrus.Errorf("Error fetching 
CronJobs v2alpha1: %v", err) + return nil, err + } + + for _, oldController := range controllersV2A1.Items { + str, err := json.Marshal(oldController) + if err != nil { + logrus.Errorf("Error marshaling old CronJob version: %v", err) + return nil, err + } + controller := batchv1beta1.CronJob{} + err = json.Unmarshal(str, &controller) + if err != nil { + logrus.Errorf("Error unmarshaling old CronJob version: %v", err) + return nil, err + } + controllers = append(controllers, controller) + } + return controllers, nil +} diff --git a/pkg/validator/fullaudit_test.go b/pkg/validator/fullaudit_test.go index b51bf595..c5cdd7f8 100644 --- a/pkg/validator/fullaudit_test.go +++ b/pkg/validator/fullaudit_test.go @@ -12,6 +12,7 @@ import ( func TestGetTemplateData(t *testing.T) { k8s := test.SetupTestAPI() k8s = test.SetupAddControllers(k8s, "test") + k8s = test.SetupAddExtraControllerVersions(k8s, "test-extra") resources, err := kube.CreateResourceProviderFromAPI(k8s, "test") assert.Equal(t, err, nil, "error should be nil") @@ -32,8 +33,8 @@ func TestGetTemplateData(t *testing.T) { sum := CountSummary{ Successes: uint(0), - Warnings: uint(4), - Errors: uint(4), + Warnings: uint(9), + Errors: uint(9), } actualAudit, err := RunAudit(c, resources) @@ -43,29 +44,27 @@ func TestGetTemplateData(t *testing.T) { assert.Equal(t, actualAudit.SourceType, "Cluster", "should be from a cluster") assert.Equal(t, actualAudit.SourceName, "test", "should be from a cluster") - assert.Equal(t, 6, len(actualAudit.Results)) + expected := []struct { + kind string + results int + }{ + {kind: "Deployment", results: 2}, + {kind: "Deployment", results: 2}, + {kind: "Deployment", results: 2}, + {kind: "StatefulSet", results: 2}, + {kind: "StatefulSet", results: 2}, + {kind: "StatefulSet", results: 2}, + {kind: "DaemonSet", results: 2}, + {kind: "DaemonSet", results: 2}, + {kind: "Job", results: 0}, + {kind: "CronJob", results: 0}, + {kind: "ReplicationController", results: 2}, + } - assert.Equal(t, 
"Deployment", actualAudit.Results[0].Kind) - assert.Equal(t, 1, len(actualAudit.Results[0].PodResult.ContainerResults)) - assert.Equal(t, 2, len(actualAudit.Results[0].PodResult.ContainerResults[0].Results)) - - assert.Equal(t, "StatefulSet", actualAudit.Results[1].Kind) - assert.Equal(t, 1, len(actualAudit.Results[1].PodResult.ContainerResults)) - assert.Equal(t, 2, len(actualAudit.Results[1].PodResult.ContainerResults[0].Results)) - - assert.Equal(t, "DaemonSet", actualAudit.Results[2].Kind) - assert.Equal(t, 1, len(actualAudit.Results[2].PodResult.ContainerResults)) - assert.Equal(t, 2, len(actualAudit.Results[2].PodResult.ContainerResults[0].Results)) - - assert.Equal(t, "Job", actualAudit.Results[3].Kind) - assert.Equal(t, 1, len(actualAudit.Results[3].PodResult.ContainerResults)) - assert.Equal(t, 0, len(actualAudit.Results[3].PodResult.ContainerResults[0].Results)) - - assert.Equal(t, "CronJob", actualAudit.Results[4].Kind) - assert.Equal(t, 1, len(actualAudit.Results[4].PodResult.ContainerResults)) - assert.Equal(t, 0, len(actualAudit.Results[4].PodResult.ContainerResults[0].Results)) - - assert.Equal(t, "ReplicationController", actualAudit.Results[5].Kind) - assert.Equal(t, 1, len(actualAudit.Results[5].PodResult.ContainerResults)) - assert.Equal(t, 2, len(actualAudit.Results[5].PodResult.ContainerResults[0].Results)) + assert.Equal(t, len(expected), len(actualAudit.Results)) + for idx, result := range actualAudit.Results { + assert.Equal(t, expected[idx].kind, result.Kind) + assert.Equal(t, 1, len(result.PodResult.ContainerResults)) + assert.Equal(t, expected[idx].results, len(result.PodResult.ContainerResults[0].Results)) + } } diff --git a/pkg/webhook/validator.go b/pkg/webhook/validator.go index ba65d34f..f93064a3 100644 --- a/pkg/webhook/validator.go +++ b/pkg/webhook/validator.go @@ -18,7 +18,6 @@ import ( "context" "fmt" "net/http" - "os" "github.com/fairwindsops/polaris/pkg/config" validator "github.com/fairwindsops/polaris/pkg/validator" @@ -80,10 
+79,9 @@ func NewWebhook(name string, mgr manager.Manager, validator Validator, apiType r Build() if err != nil { logrus.Errorf("Error building webhook: %v", err) - os.Exit(1) - } else { - logrus.Info(name + " webhook started") + return nil } + logrus.Info(name + " webhook started") return webhook } diff --git a/test/fixtures.go b/test/fixtures.go index 4b25f91d..303b0613 100644 --- a/test/fixtures.go +++ b/test/fixtures.go @@ -1,9 +1,9 @@ package test import ( - "fmt" - appsv1 "k8s.io/api/apps/v1" + appsv1beta1 "k8s.io/api/apps/v1beta1" + appsv1beta2 "k8s.io/api/apps/v1beta2" batchv1 "k8s.io/api/batch/v1" batchv1beta1 "k8s.io/api/batch/v1beta1" corev1 "k8s.io/api/core/v1" @@ -104,33 +104,84 @@ func SetupTestAPI() kubernetes.Interface { func SetupAddControllers(k kubernetes.Interface, namespace string) kubernetes.Interface { d1 := MockDeploy() if _, err := k.AppsV1().Deployments(namespace).Create(&d1); err != nil { - fmt.Println(err) + panic(err) } s1 := MockStatefulSet() if _, err := k.AppsV1().StatefulSets(namespace).Create(&s1); err != nil { - fmt.Println(err) + panic(err) } ds1 := MockDaemonSet() if _, err := k.AppsV1().DaemonSets(namespace).Create(&ds1); err != nil { - fmt.Println(err) + panic(err) } j1 := MockJob() if _, err := k.BatchV1().Jobs(namespace).Create(&j1); err != nil { - fmt.Println(err) + panic(err) } cj1 := MockCronJob() if _, err := k.BatchV1beta1().CronJobs(namespace).Create(&cj1); err != nil { - fmt.Println(err) + panic(err) } rc1 := MockReplicationController() if _, err := k.CoreV1().ReplicationControllers(namespace).Create(&rc1); err != nil { - fmt.Println(err) + panic(err) } return k } + +// SetupAddExtraControllerVersions creates mock controllers and adds them to the test clientset. 
+func SetupAddExtraControllerVersions(k kubernetes.Interface, namespace string) kubernetes.Interface { + p := MockPod() + + dv1b1 := appsv1beta1.Deployment{ + Spec: appsv1beta1.DeploymentSpec{ + Template: p, + }, + } + if _, err := k.AppsV1beta1().Deployments(namespace).Create(&dv1b1); err != nil { + panic(err) + } + + dv1b2 := appsv1beta2.Deployment{ + Spec: appsv1beta2.DeploymentSpec{ + Template: p, + }, + } + if _, err := k.AppsV1beta2().Deployments(namespace).Create(&dv1b2); err != nil { + panic(err) + } + + ssv1b1 := appsv1beta1.StatefulSet{ + Spec: appsv1beta1.StatefulSetSpec{ + Template: p, + }, + } + if _, err := k.AppsV1beta1().StatefulSets(namespace).Create(&ssv1b1); err != nil { + panic(err) + } + + ssv1b2 := appsv1beta2.StatefulSet{ + Spec: appsv1beta2.StatefulSetSpec{ + Template: p, + }, + } + if _, err := k.AppsV1beta2().StatefulSets(namespace).Create(&ssv1b2); err != nil { + panic(err) + } + + dsv1b2 := appsv1beta2.DaemonSet{ + Spec: appsv1beta2.DaemonSetSpec{ + Template: p, + }, + } + if _, err := k.AppsV1beta2().DaemonSets(namespace).Create(&dsv1b2); err != nil { + panic(err) + } + return k +} diff --git a/test/kube_dashboard_test.sh b/test/kube_dashboard_test.sh index 0da43b89..8366088b 100755 --- a/test/kube_dashboard_test.sh +++ b/test/kube_dashboard_test.sh @@ -1,4 +1,6 @@ sed -ri "s|'(quay.io/fairwinds/polaris:).+'|'\1${CIRCLE_SHA1}'|" ./deploy/dashboard.yaml +# TODO: remove this after 1.0 is released +sed -i "s/--dashboard/dashboard/" ./deploy/dashboard.yaml function check_dashboard_is_ready() { diff --git a/test/webhook_cases/failing_test.daemonset.v1beta2.yaml b/test/webhook_cases/failing_test.daemonset.v1beta2.yaml new file mode 100644 index 00000000..83f2a997 --- /dev/null +++ b/test/webhook_cases/failing_test.daemonset.v1beta2.yaml @@ -0,0 +1,48 @@ +apiVersion: apps/v1beta2 +kind: DaemonSet +metadata: + name: fluentd-elasticsearch + namespace: kube-system + labels: + k8s-app: fluentd-logging +spec: + selector: + matchLabels: + name: 
fluentd-elasticsearch + template: + metadata: + labels: + name: fluentd-elasticsearch + spec: + tolerations: + - key: node-role.kubernetes.io/master + effect: + containers: + - name: fluentd-elasticsearch + image: gcr.io/fluentd-elasticsearch/fluentd:v2.5.1 + resources: + requests: + cpu: 100m + volumeMounts: + - name: varlog + mountPath: /var/log + - name: varlibdockercontainers + mountPath: /var/lib/docker/containers + readOnly: true + securityContext: + allowPrivilegeEscalation: true + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + capabilities: + drop: + - ALL + terminationGracePeriodSeconds: 30 + volumes: + - name: varlog + hostPath: + path: /var/log + - name: varlibdockercontainers + hostPath: + path: /var/lib/docker/containers + diff --git a/test/webhook_cases/failing_test.deployment.v1beta1.yaml b/test/webhook_cases/failing_test.deployment.v1beta1.yaml new file mode 100644 index 00000000..0cab0a54 --- /dev/null +++ b/test/webhook_cases/failing_test.deployment.v1beta1.yaml @@ -0,0 +1,29 @@ +apiVersion: apps/v1beta1 +kind: Deployment +metadata: + name: nginx-deployment + labels: + app: nginx +spec: + replicas: 3 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.7.9 + ports: + - containerPort: 80 + securityContext: + allowPrivilegeEscalation: true + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + capabilities: + drop: + - ALL diff --git a/test/webhook_cases/failing_test.deployment.v1beta2.yaml b/test/webhook_cases/failing_test.deployment.v1beta2.yaml new file mode 100644 index 00000000..8f7d6c3b --- /dev/null +++ b/test/webhook_cases/failing_test.deployment.v1beta2.yaml @@ -0,0 +1,29 @@ +apiVersion: apps/v1beta2 +kind: Deployment +metadata: + name: nginx-deployment + labels: + app: nginx +spec: + replicas: 3 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - 
name: nginx + image: nginx:1.7.9 + ports: + - containerPort: 80 + securityContext: + allowPrivilegeEscalation: true + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + capabilities: + drop: + - ALL diff --git a/test/webhook_cases/failing_test.statefulset.v1beta1.yaml b/test/webhook_cases/failing_test.statefulset.v1beta1.yaml new file mode 100644 index 00000000..6579e03f --- /dev/null +++ b/test/webhook_cases/failing_test.statefulset.v1beta1.yaml @@ -0,0 +1,56 @@ +apiVersion: v1 +kind: Service +metadata: + name: nginx + labels: + app: nginx +spec: + ports: + - port: 80 + name: web + clusterIP: None + selector: + app: nginx +--- +apiVersion: apps/v1beta1 +kind: StatefulSet +metadata: + name: web +spec: + selector: + matchLabels: + app: nginx # has to match .spec.template.metadata.labels + serviceName: "nginx" + replicas: 3 # by default is 1 + template: + metadata: + labels: + app: nginx # has to match .spec.selector.matchLabels + spec: + terminationGracePeriodSeconds: 10 + containers: + - name: nginx + image: k8s.gcr.io/nginx-slim:0.8 + ports: + - containerPort: 80 + name: web + volumeMounts: + - name: www + mountPath: /usr/share/nginx/html + securityContext: + allowPrivilegeEscalation: true + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + capabilities: + drop: + - ALL + volumeClaimTemplates: + - metadata: + name: www + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: "my-storage-class" + resources: + requests: + storage: 1Gi diff --git a/test/webhook_cases/failing_test.statefulset.v1beta2.yaml b/test/webhook_cases/failing_test.statefulset.v1beta2.yaml new file mode 100644 index 00000000..eb697926 --- /dev/null +++ b/test/webhook_cases/failing_test.statefulset.v1beta2.yaml @@ -0,0 +1,56 @@ +apiVersion: v1 +kind: Service +metadata: + name: nginx + labels: + app: nginx +spec: + ports: + - port: 80 + name: web + clusterIP: None + selector: + app: nginx +--- +apiVersion: apps/v1beta2 +kind: StatefulSet +metadata: 
+ name: web +spec: + selector: + matchLabels: + app: nginx # has to match .spec.template.metadata.labels + serviceName: "nginx" + replicas: 3 # by default is 1 + template: + metadata: + labels: + app: nginx # has to match .spec.selector.matchLabels + spec: + terminationGracePeriodSeconds: 10 + containers: + - name: nginx + image: k8s.gcr.io/nginx-slim:0.8 + ports: + - containerPort: 80 + name: web + volumeMounts: + - name: www + mountPath: /usr/share/nginx/html + securityContext: + allowPrivilegeEscalation: true + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + capabilities: + drop: + - ALL + volumeClaimTemplates: + - metadata: + name: www + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: "my-storage-class" + resources: + requests: + storage: 1Gi diff --git a/test/webhook_cases/passing_test.daemonset.v1beta2.yaml b/test/webhook_cases/passing_test.daemonset.v1beta2.yaml new file mode 100644 index 00000000..c6d4b380 --- /dev/null +++ b/test/webhook_cases/passing_test.daemonset.v1beta2.yaml @@ -0,0 +1,48 @@ +apiVersion: apps/v1beta2 +kind: DaemonSet +metadata: + name: fluentd-elasticsearch + namespace: kube-system + labels: + k8s-app: fluentd-logging +spec: + selector: + matchLabels: + name: fluentd-elasticsearch + template: + metadata: + labels: + name: fluentd-elasticsearch + spec: + tolerations: + - key: node-role.kubernetes.io/master + effect: + containers: + - name: fluentd-elasticsearch + image: gcr.io/fluentd-elasticsearch/fluentd:v2.5.1 + resources: + requests: + cpu: 100m + volumeMounts: + - name: varlog + mountPath: /var/log + - name: varlibdockercontainers + mountPath: /var/lib/docker/containers + readOnly: true + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + capabilities: + drop: + - ALL + terminationGracePeriodSeconds: 30 + volumes: + - name: varlog + hostPath: + path: /var/log + - name: varlibdockercontainers + hostPath: + path: 
/var/lib/docker/containers + diff --git a/test/webhook_cases/passing_test.deployment.v1beta1.yaml b/test/webhook_cases/passing_test.deployment.v1beta1.yaml new file mode 100644 index 00000000..48f1cd86 --- /dev/null +++ b/test/webhook_cases/passing_test.deployment.v1beta1.yaml @@ -0,0 +1,29 @@ +apiVersion: apps/v1beta1 +kind: Deployment +metadata: + name: nginx-deployment + labels: + app: nginx +spec: + replicas: 3 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.7.9 + ports: + - containerPort: 80 + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + capabilities: + drop: + - ALL diff --git a/test/webhook_cases/passing_test.deployment.v1beta2.yaml b/test/webhook_cases/passing_test.deployment.v1beta2.yaml new file mode 100644 index 00000000..55cfd08f --- /dev/null +++ b/test/webhook_cases/passing_test.deployment.v1beta2.yaml @@ -0,0 +1,29 @@ +apiVersion: apps/v1beta2 +kind: Deployment +metadata: + name: nginx-deployment + labels: + app: nginx +spec: + replicas: 3 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.7.9 + ports: + - containerPort: 80 + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + capabilities: + drop: + - ALL diff --git a/test/webhook_cases/passing_test.statefulset.v1beta1.yaml b/test/webhook_cases/passing_test.statefulset.v1beta1.yaml new file mode 100644 index 00000000..4ae94853 --- /dev/null +++ b/test/webhook_cases/passing_test.statefulset.v1beta1.yaml @@ -0,0 +1,56 @@ +apiVersion: v1 +kind: Service +metadata: + name: nginx + labels: + app: nginx +spec: + ports: + - port: 80 + name: web + clusterIP: None + selector: + app: nginx +--- +apiVersion: apps/v1beta1 +kind: StatefulSet +metadata: + name: web +spec: + 
selector: + matchLabels: + app: nginx # has to match .spec.template.metadata.labels + serviceName: "nginx" + replicas: 3 # by default is 1 + template: + metadata: + labels: + app: nginx # has to match .spec.selector.matchLabels + spec: + terminationGracePeriodSeconds: 10 + containers: + - name: nginx + image: k8s.gcr.io/nginx-slim:0.8 + ports: + - containerPort: 80 + name: web + volumeMounts: + - name: www + mountPath: /usr/share/nginx/html + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + capabilities: + drop: + - ALL + volumeClaimTemplates: + - metadata: + name: www + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: "my-storage-class" + resources: + requests: + storage: 1Gi diff --git a/test/webhook_cases/passing_test.statefulset.v1beta2.yaml b/test/webhook_cases/passing_test.statefulset.v1beta2.yaml new file mode 100644 index 00000000..604a5dc0 --- /dev/null +++ b/test/webhook_cases/passing_test.statefulset.v1beta2.yaml @@ -0,0 +1,56 @@ +apiVersion: v1 +kind: Service +metadata: + name: nginx + labels: + app: nginx +spec: + ports: + - port: 80 + name: web + clusterIP: None + selector: + app: nginx +--- +apiVersion: apps/v1beta2 +kind: StatefulSet +metadata: + name: web +spec: + selector: + matchLabels: + app: nginx # has to match .spec.template.metadata.labels + serviceName: "nginx" + replicas: 3 # by default is 1 + template: + metadata: + labels: + app: nginx # has to match .spec.selector.matchLabels + spec: + terminationGracePeriodSeconds: 10 + containers: + - name: nginx + image: k8s.gcr.io/nginx-slim:0.8 + ports: + - containerPort: 80 + name: web + volumeMounts: + - name: www + mountPath: /usr/share/nginx/html + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + capabilities: + drop: + - ALL + volumeClaimTemplates: + - metadata: + name: www + spec: + accessModes: [ "ReadWriteOnce" ] + 
storageClassName: "my-storage-class" + resources: + requests: + storage: 1Gi diff --git a/test/webhook_test.sh b/test/webhook_test.sh index 9909ad32..276c3d64 100755 --- a/test/webhook_test.sh +++ b/test/webhook_test.sh @@ -3,6 +3,8 @@ set -e #sed is replacing the polaris version with this commit sha so we are testing exactly this verison. sed -ri "s|'(quay.io/fairwinds/polaris:).+'|'\1${CIRCLE_SHA1}'|" ./deploy/webhook.yaml +# TODO: remove this after 1.0 is released +sed -i "s/--webhook/webhook/" ./deploy/webhook.yaml # Testing to ensure that the webhook starts up, allows a correct deployment to pass, # and prevents a incorrectly formatted deployment.