Merge branch 'master' into bb/openshift

This commit is contained in:
baderbuddy
2020-03-16 13:07:20 -04:00
committed by GitHub
22 changed files with 718 additions and 69 deletions

View File

@@ -23,7 +23,7 @@ Polaris can be run in three different modes:
* As a [validating webhook](#webhook), so you can automatically reject workloads that don't adhere to your organization's policies.
* As a [command-line tool](#cli), so you can test local YAML files, e.g. as part of a CI/CD process.
**Want to learn more?** Fairwinds holds [office hours on Zoom](https://zoom.us/j/242508205) the first Friday of every month, at 12pm Eastern. You can also reach out via email at `opensource@fairwinds.com`
**Want to learn more?** Reach out on [the Slack channel](https://fairwindscommunity.slack.com/messages/polaris), send an email to `opensource@fairwinds.com`, or join us for [office hours on Zoom](https://zoom.us/j/242508205)
# Dashboard Quickstart
@@ -49,10 +49,10 @@ The Polaris dashboard is a way to get a simple visual overview of the current st
Our default standards in Polaris are rather high, so don't be surprised if your score is lower than you might expect. A key goal for Polaris was to set a high standard and aim for great configuration by default. If the defaults we've included are too strict, it's easy to adjust the configuration as part of the deployment configuration to better suit your workloads.
## Webhook
## Admission Controller: Validating Webhook
> [View installation instructions](docs/usage.md#webhook)
Polaris includes an optional validating webhook. This accepts the same configuration as the dashboard, and can run the same validations. This webhook will reject any workloads that trigger a validation error. This is indicative of the greater goal of Polaris, not just to encourage better configuration through dashboard visibility, but to actually enforce it with this webhook.
Polaris can be run as an admission controller that acts as a validating webhook. This accepts the same configuration as the dashboard, and can run the same validations. This webhook will reject any workloads that trigger a validation error. This is indicative of the greater goal of Polaris, not just to encourage better configuration through dashboard visibility, but to actually enforce it with this webhook. Polaris will not fix your workloads, only block them.
Unfortunately we have not found a way to display warnings as part of `kubectl` output unless we are rejecting a workload altogether. That means that any checks with a severity of `warning` will still pass webhook validation, and the only evidence of that warning will either be in the Polaris dashboard or the Polaris webhook logs.

View File

@@ -106,7 +106,9 @@ var webhookCmd = &cobra.Command{
for innerIndex, supportedAPIType := range controllerToScan.ListSupportedAPIVersions() {
webhookName := strings.ToLower(fmt.Sprintf("%s-%d-%d", controllerToScan, index, innerIndex))
hook := fwebhook.NewWebhook(webhookName, mgr, fwebhook.Validator{Config: config}, supportedAPIType)
webhooks = append(webhooks, hook)
if hook != nil {
webhooks = append(webhooks, hook)
}
}
}

View File

@@ -251,7 +251,7 @@ spec:
containers:
- command:
- polaris
- dashboard
- --dashboard
- --config
- /opt/app/config.yaml
image: 'quay.io/fairwinds/polaris:0.6'

View File

@@ -311,7 +311,7 @@ spec:
- name: webhook
command:
- polaris
- webhook
- --webhook
- --config
- /opt/app/config.yaml
image: 'quay.io/fairwinds/polaris:0.6'

View File

@@ -95,7 +95,7 @@ kubectl port-forward --namespace polaris svc/polaris-dashboard 8080:80
```
### Helm
```bash
helm repo add fairwinds-stable https://charts.fairwindsops.com/stable
helm repo add fairwinds-stable https://charts.fairwinds.com/stable
helm upgrade --install polaris fairwinds-stable/polaris --namespace polaris
kubectl port-forward --namespace polaris svc/polaris-dashboard 8080:80
```

View File

@@ -7,8 +7,11 @@ import (
"strings"
appsv1 "k8s.io/api/apps/v1"
appsv1beta1 "k8s.io/api/apps/v1beta1"
appsv1beta2 "k8s.io/api/apps/v1beta2"
batchv1 "k8s.io/api/batch/v1"
batchv1beta1 "k8s.io/api/batch/v1beta1"
batchv2alpha1 "k8s.io/api/batch/v2alpha1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
)
@@ -103,14 +106,19 @@ func (s SupportedController) ListSupportedAPIVersions() []runtime.Object {
case Deployments:
supportedVersions = []runtime.Object{
&appsv1.Deployment{},
&appsv1beta1.Deployment{},
&appsv1beta2.Deployment{},
}
case StatefulSets:
supportedVersions = []runtime.Object{
&appsv1.StatefulSet{},
&appsv1beta1.StatefulSet{},
&appsv1beta2.StatefulSet{},
}
case DaemonSets:
supportedVersions = []runtime.Object{
&appsv1.DaemonSet{},
&appsv1beta2.DaemonSet{},
}
case Jobs:
supportedVersions = []runtime.Object{
@@ -119,6 +127,7 @@ func (s SupportedController) ListSupportedAPIVersions() []runtime.Object {
case CronJobs:
supportedVersions = []runtime.Object{
&batchv1beta1.CronJob{},
&batchv2alpha1.CronJob{},
}
case ReplicationControllers:
supportedVersions = []runtime.Object{

View File

@@ -2,6 +2,7 @@ package kube
import (
"bytes"
"encoding/json"
"io/ioutil"
"os"
"path/filepath"
@@ -77,7 +78,7 @@ func CreateResourceProviderFromPath(directory string) (*ResourceProvider, error)
}
contents, err := ioutil.ReadFile(path)
if err != nil {
logrus.Errorf("Error reading file %v", path)
logrus.Errorf("Error reading file: %v", path)
return err
}
specs := regexp.MustCompile("\n-+\n").Split(string(contents), -1)
@@ -105,12 +106,12 @@ func CreateResourceProviderFromPath(directory string) (*ResourceProvider, error)
func CreateResourceProviderFromCluster() (*ResourceProvider, error) {
kubeConf, configError := config.GetConfig()
if configError != nil {
logrus.Errorf("Error fetching KubeConfig %v", configError)
logrus.Errorf("Error fetching KubeConfig: %v", configError)
return nil, configError
}
api, err := kubernetes.NewForConfig(kubeConf)
if err != nil {
logrus.Errorf("Error creating Kubernetes client %v", err)
logrus.Errorf("Error creating Kubernetes client: %v", err)
return nil, err
}
return CreateResourceProviderFromAPI(api, kubeConf.Host)
@@ -121,52 +122,49 @@ func CreateResourceProviderFromAPI(kube kubernetes.Interface, clusterName string
listOpts := metav1.ListOptions{}
serverVersion, err := kube.Discovery().ServerVersion()
if err != nil {
logrus.Errorf("Error fetching Cluster API version %v", err)
logrus.Errorf("Error fetching Cluster API version: %v", err)
return nil, err
}
deploys, err := kube.AppsV1().Deployments("").List(listOpts)
deploys, err := getDeployments(kube)
if err != nil {
logrus.Errorf("Error fetching Deployments %v", err)
return nil, err
}
statefulSets, err := kube.AppsV1().StatefulSets("").List(listOpts)
statefulSets, err := getStatefulSets(kube)
if err != nil {
logrus.Errorf("Error fetching StatefulSets%v", err)
return nil, err
}
daemonSets, err := kube.AppsV1().DaemonSets("").List(listOpts)
cronJobs, err := getCronJobs(kube)
if err != nil {
logrus.Errorf("Error fetching DaemonSets %v", err)
return nil, err
}
daemonSets, err := getDaemonSets(kube)
if err != nil {
return nil, err
}
jobs, err := kube.BatchV1().Jobs("").List(listOpts)
if err != nil {
logrus.Errorf("Error fetching Jobs %v", err)
return nil, err
}
cronJobs, err := kube.BatchV1beta1().CronJobs("").List(listOpts)
if err != nil {
logrus.Errorf("Error fetching CronJobs %v", err)
logrus.Errorf("Error fetching Jobs: %v", err)
return nil, err
}
replicationControllers, err := kube.CoreV1().ReplicationControllers("").List(listOpts)
if err != nil {
logrus.Errorf("Error fetching ReplicationControllers %v", err)
logrus.Errorf("Error fetching ReplicationControllers: %v", err)
return nil, err
}
nodes, err := kube.CoreV1().Nodes().List(listOpts)
if err != nil {
logrus.Errorf("Error fetching Nodes %v", err)
logrus.Errorf("Error fetching Nodes: %v", err)
return nil, err
}
namespaces, err := kube.CoreV1().Namespaces().List(listOpts)
if err != nil {
logrus.Errorf("Error fetching Namespaces %v", err)
logrus.Errorf("Error fetching Namespaces: %v", err)
return nil, err
}
pods, err := kube.CoreV1().Pods("").List(listOpts)
if err != nil {
logrus.Errorf("Error fetching Pods %v", err)
logrus.Errorf("Error fetching Pods: %v", err)
return nil, err
}
@@ -175,11 +173,11 @@ func CreateResourceProviderFromAPI(kube kubernetes.Interface, clusterName string
SourceType: "Cluster",
SourceName: clusterName,
CreationTime: time.Now(),
Deployments: deploys.Items,
StatefulSets: statefulSets.Items,
DaemonSets: daemonSets.Items,
Deployments: deploys,
StatefulSets: statefulSets,
DaemonSets: daemonSets,
CronJobs: cronJobs,
Jobs: jobs.Items,
CronJobs: cronJobs.Items,
ReplicationControllers: replicationControllers.Items,
Nodes: nodes.Items,
Namespaces: namespaces.Items,
@@ -237,3 +235,155 @@ func addResourceFromString(contents string, resources *ResourceProvider) error {
}
return nil
}
// getDeployments lists Deployments from the apps/v1, apps/v1beta1, and
// apps/v1beta2 APIs and normalizes every result to the apps/v1 type.
// Any list or conversion failure is logged and returned to the caller.
func getDeployments(kube kubernetes.Interface) ([]appsv1.Deployment, error) {
	opts := metav1.ListOptions{}
	currentList, err := kube.AppsV1().Deployments("").List(opts)
	if err != nil {
		logrus.Errorf("Error fetching Deployments: %v", err)
		return nil, err
	}
	v1beta1List, err := kube.AppsV1beta1().Deployments("").List(opts)
	if err != nil {
		logrus.Errorf("Error fetching Deployments v1beta1: %v", err)
		return nil, err
	}
	v1beta2List, err := kube.AppsV1beta2().Deployments("").List(opts)
	if err != nil {
		logrus.Errorf("Error fetching Deployments v1beta2: %v", err)
		return nil, err
	}
	deploys := currentList.Items
	// Gather the beta-API objects, then convert each to the apps/v1 schema
	// via a JSON round-trip (the field layouts are structurally compatible).
	legacy := make([]interface{}, 0, len(v1beta1List.Items)+len(v1beta2List.Items))
	for _, d := range v1beta1List.Items {
		legacy = append(legacy, d)
	}
	for _, d := range v1beta2List.Items {
		legacy = append(legacy, d)
	}
	for _, old := range legacy {
		raw, marshalErr := json.Marshal(old)
		if marshalErr != nil {
			logrus.Errorf("Error marshaling old deployment version: %v", marshalErr)
			return nil, marshalErr
		}
		var converted appsv1.Deployment
		if unmarshalErr := json.Unmarshal(raw, &converted); unmarshalErr != nil {
			logrus.Errorf("Error unmarshaling old deployment version: %v", unmarshalErr)
			return nil, unmarshalErr
		}
		deploys = append(deploys, converted)
	}
	return deploys, nil
}
// getStatefulSets lists StatefulSets from the apps/v1, apps/v1beta1, and
// apps/v1beta2 APIs and normalizes every result to the apps/v1 type.
// Any list or conversion failure is logged and returned to the caller.
func getStatefulSets(kube kubernetes.Interface) ([]appsv1.StatefulSet, error) {
	opts := metav1.ListOptions{}
	currentList, err := kube.AppsV1().StatefulSets("").List(opts)
	if err != nil {
		logrus.Errorf("Error fetching StatefulSets: %v", err)
		return nil, err
	}
	v1beta1List, err := kube.AppsV1beta1().StatefulSets("").List(opts)
	if err != nil {
		logrus.Errorf("Error fetching StatefulSets v1beta1: %v", err)
		return nil, err
	}
	v1beta2List, err := kube.AppsV1beta2().StatefulSets("").List(opts)
	if err != nil {
		logrus.Errorf("Error fetching StatefulSets v1beta2: %v", err)
		return nil, err
	}
	controllers := currentList.Items
	// Gather the beta-API objects, then convert each to the apps/v1 schema
	// via a JSON round-trip (the field layouts are structurally compatible).
	legacy := make([]interface{}, 0, len(v1beta1List.Items)+len(v1beta2List.Items))
	for _, c := range v1beta1List.Items {
		legacy = append(legacy, c)
	}
	for _, c := range v1beta2List.Items {
		legacy = append(legacy, c)
	}
	for _, old := range legacy {
		raw, marshalErr := json.Marshal(old)
		if marshalErr != nil {
			logrus.Errorf("Error marshaling old StatefulSet version: %v", marshalErr)
			return nil, marshalErr
		}
		var converted appsv1.StatefulSet
		if unmarshalErr := json.Unmarshal(raw, &converted); unmarshalErr != nil {
			logrus.Errorf("Error unmarshaling old StatefulSet version: %v", unmarshalErr)
			return nil, unmarshalErr
		}
		controllers = append(controllers, converted)
	}
	return controllers, nil
}
// getDaemonSets lists DaemonSets from both the apps/v1 and apps/v1beta2 APIs
// and normalizes every result to the apps/v1 type. Any list or conversion
// failure is logged and returned to the caller.
func getDaemonSets(kube kubernetes.Interface) ([]appsv1.DaemonSet, error) {
	opts := metav1.ListOptions{}
	currentList, err := kube.AppsV1().DaemonSets("").List(opts)
	if err != nil {
		logrus.Errorf("Error fetching DaemonSets: %v", err)
		return nil, err
	}
	betaList, err := kube.AppsV1beta2().DaemonSets("").List(opts)
	if err != nil {
		logrus.Errorf("Error fetching DaemonSets v1beta2: %v", err)
		return nil, err
	}
	daemonSets := currentList.Items
	for _, beta := range betaList.Items {
		// JSON round-trip converts the v1beta2 object to the apps/v1 schema.
		raw, marshalErr := json.Marshal(beta)
		if marshalErr != nil {
			logrus.Errorf("Error marshaling old DaemonSet version: %v", marshalErr)
			return nil, marshalErr
		}
		var converted appsv1.DaemonSet
		if unmarshalErr := json.Unmarshal(raw, &converted); unmarshalErr != nil {
			logrus.Errorf("Error unmarshaling old DaemonSet version: %v", unmarshalErr)
			return nil, unmarshalErr
		}
		daemonSets = append(daemonSets, converted)
	}
	return daemonSets, nil
}
// getCronJobs lists CronJobs from both the batch/v1beta1 and batch/v2alpha1
// APIs and normalizes every result to the batch/v1beta1 type. Any list or
// conversion failure is logged and returned to the caller.
func getCronJobs(kube kubernetes.Interface) ([]batchv1beta1.CronJob, error) {
	opts := metav1.ListOptions{}
	stableList, err := kube.BatchV1beta1().CronJobs("").List(opts)
	if err != nil {
		logrus.Errorf("Error fetching CronJobs: %v", err)
		return nil, err
	}
	alphaList, err := kube.BatchV2alpha1().CronJobs("").List(opts)
	if err != nil {
		logrus.Errorf("Error fetching CronJobs v2alpha1: %v", err)
		return nil, err
	}
	cronJobs := stableList.Items
	for _, alpha := range alphaList.Items {
		// JSON round-trip converts the v2alpha1 object to the v1beta1 schema.
		raw, marshalErr := json.Marshal(alpha)
		if marshalErr != nil {
			logrus.Errorf("Error marshaling old CronJob version: %v", marshalErr)
			return nil, marshalErr
		}
		var converted batchv1beta1.CronJob
		if unmarshalErr := json.Unmarshal(raw, &converted); unmarshalErr != nil {
			logrus.Errorf("Error unmarshaling old CronJob version: %v", unmarshalErr)
			return nil, unmarshalErr
		}
		cronJobs = append(cronJobs, converted)
	}
	return cronJobs, nil
}

View File

@@ -12,6 +12,7 @@ import (
func TestGetTemplateData(t *testing.T) {
k8s := test.SetupTestAPI()
k8s = test.SetupAddControllers(k8s, "test")
k8s = test.SetupAddExtraControllerVersions(k8s, "test-extra")
resources, err := kube.CreateResourceProviderFromAPI(k8s, "test")
assert.Equal(t, err, nil, "error should be nil")
@@ -32,8 +33,8 @@ func TestGetTemplateData(t *testing.T) {
sum := CountSummary{
Successes: uint(0),
Warnings: uint(4),
Errors: uint(4),
Warnings: uint(9),
Errors: uint(9),
}
actualAudit, err := RunAudit(c, resources)
@@ -43,29 +44,27 @@ func TestGetTemplateData(t *testing.T) {
assert.Equal(t, actualAudit.SourceType, "Cluster", "should be from a cluster")
assert.Equal(t, actualAudit.SourceName, "test", "should be from a cluster")
assert.Equal(t, 6, len(actualAudit.Results))
expected := []struct {
kind string
results int
}{
{kind: "Deployment", results: 2},
{kind: "Deployment", results: 2},
{kind: "Deployment", results: 2},
{kind: "StatefulSet", results: 2},
{kind: "StatefulSet", results: 2},
{kind: "StatefulSet", results: 2},
{kind: "DaemonSet", results: 2},
{kind: "DaemonSet", results: 2},
{kind: "Job", results: 0},
{kind: "CronJob", results: 0},
{kind: "ReplicationController", results: 2},
}
assert.Equal(t, "Deployment", actualAudit.Results[0].Kind)
assert.Equal(t, 1, len(actualAudit.Results[0].PodResult.ContainerResults))
assert.Equal(t, 2, len(actualAudit.Results[0].PodResult.ContainerResults[0].Results))
assert.Equal(t, "StatefulSet", actualAudit.Results[1].Kind)
assert.Equal(t, 1, len(actualAudit.Results[1].PodResult.ContainerResults))
assert.Equal(t, 2, len(actualAudit.Results[1].PodResult.ContainerResults[0].Results))
assert.Equal(t, "DaemonSet", actualAudit.Results[2].Kind)
assert.Equal(t, 1, len(actualAudit.Results[2].PodResult.ContainerResults))
assert.Equal(t, 2, len(actualAudit.Results[2].PodResult.ContainerResults[0].Results))
assert.Equal(t, "Job", actualAudit.Results[3].Kind)
assert.Equal(t, 1, len(actualAudit.Results[3].PodResult.ContainerResults))
assert.Equal(t, 0, len(actualAudit.Results[3].PodResult.ContainerResults[0].Results))
assert.Equal(t, "CronJob", actualAudit.Results[4].Kind)
assert.Equal(t, 1, len(actualAudit.Results[4].PodResult.ContainerResults))
assert.Equal(t, 0, len(actualAudit.Results[4].PodResult.ContainerResults[0].Results))
assert.Equal(t, "ReplicationController", actualAudit.Results[5].Kind)
assert.Equal(t, 1, len(actualAudit.Results[5].PodResult.ContainerResults))
assert.Equal(t, 2, len(actualAudit.Results[5].PodResult.ContainerResults[0].Results))
assert.Equal(t, len(expected), len(actualAudit.Results))
for idx, result := range actualAudit.Results {
assert.Equal(t, expected[idx].kind, result.Kind)
assert.Equal(t, 1, len(result.PodResult.ContainerResults))
assert.Equal(t, expected[idx].results, len(result.PodResult.ContainerResults[0].Results))
}
}

View File

@@ -18,7 +18,6 @@ import (
"context"
"fmt"
"net/http"
"os"
"github.com/fairwindsops/polaris/pkg/config"
validator "github.com/fairwindsops/polaris/pkg/validator"
@@ -80,10 +79,9 @@ func NewWebhook(name string, mgr manager.Manager, validator Validator, apiType r
Build()
if err != nil {
logrus.Errorf("Error building webhook: %v", err)
os.Exit(1)
} else {
logrus.Info(name + " webhook started")
return nil
}
logrus.Info(name + " webhook started")
return webhook
}

View File

@@ -1,9 +1,9 @@
package test
import (
"fmt"
appsv1 "k8s.io/api/apps/v1"
appsv1beta1 "k8s.io/api/apps/v1beta1"
appsv1beta2 "k8s.io/api/apps/v1beta2"
batchv1 "k8s.io/api/batch/v1"
batchv1beta1 "k8s.io/api/batch/v1beta1"
corev1 "k8s.io/api/core/v1"
@@ -104,33 +104,84 @@ func SetupTestAPI() kubernetes.Interface {
// SetupAddControllers creates one mock controller of each supported kind
// (Deployment, StatefulSet, DaemonSet, Job, CronJob, ReplicationController)
// in the given namespace of the test clientset and returns the clientset.
// Any creation failure aborts the test run.
func SetupAddControllers(k kubernetes.Interface, namespace string) kubernetes.Interface {
	d1 := MockDeploy()
	// NOTE(review): printing the error before panicking is redundant — panic
	// already reports the value. Confirm whether the Println lines are meant
	// to stay (same pattern repeats for every controller kind below).
	if _, err := k.AppsV1().Deployments(namespace).Create(&d1); err != nil {
		fmt.Println(err)
		panic(err)
	}
	s1 := MockStatefulSet()
	if _, err := k.AppsV1().StatefulSets(namespace).Create(&s1); err != nil {
		fmt.Println(err)
		panic(err)
	}
	ds1 := MockDaemonSet()
	if _, err := k.AppsV1().DaemonSets(namespace).Create(&ds1); err != nil {
		fmt.Println(err)
		panic(err)
	}
	j1 := MockJob()
	if _, err := k.BatchV1().Jobs(namespace).Create(&j1); err != nil {
		fmt.Println(err)
		panic(err)
	}
	cj1 := MockCronJob()
	if _, err := k.BatchV1beta1().CronJobs(namespace).Create(&cj1); err != nil {
		fmt.Println(err)
		panic(err)
	}
	rc1 := MockReplicationController()
	if _, err := k.CoreV1().ReplicationControllers(namespace).Create(&rc1); err != nil {
		fmt.Println(err)
		panic(err)
	}
	return k
}
// SetupAddExtraControllerVersions creates mock controllers under the legacy
// beta API groups (apps/v1beta1 and apps/v1beta2) and adds them to the test
// clientset, so code paths that list multiple API versions can be exercised.
func SetupAddExtraControllerVersions(k kubernetes.Interface, namespace string) kubernetes.Interface {
	podTemplate := MockPod()
	// Abort the test run on any creation failure, matching the other helpers.
	mustCreate := func(err error) {
		if err != nil {
			panic(err)
		}
	}
	_, err := k.AppsV1beta1().Deployments(namespace).Create(&appsv1beta1.Deployment{
		Spec: appsv1beta1.DeploymentSpec{Template: podTemplate},
	})
	mustCreate(err)
	_, err = k.AppsV1beta2().Deployments(namespace).Create(&appsv1beta2.Deployment{
		Spec: appsv1beta2.DeploymentSpec{Template: podTemplate},
	})
	mustCreate(err)
	_, err = k.AppsV1beta1().StatefulSets(namespace).Create(&appsv1beta1.StatefulSet{
		Spec: appsv1beta1.StatefulSetSpec{Template: podTemplate},
	})
	mustCreate(err)
	_, err = k.AppsV1beta2().StatefulSets(namespace).Create(&appsv1beta2.StatefulSet{
		Spec: appsv1beta2.StatefulSetSpec{Template: podTemplate},
	})
	mustCreate(err)
	_, err = k.AppsV1beta2().DaemonSets(namespace).Create(&appsv1beta2.DaemonSet{
		Spec: appsv1beta2.DaemonSetSpec{Template: podTemplate},
	})
	mustCreate(err)
	return k
}

View File

@@ -1,4 +1,6 @@
sed -ri "s|'(quay.io/fairwinds/polaris:).+'|'\1${CIRCLE_SHA1}'|" ./deploy/dashboard.yaml
# TODO: remove this after 1.0 is released
sed -i "s/--dashboard/dashboard/" ./deploy/dashboard.yaml
function check_dashboard_is_ready() {

View File

@@ -0,0 +1,48 @@
apiVersion: apps/v1beta2
kind: DaemonSet
metadata:
name: fluentd-elasticsearch
namespace: kube-system
labels:
k8s-app: fluentd-logging
spec:
selector:
matchLabels:
name: fluentd-elasticsearch
template:
metadata:
labels:
name: fluentd-elasticsearch
spec:
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
containers:
- name: fluentd-elasticsearch
image: gcr.io/fluentd-elasticsearch/fluentd:v2.5.1
resources:
requests:
cpu: 100m
volumeMounts:
- name: varlog
mountPath: /var/log
- name: varlibdockercontainers
mountPath: /var/lib/docker/containers
readOnly: true
securityContext:
allowPrivilegeEscalation: true
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
capabilities:
drop:
- ALL
terminationGracePeriodSeconds: 30
volumes:
- name: varlog
hostPath:
path: /var/log
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers

View File

@@ -0,0 +1,29 @@
apiVersion: apps/v1beta1
kind: Deployment
metadata:
name: nginx-deployment
labels:
app: nginx
spec:
replicas: 3
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:1.7.9
ports:
- containerPort: 80
securityContext:
allowPrivilegeEscalation: true
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
capabilities:
drop:
- ALL

View File

@@ -0,0 +1,29 @@
apiVersion: apps/v1beta2
kind: Deployment
metadata:
name: nginx-deployment
labels:
app: nginx
spec:
replicas: 3
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:1.7.9
ports:
- containerPort: 80
securityContext:
allowPrivilegeEscalation: true
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
capabilities:
drop:
- ALL

View File

@@ -0,0 +1,56 @@
apiVersion: v1
kind: Service
metadata:
name: nginx
labels:
app: nginx
spec:
ports:
- port: 80
name: web
clusterIP: None
selector:
app: nginx
---
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
name: web
spec:
selector:
matchLabels:
app: nginx # has to match .spec.template.metadata.labels
serviceName: "nginx"
replicas: 3 # by default is 1
template:
metadata:
labels:
app: nginx # has to match .spec.selector.matchLabels
spec:
terminationGracePeriodSeconds: 10
containers:
- name: nginx
image: k8s.gcr.io/nginx-slim:0.8
ports:
- containerPort: 80
name: web
volumeMounts:
- name: www
mountPath: /usr/share/nginx/html
securityContext:
allowPrivilegeEscalation: true
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
capabilities:
drop:
- ALL
volumeClaimTemplates:
- metadata:
name: www
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: "my-storage-class"
resources:
requests:
storage: 1Gi

View File

@@ -0,0 +1,56 @@
apiVersion: v1
kind: Service
metadata:
name: nginx
labels:
app: nginx
spec:
ports:
- port: 80
name: web
clusterIP: None
selector:
app: nginx
---
apiVersion: apps/v1beta2
kind: StatefulSet
metadata:
name: web
spec:
selector:
matchLabels:
app: nginx # has to match .spec.template.metadata.labels
serviceName: "nginx"
replicas: 3 # by default is 1
template:
metadata:
labels:
app: nginx # has to match .spec.selector.matchLabels
spec:
terminationGracePeriodSeconds: 10
containers:
- name: nginx
image: k8s.gcr.io/nginx-slim:0.8
ports:
- containerPort: 80
name: web
volumeMounts:
- name: www
mountPath: /usr/share/nginx/html
securityContext:
allowPrivilegeEscalation: true
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
capabilities:
drop:
- ALL
volumeClaimTemplates:
- metadata:
name: www
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: "my-storage-class"
resources:
requests:
storage: 1Gi

View File

@@ -0,0 +1,48 @@
apiVersion: apps/v1beta2
kind: DaemonSet
metadata:
name: fluentd-elasticsearch
namespace: kube-system
labels:
k8s-app: fluentd-logging
spec:
selector:
matchLabels:
name: fluentd-elasticsearch
template:
metadata:
labels:
name: fluentd-elasticsearch
spec:
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
containers:
- name: fluentd-elasticsearch
image: gcr.io/fluentd-elasticsearch/fluentd:v2.5.1
resources:
requests:
cpu: 100m
volumeMounts:
- name: varlog
mountPath: /var/log
- name: varlibdockercontainers
mountPath: /var/lib/docker/containers
readOnly: true
securityContext:
allowPrivilegeEscalation: false
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
capabilities:
drop:
- ALL
terminationGracePeriodSeconds: 30
volumes:
- name: varlog
hostPath:
path: /var/log
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers

View File

@@ -0,0 +1,29 @@
apiVersion: apps/v1beta1
kind: Deployment
metadata:
name: nginx-deployment
labels:
app: nginx
spec:
replicas: 3
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:1.7.9
ports:
- containerPort: 80
securityContext:
allowPrivilegeEscalation: false
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
capabilities:
drop:
- ALL

View File

@@ -0,0 +1,29 @@
apiVersion: apps/v1beta2
kind: Deployment
metadata:
name: nginx-deployment
labels:
app: nginx
spec:
replicas: 3
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:1.7.9
ports:
- containerPort: 80
securityContext:
allowPrivilegeEscalation: false
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
capabilities:
drop:
- ALL

View File

@@ -0,0 +1,56 @@
apiVersion: v1
kind: Service
metadata:
name: nginx
labels:
app: nginx
spec:
ports:
- port: 80
name: web
clusterIP: None
selector:
app: nginx
---
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
name: web
spec:
selector:
matchLabels:
app: nginx # has to match .spec.template.metadata.labels
serviceName: "nginx"
replicas: 3 # by default is 1
template:
metadata:
labels:
app: nginx # has to match .spec.selector.matchLabels
spec:
terminationGracePeriodSeconds: 10
containers:
- name: nginx
image: k8s.gcr.io/nginx-slim:0.8
ports:
- containerPort: 80
name: web
volumeMounts:
- name: www
mountPath: /usr/share/nginx/html
securityContext:
allowPrivilegeEscalation: false
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
capabilities:
drop:
- ALL
volumeClaimTemplates:
- metadata:
name: www
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: "my-storage-class"
resources:
requests:
storage: 1Gi

View File

@@ -0,0 +1,56 @@
apiVersion: v1
kind: Service
metadata:
name: nginx
labels:
app: nginx
spec:
ports:
- port: 80
name: web
clusterIP: None
selector:
app: nginx
---
apiVersion: apps/v1beta2
kind: StatefulSet
metadata:
name: web
spec:
selector:
matchLabels:
app: nginx # has to match .spec.template.metadata.labels
serviceName: "nginx"
replicas: 3 # by default is 1
template:
metadata:
labels:
app: nginx # has to match .spec.selector.matchLabels
spec:
terminationGracePeriodSeconds: 10
containers:
- name: nginx
image: k8s.gcr.io/nginx-slim:0.8
ports:
- containerPort: 80
name: web
volumeMounts:
- name: www
mountPath: /usr/share/nginx/html
securityContext:
allowPrivilegeEscalation: false
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
capabilities:
drop:
- ALL
volumeClaimTemplates:
- metadata:
name: www
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: "my-storage-class"
resources:
requests:
storage: 1Gi

View File

@@ -3,6 +3,8 @@ set -e
#sed is replacing the polaris version with this commit sha so we are testing exactly this verison.
sed -ri "s|'(quay.io/fairwinds/polaris:).+'|'\1${CIRCLE_SHA1}'|" ./deploy/webhook.yaml
# TODO: remove this after 1.0 is released
sed -i "s/--webhook/webhook/" ./deploy/webhook.yaml
# Testing to ensure that the webhook starts up, allows a correct deployment to pass,
# and prevents a incorrectly formatted deployment.