Mirror of https://github.com/aquasecurity/kube-bench.git, synced 2026-02-20 04:49:50 +00:00
Compare commits
17 Commits
- 037bb14729
- 89f8e454ba
- 813dc6ef47
- 27d326614f
- 4925adbe6b
- efcd63aa38
- b677c86868
- 48e33d33e5
- 5f34058dc7
- dc14cb14b0
- ca749ccb32
- 299ab36a13
- 9fc13ca02e
- 13193d75b0
- 62af68f3f5
- 4a07f87e6f
- 6e1c39237a
@@ -1,5 +1,7 @@
---
env:
  - GO111MODULE=on
  - KUBEBENCH_CFG=/etc/kube-bench/cfg
builds:
  - main: main.go
    binary: kube-bench
@@ -7,14 +9,23 @@ builds:
      - linux
    goarch:
      - amd64
    ldflags:
      - "-X github.com/aquasecurity/kube-bench/cmd.KubeBenchVersion={{.Version}}"
      - "-X github.com/aquasecurity/kube-bench/cmd.cfgDir={{.Env.KUBEBENCH_CFG}}"
# Archive customization
archive:
  format: tar.gz
nfpm:
  vendor: Aqua Security
  description: "The Kubernetes Bench for Security is a Go application that checks whether Kubernetes is deployed according to security best practices"
  license: Apache-2.0
  homepage: https://github.com/aquasecurity/kube-bench
  formats:
    - deb
    - rpm
archives:
  - id: default
    format: tar.gz
    files:
      - "cfg/**/*"
nfpms:
  -
    vendor: Aqua Security
    description: "The Kubernetes Bench for Security is a Go application that checks whether Kubernetes is deployed according to security best practices"
    license: Apache-2.0
    homepage: https://github.com/aquasecurity/kube-bench
    files:
      "cfg/**/*": "/etc/kube-bench/cfg"
    formats:
      - deb
      - rpm
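As a usage note for the configuration above: a local snapshot release is a quick way to validate it without tagging or publishing (a sketch, assuming a GoReleaser version contemporary with this config):

```sh
# Build everything locally; --snapshot works without a git tag,
# --skip-publish keeps artifacts in dist/ instead of uploading them.
export KUBEBENCH_CFG=/etc/kube-bench/cfg
goreleaser release --snapshot --skip-publish --rm-dist
```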
@@ -2,7 +2,7 @@
language: go

services:
  - docker

notifications:
  email: false
@@ -10,18 +10,20 @@ notifications:
before_install:
  - sudo apt-get -qq update
  - sudo apt-get install -y rpm
  - pip install --user yamllint==1.18.0
  - gem install --no-ri --no-rdoc fpm
  - go get -t -v ./...

script:
  - yamllint -c ./.yamllint.yaml .
  - GO111MODULE=on go test ./...
  - IMAGE_NAME=kube-bench make build-docker
  - docker run -v `pwd`:/host kube-bench install
  - test -d cfg
  - test -f kube-bench
  - make tests
  - make integration-tests

after_success:
  - bash <(curl -s https://codecov.io/bash)
deploy:
6 .yamllint.yaml Normal file
@@ -0,0 +1,6 @@
---
extends: default

rules:
  line-length: disable
  truthy: disable
@@ -1,4 +1,4 @@
FROM golang:1.12 AS build
FROM golang:1.13 AS build
WORKDIR /go/src/github.com/aquasecurity/kube-bench/
COPY go.mod go.sum ./
COPY main.go .
@@ -7,11 +7,16 @@ COPY cmd/ cmd/
ARG KUBEBENCH_VERSION
RUN GO111MODULE=on CGO_ENABLED=0 go install -a -ldflags "-X github.com/aquasecurity/kube-bench/cmd.KubeBenchVersion=${KUBEBENCH_VERSION} -w"

FROM alpine:3.10 AS run
FROM alpine:3.11 AS run
WORKDIR /opt/kube-bench/
# add GNU ps for -C, -o cmd, and --no-headers support
# https://github.com/aquasecurity/kube-bench/issues/109
RUN apk --no-cache add procps

# Openssl is used by OpenShift tests
# https://github.com/aquasecurity/kube-bench/issues/535
RUN apk --no-cache add openssl

COPY --from=build /go/bin/kube-bench /usr/local/bin/kube-bench
COPY entrypoint.sh .
COPY cfg/ cfg/
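To build this image locally, something like the following should work (a sketch; the tag and version value are placeholders, and `KUBEBENCH_VERSION` feeds the ldflags shown above):

```sh
# Bake a version string into the binary via the Dockerfile's ARG.
docker build --build-arg KUBEBENCH_VERSION=0.0.1-dev -t kube-bench:local .
```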
64 README.md
@@ -19,26 +19,27 @@ Tests are configured with YAML files, making this tool easy to update as test sp

Table of Contents
=================

- [Table of Contents](#table-of-contents)
- [CIS Kubernetes Benchmark support](#cis-kubernetes-benchmark-support)
- [Installation](#installation)
- [Running kube-bench](#running-kube-bench)
- [Running inside a container](#running-inside-a-container)
- [Running in a Kubernetes cluster](#running-in-a-kubernetes-cluster)
- [Running in an EKS cluster](#running-in-an-eks-cluster)
- [Installing from a container](#installing-from-a-container)
- [Installing from sources](#installing-from-sources)
- [Running on OpenShift](#running-on-openshift)
- [Output](#output)
- [Configuration](#configuration)
- [Test config YAML representation](#test-config-yaml-representation)
- [Omitting checks](#omitting-checks)
- [Roadmap](#roadmap)
- [Testing locally with kind](#testing-locally-with-kind)
- [Contributing](#contributing)
- [Bugs](#bugs)
- [Features](#features)
- [Pull Requests](#pull-requests)

* [CIS Kubernetes Benchmark support](#cis-kubernetes-benchmark-support)
* [Installation](#installation)
* [Running kube-bench](#running-kube-bench)
* [Running inside a container](#running-inside-a-container)
* [Running in a Kubernetes cluster](#running-in-a-kubernetes-cluster)
* [Running in an Azure Kubernetes Service (AKS) cluster](#running-in-an-aks-cluster)
* [Running in an EKS cluster](#running-in-an-eks-cluster)
* [Installing from a container](#installing-from-a-container)
* [Installing from sources](#installing-from-sources)
* [Running on OpenShift](#running-on-openshift)
* [Output](#output)
* [Configuration](#configuration)
* [Test config YAML representation](#test-config-yaml-representation)
* [Omitting checks](#omitting-checks)
* [Roadmap](#roadmap)
* [Testing locally with kind](#testing-locally-with-kind)
* [Contributing](#contributing)
* [Bugs](#bugs)
* [Features](#features)
* [Pull Requests](#pull-requests)

## CIS Kubernetes Benchmark support
@@ -177,6 +178,25 @@ To run the tests on the master node, the pod needs to be scheduled on that node.

The default labels applied to master nodes have changed since Kubernetes 1.11, so if you are using an older version you may need to modify the nodeSelector and tolerations to run the job on the master node.
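A minimal sketch of the scheduling stanza such a job's pod spec would need (the label key shown is the common `node-role.kubernetes.io/master` convention, which is an assumption here; verify yours with `kubectl get nodes --show-labels`):

```yaml
# Pod-spec fragment: pin the pod to a master node and tolerate its taint.
spec:
  nodeSelector:
    node-role.kubernetes.io/master: ""
  tolerations:
    - key: node-role.kubernetes.io/master
      effect: NoSchedule
```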
### Running in an AKS cluster

1. Create an AKS cluster (e.g. 1.13.7) with RBAC enabled; otherwise there will be 4 failures.

1. Use the [kubectl-enter plugin](https://github.com/kvaps/kubectl-enter) to shell into a node:
   ```
   kubectl-enter {node-name}
   ```
   or SSH to one agent node; you could open port 22 in the NSG and assign a public IP to one agent node (for testing purposes only).

1. Run the CIS benchmark to view results:
   ```
   docker run --rm -v `pwd`:/host aquasec/kube-bench:latest install
   ./kube-bench node
   ```

kube-bench cannot be run on AKS master nodes.
### Running in an EKS cluster

There is a `job-eks.yaml` file for running the kube-bench node checks on an EKS cluster. The significant difference on EKS is that it's not possible to schedule jobs onto the master node, so master checks can't be performed.
@@ -190,10 +210,10 @@ aws ecr create-repository --repository-name k8s/kube-bench --image-tag-mutability MUTABLE
3. Download, build and push the kube-bench container image to your ECR repo
```
git clone https://github.com/aquasecurity/kube-bench.git
cd kube-bench
$(aws ecr get-login --no-include-email --region <AWS_REGION>)
docker build -t k8s/kube-bench .
docker tag k8s/kube-bench:latest <AWS_ACCT_NUMBER>.dkr.ecr.<AWS_REGION>.amazonaws.com/k8s/kube-bench:latest
docker push <AWS_ACCT_NUMBER>.dkr.ecr.<AWS_REGION>.amazonaws.com/k8s/kube-bench:latest
```
4. Copy the URI of your pushed image, the URI format is like this: `<AWS_ACCT_NUMBER>.dkr.ecr.<AWS_REGION>.amazonaws.com/k8s/kube-bench:latest`
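From here, a typical next step is to point the job at the pushed image and submit it. A sketch, assuming the image URI from step 4 goes into the `image:` field of `job-eks.yaml` and that the job object is named `kube-bench` (both assumptions):

```sh
# After editing the image field in job-eks.yaml to the ECR URI from step 4:
kubectl apply -f job-eks.yaml
kubectl logs job/kube-bench   # hypothetical job name; check your manifest
```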
@@ -312,7 +332,7 @@ If you think you have found a bug please follow the instructions below.

- Open a [new issue](https://github.com/aquasecurity/kube-bench/issues/new) if a duplicate doesn't already exist.
- Note the version of kube-bench you are running (from `kube-bench version`) and the command line options you are using.
- Note the version of Kubernetes you are running (from `kubectl version` or `oc version` for OpenShift).
- Set the `-v 10` command line option and save the log output. Please paste this into your issue.
- Set the `-v 10 --logtostderr` command line options and save the log output. Please paste this into your issue.
- Remember users might be searching for your issue in the future, so please give it a meaningful title to help others.
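A sketch of capturing that log output (the glog-style flags write to stderr, hence the redirect; `master` here is just an example check set):

```sh
kube-bench master -v 10 --logtostderr 2> kube-bench-debug.log
```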
### Features
@@ -1,2 +1,2 @@
---
## Version-specific settings that override the values in cfg/config.yaml
File diff suppressed because it is too large
@@ -1,2 +1,2 @@
---
## Version-specific settings that override the values in cfg/config.yaml
File diff suppressed because it is too large
@@ -1,2 +1,2 @@
---
## Version-specific settings that override the values in cfg/config.yaml
@@ -5,31 +5,31 @@ id: 3
text: "Control Plane Configuration"
type: "controlplane"
groups:
  - id: 3.1
    text: "Authentication and Authorization"
    checks:
      - id: 3.1.1
        text: "Client certificate authentication should not be used for users (Not Scored)"
        type: "manual"
        remediation: |
          Alternative mechanisms provided by Kubernetes such as the use of OIDC should be
          implemented in place of client certificates.
        scored: false

  - id: 3.2
    text: "Logging"
    checks:
      - id: 3.2.1
        text: "Ensure that a minimal audit policy is created (Scored)"
        type: "manual"
        remediation: |
          Create an audit policy file for your cluster.
        scored: true

      - id: 3.2.2
        text: "Ensure that the audit policy covers key security concerns (Not Scored)"
        type: "manual"
        remediation: |
          Consider modification of the audit policy in use on the cluster to include these items, at a
          minimum.
        scored: false
@@ -5,127 +5,127 @@ id: 2
text: "Etcd Node Configuration"
type: "etcd"
groups:
  - id: 2
    text: "Etcd Node Configuration Files"
    checks:
      - id: 2.1
        text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Scored)"
        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
        tests:
          bin_op: and
          test_items:
            - flag: "--cert-file"
              set: true
            - flag: "--key-file"
              set: true
        remediation: |
          Follow the etcd service documentation and configure TLS encryption.
          Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml
          on the master node and set the below parameters.
          --cert-file=</path/to/ca-file>
          --key-file=</path/to/key-file>
        scored: true

      - id: 2.2
        text: "Ensure that the --client-cert-auth argument is set to true (Scored)"
        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
        tests:
          test_items:
            - flag: "--client-cert-auth"
              compare:
                op: eq
                value: true
              set: true
        remediation: |
          Edit the etcd pod specification file $etcdconf on the master
          node and set the below parameter.
          --client-cert-auth="true"
        scored: true

      - id: 2.3
        text: "Ensure that the --auto-tls argument is not set to true (Scored)"
        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
        tests:
          bin_op: or
          test_items:
            - flag: "--auto-tls"
              set: false
            - flag: "--auto-tls"
              compare:
                op: eq
                value: false
        remediation: |
          Edit the etcd pod specification file $etcdconf on the master
          node and either remove the --auto-tls parameter or set it to false.
          --auto-tls=false
        scored: true

      - id: 2.4
        text: "Ensure that the --peer-cert-file and --peer-key-file arguments are
          set as appropriate (Scored)"
        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
        tests:
          bin_op: and
          test_items:
            - flag: "--peer-cert-file"
              set: true
            - flag: "--peer-key-file"
              set: true
        remediation: |
          Follow the etcd service documentation and configure peer TLS encryption as appropriate
          for your etcd cluster. Then, edit the etcd pod specification file $etcdconf on the
          master node and set the below parameters.
          --peer-client-file=</path/to/peer-cert-file>
          --peer-key-file=</path/to/peer-key-file>
        scored: true

      - id: 2.5
        text: "Ensure that the --peer-client-cert-auth argument is set to true (Scored)"
        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
        tests:
          test_items:
            - flag: "--peer-client-cert-auth"
              compare:
                op: eq
                value: true
              set: true
        remediation: |
          Edit the etcd pod specification file $etcdconf on the master
          node and set the below parameter.
          --peer-client-cert-auth=true
        scored: true

      - id: 2.6
        text: "Ensure that the --peer-auto-tls argument is not set to true (Scored)"
        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
        tests:
          bin_op: or
          test_items:
            - flag: "--peer-auto-tls"
              set: false
            - flag: "--peer-auto-tls"
              compare:
                op: eq
                value: false
              set: true
        remediation: |
          Edit the etcd pod specification file $etcdconf on the master
          node and either remove the --peer-auto-tls parameter or set it to false.
          --peer-auto-tls=false
        scored: true

      - id: 2.7
        text: "Ensure that a unique Certificate Authority is used for etcd (Not Scored)"
        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
        tests:
          test_items:
            - flag: "--trusted-ca-file"
              set: true
        remediation: |
          [Manual test]
          Follow the etcd documentation and create a dedicated certificate authority setup for the
          etcd service.
          Then, edit the etcd pod specification file $etcdconf on the
          master node and set the below parameter.
          --trusted-ca-file=</path/to/ca-file>
        scored: false
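As a reading aid for the check format above, here is a deliberately simplified sketch in Go of how a single `test_item` could be evaluated against an audited command line — illustrative only, not the project's actual implementation:

```go
package main

import (
	"fmt"
	"strings"
)

// testItem is a stripped-down model of a check's test_item:
// flag presence/absence (set) and an optional op: eq comparison (value).
type testItem struct {
	flag  string
	set   bool   // assert the flag is present (true) or absent (false)
	value string // if non-empty, the flag must equal this value (op: eq)
}

// evaluate greps the audited command line for the flag, mirroring
// how a set/compare pair behaves at a high level.
func evaluate(cmdline string, item testItem) bool {
	present := strings.Contains(cmdline, item.flag)
	if !item.set {
		return !present
	}
	if item.value == "" {
		return present
	}
	return strings.Contains(cmdline, item.flag+"="+item.value)
}

func main() {
	cmdline := "etcd --cert-file=/etc/ssl/etcd.crt --client-cert-auth=true"
	// Check 2.2 above: --client-cert-auth must be set to true.
	ok := evaluate(cmdline, testItem{flag: "--client-cert-auth", set: true, value: "true"})
	fmt.Println("2.2 passes:", ok)
}
```

Under `bin_op: or`, a check such as 2.3 passes when either item passes, i.e. when `--auto-tls` is absent or explicitly set to false; `bin_op: and` (as in 2.1) requires every item to pass.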
File diff suppressed because it is too large
@@ -5,235 +5,235 @@ id: 5
text: "Kubernetes Policies"
type: "policies"
groups:
  - id: 5.1
    text: "RBAC and Service Accounts"
    checks:
      - id: 5.1.1
        text: "Ensure that the cluster-admin role is only used where required (Not Scored)"
        type: "manual"
        remediation: |
          Identify all clusterrolebindings to the cluster-admin role. Check if they are used and
          if they need this role or if they could use a role with fewer privileges.
          Where possible, first bind users to a lower privileged role and then remove the
          clusterrolebinding to the cluster-admin role:
          kubectl delete clusterrolebinding [name]
        scored: false

      - id: 5.1.2
        text: "Minimize access to secrets (Not Scored)"
        type: "manual"
        remediation: |
          Where possible, remove get, list and watch access to secret objects in the cluster.
        scored: false

      - id: 5.1.3
        text: "Minimize wildcard use in Roles and ClusterRoles (Not Scored)"
        type: "manual"
        remediation: |
          Where possible replace any use of wildcards in clusterroles and roles with specific
          objects or actions.
        scored: false

      - id: 5.1.4
        text: "Minimize access to create pods (Not Scored)"
        type: "manual"
        remediation: |
          Where possible, remove create access to pod objects in the cluster.
        scored: false

      - id: 5.1.5
        text: "Ensure that default service accounts are not actively used. (Scored)"
        type: "manual"
        remediation: |
          Create explicit service accounts wherever a Kubernetes workload requires specific access
          to the Kubernetes API server.
          Modify the configuration of each default service account to include this value
          automountServiceAccountToken: false
        scored: true

      - id: 5.1.6
        text: "Ensure that Service Account Tokens are only mounted where necessary (Not Scored)"
        type: "manual"
        remediation: |
          Modify the definition of pods and service accounts which do not need to mount service
          account tokens to disable it.
        scored: false

  - id: 5.2
    text: "Pod Security Policies"
    checks:
      - id: 5.2.1
        text: "Minimize the admission of privileged containers (Not Scored)"
        type: "manual"
        remediation: |
          Create a PSP as described in the Kubernetes documentation, ensuring that
          the .spec.privileged field is omitted or set to false.
        scored: false

      - id: 5.2.2
        text: "Minimize the admission of containers wishing to share the host process ID namespace (Scored)"
        type: "manual"
        remediation: |
          Create a PSP as described in the Kubernetes documentation, ensuring that the
          .spec.hostPID field is omitted or set to false.
        scored: true

      - id: 5.2.3
        text: "Minimize the admission of containers wishing to share the host IPC namespace (Scored)"
        type: "manual"
        remediation: |
          Create a PSP as described in the Kubernetes documentation, ensuring that the
          .spec.hostIPC field is omitted or set to false.
        scored: true

      - id: 5.2.4
        text: "Minimize the admission of containers wishing to share the host network namespace (Scored)"
        type: "manual"
        remediation: |
          Create a PSP as described in the Kubernetes documentation, ensuring that the
          .spec.hostNetwork field is omitted or set to false.
        scored: true

      - id: 5.2.5
        text: "Minimize the admission of containers with allowPrivilegeEscalation (Scored)"
        type: "manual"
        remediation: |
          Create a PSP as described in the Kubernetes documentation, ensuring that the
          .spec.allowPrivilegeEscalation field is omitted or set to false.
        scored: true

      - id: 5.2.6
        text: "Minimize the admission of root containers (Not Scored)"
        type: "manual"
        remediation: |
          Create a PSP as described in the Kubernetes documentation, ensuring that the
          .spec.runAsUser.rule is set to either MustRunAsNonRoot or MustRunAs with the range of
          UIDs not including 0.
        scored: false

      - id: 5.2.7
        text: "Minimize the admission of containers with the NET_RAW capability (Not Scored)"
        type: "manual"
        remediation: |
          Create a PSP as described in the Kubernetes documentation, ensuring that the
          .spec.requiredDropCapabilities is set to include either NET_RAW or ALL.
        scored: false

      - id: 5.2.8
        text: "Minimize the admission of containers with added capabilities (Not Scored)"
        type: "manual"
        remediation: |
          Ensure that allowedCapabilities is not present in PSPs for the cluster unless
          it is set to an empty array.
        scored: false

      - id: 5.2.9
        text: "Minimize the admission of containers with capabilities assigned (Not Scored)"
        type: "manual"
        remediation: |
          Review the use of capabilities in applications running on your cluster. Where a namespace
          contains applications which do not require any Linux capabilities to operate, consider adding
          a PSP which forbids the admission of containers which do not drop all capabilities.
        scored: false

  - id: 5.3
    text: "Network Policies and CNI"
    checks:
      - id: 5.3.1
        text: "Ensure that the CNI in use supports Network Policies (Not Scored)"
        type: "manual"
        remediation: |
          If the CNI plugin in use does not support network policies, consideration should be given to
          making use of a different plugin, or finding an alternate mechanism for restricting traffic
          in the Kubernetes cluster.
        scored: false

      - id: 5.3.2
        text: "Ensure that all Namespaces have Network Policies defined (Scored)"
        type: "manual"
        remediation: |
          Follow the documentation and create NetworkPolicy objects as you need them.
        scored: true

  - id: 5.4
    text: "Secrets Management"
    checks:
      - id: 5.4.1
        text: "Prefer using secrets as files over secrets as environment variables (Not Scored)"
        type: "manual"
        remediation: |
          If possible, rewrite application code to read secrets from mounted secret files, rather than
          from environment variables.
        scored: false

      - id: 5.4.2
        text: "Consider external secret storage (Not Scored)"
        type: "manual"
        remediation: |
          Refer to the secrets management options offered by your cloud provider or a third-party
          secrets management solution.
        scored: false

  - id: 5.5
    text: "Extensible Admission Control"
    checks:
      - id: 5.5.1
        text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Not Scored)"
        type: "manual"
        remediation: |
          Follow the Kubernetes documentation and set up image provenance.
        scored: false

  - id: 5.6
    text: "General Policies"
    checks:
      - id: 5.6.1
        text: "Create administrative boundaries between resources using namespaces (Not Scored)"
        type: "manual"
        remediation: |
          Follow the documentation and create namespaces for objects in your deployment as you need
          them.
        scored: false

      - id: 5.6.2
        text: "Ensure that the seccomp profile is set to docker/default in your pod definitions (Not Scored)"
        type: "manual"
        remediation: |
          Seccomp is an alpha feature currently. By default, all alpha features are disabled. So, you
          would need to enable alpha features in the apiserver by passing the
          "--feature-gates=AllAlpha=true" argument.
          Edit the /etc/kubernetes/apiserver file on the master node and set the KUBE_API_ARGS
          parameter to "--feature-gates=AllAlpha=true"
          KUBE_API_ARGS="--feature-gates=AllAlpha=true"
          Based on your system, restart the kube-apiserver service. For example:
          systemctl restart kube-apiserver.service
          Use annotations to enable the docker/default seccomp profile in your pod definitions. An
          example is as below:
          apiVersion: v1
          kind: Pod
          metadata:
            name: trustworthy-pod
            annotations:
              seccomp.security.alpha.kubernetes.io/pod: docker/default
          spec:
            containers:
              - name: trustworthy-container
                image: sotrustworthy:latest
        scored: false

      - id: 5.6.3
        text: "Apply Security Context to Your Pods and Containers (Not Scored)"
        type: "manual"
        remediation: |
          Follow the Kubernetes documentation and apply security contexts to your pods. For a
          suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker
          Containers.
        scored: false

      - id: 5.6.4
        text: "The default namespace should not be used (Scored)"
        type: "manual"
        remediation: |
          Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
          resources and that all new resources are created in a specific namespace.
        scored: true
@@ -88,7 +88,7 @@ node:
  - "/etc/kubernetes/pki/ca.crt"
  - "/etc/kubernetes/certs/ca.crt"
  - "/etc/kubernetes/cert/ca.pem"
svc:
  # These paths must also be included
  # in the 'confs' property below
  - "/etc/systemd/system/kubelet.service.d/10-kubeadm.conf"
@@ -145,7 +145,7 @@ node:
etcd:
  components:
    - etcd

etcd:
  bins:
    - "etcd"
@@ -172,4 +172,4 @@ version_mapping:
  "1.16": "cis-1.5"
  "1.17": "cis-1.5"
  "ocp-3.10": "rh-0.7"
  "ocp-3.11": "rh-0.7"
@@ -59,4 +59,15 @@ node:
  svc:
    - "/lib/systemd/system/kube-proxy.service"
  defaultconf: /etc/kubernetes/addons/kube-proxy-daemonset.yaml
  defaultkubeconfig: "/etc/kubernetes/proxy.conf"

version_mapping:
  "1.11": "cis-1.3"
  "1.12": "cis-1.3"
  "1.13": "cis-1.4"
  "1.14": "cis-1.4"
  "1.15": "cis-1.5"
  "1.16": "cis-1.5"
  "1.17": "cis-1.5"
  "ocp-3.10": "rh-0.7"
  "ocp-3.11": "rh-0.7"
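The `version_mapping` table is what resolves a detected Kubernetes version to a benchmark directory under `cfg/`. When detection isn't possible or you want to override it, the version or benchmark can be given explicitly; a sketch, assuming the standard kube-bench flags of this era:

```sh
kube-bench --version 1.15        # the mapping resolves 1.15 -> cis-1.5
kube-bench --benchmark cis-1.5   # or name the benchmark directly
```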
@@ -6,22 +6,27 @@ master:
  bins:
    - openshift start master api
    - hypershift openshift-kube-apiserver

scheduler:
  bins:
    - "openshift start master controllers"
    - "hyperkube kube-scheduler"
  confs:
    - /etc/origin/master/scheduler.json

controllermanager:
  bins:
    - "openshift start master controllers"
    - "hypershift openshift-controller-manager"

etcd:
  bins:
    - openshift start etcd

node:
  svcs:
    - /etc/systemd/system/atomic-openshift-node.service
    - /etc/systemd/system/origin-node.service

proxy:
  bins:
    - openshift start network
File diff suppressed because it is too large
@@ -1,376 +1,456 @@
|
||||
---
|
||||
controls:
|
||||
id: 2
|
||||
text: "Worker Node Security Configuration"
|
||||
type: "node"
|
||||
groups:
|
||||
- id: 7
|
||||
text: "Kubelet"
|
||||
checks:
|
||||
- id: 7.1
|
||||
text: "Use Security Context Constraints to manage privileged containers as needed"
|
||||
type: "skip"
|
||||
scored: true
|
||||
|
||||
- id: 7.2
|
||||
text: "Ensure anonymous-auth is not disabled"
|
||||
type: "skip"
|
||||
scored: true
|
||||
|
||||
- id: 7.3
|
||||
text: "Verify that the --authorization-mode argument is set to WebHook"
|
||||
audit: "grep -A1 authorization-mode /etc/origin/node/node-config.yaml"
|
||||
tests:
|
||||
bin_op: or
|
||||
test_items:
|
||||
- flag: "authorization-mode"
|
||||
set: false
|
||||
- flag: "authorization-mode"
|
||||
compare:
|
||||
op: has
|
||||
value: "Webhook"
|
||||
set: true
|
||||
remediation: |
|
||||
Edit the Openshift node config file /etc/origin/node/node-config.yaml and remove authorization-mode under
|
||||
kubeletArguments in /etc/origin/node/node-config.yaml or set it to "Webhook".
|
||||
scored: true
|
||||
|
||||
- id: 7.4
|
||||
text: "Verify the OpenShift default for the client-ca-file argument"
|
||||
audit: "grep -A1 client-ca-file /etc/origin/node/node-config.yaml"
|
||||
tests:
|
||||
test_items:
|
||||
- flag: "client-ca-file"
|
||||
set: false
|
||||
remediation: |
|
||||
Edit the Openshift node config file /etc/origin/node/node-config.yaml and remove any configuration returned by the following:
|
||||
grep -A1 client-ca-file /etc/origin/node/node-config.yaml
|
||||
|
||||
Reset to the OpenShift default.
|
||||
See https://github.com/openshift/openshift-ansible/blob/release-3.10/roles/openshift_node_group/templates/node-config.yaml.j2#L65
|
||||
The config file does not have this defined in kubeletArgument, but in PodManifestConfig.
|
||||
scored: true
|
||||
|
||||
- id: 7.5
|
||||
text: "Verify the OpenShift default setting for the read-only-port argument"
|
||||
audit: "grep -A1 read-only-port /etc/origin/node/node-config.yaml"
|
||||
tests:
|
||||
bin_op: or
|
||||
test_items:
|
||||
- flag: "read-only-port"
|
||||
set: false
|
||||
- flag: "read-only-port"
|
||||
compare:
|
||||
op: has
|
||||
value: "0"
|
||||
set: true
|
||||
remediation: |
|
||||
Edit the Openshift node config file /etc/origin/node/node-config.yaml and removed so that the OpenShift default is applied.
|
||||
scored: true
|
||||
|
||||
- id: 7.6
|
||||
text: "Adjust the streaming-connection-idle-timeout argument"
|
||||
audit: "grep -A1 streaming-connection-idle-timeout /etc/origin/node/node-config.yaml"
|
||||
tests:
|
||||
bin_op: or
|
||||
test_items:
|
||||
- flag: "streaming-connection-idle-timeout"
|
||||
set: false
|
||||
- flag: "5m"
|
||||
set: false
|
||||
remediation: |
|
||||
Edit the Openshift node config file /etc/origin/node/node-config.yaml and set the streaming-connection-timeout
|
||||
value like the following in node-config.yaml.
|
||||
|
||||
kubeletArguments:
|
||||
streaming-connection-idle-timeout:
|
||||
- "5m"
|
||||
scored: true
|
||||
|
||||
- id: 7.7
|
||||
text: "Verify the OpenShift defaults for the protect-kernel-defaults argument"
|
||||
type: "skip"
|
||||
scored: true
|
||||
|
||||
- id: 7.8
|
||||
text: "Verify the OpenShift default value of true for the make-iptables-util-chains argument"
|
||||
audit: "grep -A1 make-iptables-util-chains /etc/origin/node/node-config.yaml"
|
||||
tests:
|
||||
bin_op: or
|
||||
test_items:
|
||||
- flag: "make-iptables-util-chains"
|
||||
set: false
|
||||
- flag: "make-iptables-util-chains"
|
||||
compare:
|
||||
op: has
|
||||
value: "true"
|
||||
set: true
|
||||
remediation: |
|
||||
Edit the Openshift node config file /etc/origin/node/node-config.yaml and reset make-iptables-util-chains to the OpenShift
|
||||
default value of true.
|
||||
scored: true
|
||||
|
||||
- id: 7.9
|
||||
text: "Verify that the --keep-terminated-pod-volumes argument is set to false"
|
||||
audit: "grep -A1 keep-terminated-pod-volumes /etc/origin/node/node-config.yaml"
|
||||
tests:
|
||||
test_items:
|
||||
- flag: "keep-terminated-pod-volumes"
|
||||
compare:
|
||||
op: has
|
||||
value: "false"
|
||||
set: true
|
||||
remediation: |
|
||||
Reset to the OpenShift defaults
|
||||
scored: true
|
||||
|
||||
- id: 7.10
|
||||
text: "Verify the OpenShift defaults for the hostname-override argument"
|
||||
type: "skip"
|
||||
scored: true
|
||||
|
||||
- id: 7.11
|
||||
text: "Set the --event-qps argument to 0"
|
||||
audit: "grep -A1 event-qps /etc/origin/node/node-config.yaml"
|
||||
tests:
|
||||
bin_op: or
|
||||
test_items:
|
||||
- flag: "event-qps"
|
||||
set: false
|
||||
- flag: "event-qps"
|
||||
compare:
|
||||
op: has
|
||||
value: "0"
|
||||
set: true
|
||||
remediation: |
|
||||
Edit the Openshift node config file /etc/origin/node/node-config.yaml set the event-qps argument to 0 in
|
||||
the kubeletArguments section of.
|
||||
scored: true
|
||||
|
||||
- id: 7.12
|
||||
text: "Verify the OpenShift cert-dir flag for HTTPS traffic"
|
||||
audit: "grep -A1 cert-dir /etc/origin/node/node-config.yaml"
|
||||
tests:
|
||||
test_items:
|
||||
- flag: "/etc/origin/node/certificates"
|
||||
compare:
|
||||
op: has
|
||||
value: "/etc/origin/node/certificates"
|
||||
set: true
|
||||
remediation: |
|
||||
Reset to the OpenShift default values.
|
||||
scored: true
|
||||
|
||||
- id: 7.13
|
||||
text: "Verify the OpenShift default of 0 for the cadvisor-port argument"
|
||||
audit: "grep -A1 cadvisor-port /etc/origin/node/node-config.yaml"
|
||||
tests:
|
||||
bin_op: or
|
||||
test_items:
|
||||
- flag: "cadvisor-port"
|
||||
set: false
|
||||
- flag: "cadvisor-port"
|
||||
compare:
|
||||
op: has
|
||||
value: "0"
|
||||
set: true
|
||||
remediation: |
|
||||
Edit the Openshift node config file /etc/origin/node/node-config.yaml and remove the cadvisor-port flag
|
||||
if it is set in the kubeletArguments section.
|
||||
scored: true
|
||||
|
||||
- id: 7.14
|
||||
text: "Verify that the RotateKubeletClientCertificate argument is set to true"
|
||||
audit: "grep -B1 RotateKubeletClientCertificate=true /etc/origin/node/node-config.yaml"
|
||||
tests:
|
||||
test_items:
|
||||
- flag: "RotateKubeletClientCertificate=true"
|
||||
compare:
|
||||
op: has
|
||||
value: "true"
|
||||
set: true
|
||||
remediation: |
|
||||
Edit the Openshift node config file /etc/origin/node/node-config.yaml and set RotateKubeletClientCertificate to true.
|
||||
scored: true
|
||||
|
||||
- id: 7.15
|
||||
text: "Verify that the RotateKubeletServerCertificate argument is set to true"
|
||||
audit: "grep -B1 RotateKubeletServerCertificate=true /etc/origin/node/node-config.yaml"
|
||||
tests:
|
||||
test_items:
|
||||
- flag: "RotateKubeletServerCertificate=true"
|
||||
compare:
|
||||
op: has
|
||||
value: "true"
|
||||
set: true
|
||||
remediation: |
|
||||
Edit the Openshift node config file /etc/origin/node/node-config.yaml and set RotateKubeletServerCertificate to true.
|
||||
scored: true
|
||||
|
||||
|
||||
- id: 8
|
||||
text: "Configuration Files"
|
||||
checks:
|
||||
- id: 8.1
|
||||
text: "Verify the OpenShift default permissions for the kubelet.conf file"
|
||||
audit: "stat -c %a /etc/origin/node/node.kubeconfig"
|
||||
tests:
|
||||
bin_op: or
|
||||
test_items:
|
||||
- flag: "644"
|
||||
compare:
|
||||
op: eq
|
||||
value: "644"
|
||||
set: true
|
||||
- flag: "640"
|
||||
compare:
|
||||
op: eq
|
||||
value: "640"
|
||||
set: true
|
||||
- flag: "600"
|
||||
compare:
|
||||
op: eq
|
||||
value: "600"
|
||||
set: true
|
||||
remediation: |
|
||||
Run the below command on each worker node.
|
||||
chmod 644 /etc/origin/node/node.kubeconfig
|
||||
scored: true
|
||||
|
||||
- id: 8.2
|
||||
text: "Verify the kubeconfig file ownership of root:root"
|
||||
audit: "stat -c %U:%G /etc/origin/node/node.kubeconfig"
|
||||
tests:
|
||||
test_items:
|
||||
- flag: "root:root"
|
||||
compare:
|
||||
op: eq
|
||||
value: root:root
|
||||
set: true
|
||||
remediation: |
|
||||
Run the below command on each worker node.
|
||||
chown root:root /etc/origin/node/node.kubeconfig
|
||||
scored: true
|
||||
|
||||
- id: 8.3
|
||||
text: "Verify the kubelet service file permissions of 644"
|
||||
audit: "stat -c %a /etc/systemd/system/atomic-openshift-node.service"
|
||||
tests:
|
||||
bin_op: or
|
||||
test_items:
|
||||
- flag: "644"
|
||||
compare:
|
||||
op: eq
|
||||
value: "644"
|
||||
set: true
|
||||
- flag: "640"
|
||||
compare:
|
||||
op: eq
|
||||
value: "640"
|
||||
set: true
|
||||
- flag: "600"
|
||||
compare:
|
||||
op: eq
|
||||
value: "600"
|
||||
set: true
|
||||
remediation: |
|
||||
Run the below command on each worker node.
|
||||
chmod 644 /etc/systemd/system/atomic-openshift-node.service
|
||||
scored: true
|
||||
|
||||
- id: 8.4
|
||||
text: "Verify the kubelet service file ownership of root:root"
|
||||
audit: "stat -c %U:%G /etc/systemd/system/atomic-openshift-node.service"
|
||||
tests:
|
||||
test_items:
|
||||
- flag: "root:root"
|
||||
compare:
|
||||
op: eq
|
||||
value: root:root
|
||||
set: true
|
||||
remediation: |
|
||||
Run the below command on each worker node.
|
||||
chown root:root /etc/systemd/system/atomic-openshift-node.service
|
||||
scored: true
|
||||
|
||||
- id: 8.5
|
||||
text: "Verify the OpenShift default permissions for the proxy kubeconfig file"
|
||||
audit: "stat -c %a /etc/origin/node/node.kubeconfig"
|
||||
tests:
|
||||
bin_op: or
|
||||
test_items:
|
||||
- flag: "644"
|
||||
compare:
|
||||
op: eq
|
||||
value: "644"
|
||||
set: true
|
||||
- flag: "640"
|
||||
compare:
|
||||
op: eq
|
||||
value: "640"
|
||||
set: true
|
||||
- flag: "600"
|
||||
compare:
|
||||
op: eq
|
||||
value: "600"
|
||||
set: true
|
||||
remediation: |
|
||||
Run the below command on each worker node.
|
||||
chmod 644 /etc/origin/node/node.kubeconfig
|
||||
scored: true
|
||||
|
||||
- id: 8.6
|
||||
text: "Verify the proxy kubeconfig file ownership of root:root"
|
||||
audit: "stat -c %U:%G /etc/origin/node/node.kubeconfig"
|
||||
tests:
|
||||
test_items:
|
||||
- flag: "root:root"
|
||||
compare:
|
||||
op: eq
|
||||
value: root:root
|
||||
set: true
|
||||
remediation: |
|
||||
Run the below command on each worker node.
|
||||
chown root:root /etc/origin/node/node.kubeconfig
|
||||
scored: true
|
||||
|
||||
- id: 8.7
|
||||
text: "Verify the OpenShift default permissions for the certificate authorities file."
|
||||
audit: "stat -c %a /etc/origin/node/client-ca.crt"
|
||||
tests:
|
||||
bin_op: or
|
||||
test_items:
|
||||
- flag: "644"
|
||||
compare:
|
||||
op: eq
|
||||
value: "644"
|
||||
set: true
|
||||
- flag: "640"
|
||||
compare:
|
||||
op: eq
|
||||
value: "640"
|
||||
set: true
|
||||
- flag: "600"
|
||||
compare:
|
||||
op: eq
|
||||
value: "600"
|
||||
set: true
|
||||
remediation: |
|
||||
Run the below command on each worker node.
|
||||
chmod 644 /etc/origin/node/client-ca.crt
|
||||
scored: true
|
||||
|
||||
- id: 8.8
|
||||
text: "Verify the client certificate authorities file ownership of root:root"
|
||||
audit: "stat -c %U:%G /etc/origin/node/client-ca.crt"
|
||||
tests:
|
||||
test_items:
|
||||
- flag: "root:root"
|
||||
compare:
|
||||
op: eq
|
||||
value: root:root
|
||||
set: true
|
||||
remediation: |
|
||||
Run the below command on each worker node.
|
||||
chown root:root /etc/origin/node/client-ca.crt
|
||||
scored: true
|
||||
---
|
||||
controls:
|
||||
id: 2
|
||||
text: "Worker Node Security Configuration"
|
||||
type: "node"
|
||||
groups:
|
||||
- id: 7
|
||||
text: "Kubelet"
|
||||
checks:
|
||||
- id: 7.1
|
||||
text: "Use Security Context Constraints to manage privileged containers as needed"
|
||||
type: "skip"
|
||||
scored: true
|
||||
|
||||
- id: 7.2
|
||||
text: "Ensure anonymous-auth is not disabled"
|
||||
type: "skip"
|
||||
scored: true
|
||||
|
||||
- id: 7.3
|
||||
text: "Verify that the --authorization-mode argument is set to WebHook"
|
||||
audit: "grep -A1 authorization-mode /etc/origin/node/node-config.yaml"
|
||||
tests:
|
||||
bin_op: or
|
||||
test_items:
|
||||
- flag: "authorization-mode"
|
||||
set: false
|
||||
- flag: "authorization-mode"
|
||||
compare:
|
||||
op: has
|
||||
value: "Webhook"
|
||||
set: true
|
||||
remediation: |
|
||||
Edit the Openshift node config file /etc/origin/node/node-config.yaml and remove authorization-mode under
|
||||
kubeletArguments in /etc/origin/node/node-config.yaml or set it to "Webhook".
|
||||
scored: true
|
||||
|
||||
- id: 7.4
|
||||
text: "Verify the OpenShift default for the client-ca-file argument"
|
||||
audit: "grep -A1 client-ca-file /etc/origin/node/node-config.yaml"
|
||||
tests:
|
||||
test_items:
|
||||
- flag: "client-ca-file"
|
||||
set: false
|
||||
remediation: |
|
||||
Edit the Openshift node config file /etc/origin/node/node-config.yaml and remove any configuration returned by the following:
|
||||
grep -A1 client-ca-file /etc/origin/node/node-config.yaml
|
||||
|
||||
Reset to the OpenShift default.
|
||||
See https://github.com/openshift/openshift-ansible/blob/release-3.10/roles/openshift_node_group/templates/node-config.yaml.j2#L65
|
||||
The config file does not have this defined in kubeletArgument, but in PodManifestConfig.
|
||||
scored: true
|
||||
|
||||
- id: 7.5
|
||||
text: "Verify the OpenShift default setting for the read-only-port argument"
|
||||
audit: "grep -A1 read-only-port /etc/origin/node/node-config.yaml"
|
||||
tests:
|
||||
bin_op: or
|
||||
test_items:
|
||||
- flag: "read-only-port"
|
||||
set: false
|
||||
- flag: "read-only-port"
|
||||
compare:
|
||||
op: has
|
||||
value: "0"
|
||||
set: true
|
||||
remediation: |
|
||||
Edit the Openshift node config file /etc/origin/node/node-config.yaml and removed so that the OpenShift default is applied.
|
||||
scored: true
|
||||
|
||||
- id: 7.6
|
||||
text: "Adjust the streaming-connection-idle-timeout argument"
|
||||
audit: "grep -A1 streaming-connection-idle-timeout /etc/origin/node/node-config.yaml"
|
||||
tests:
|
||||
bin_op: or
|
||||
test_items:
|
||||
- flag: "streaming-connection-idle-timeout"
|
||||
set: false
|
||||
- flag: "5m"
|
||||
set: false
|
||||
remediation: |
|
||||
Edit the Openshift node config file /etc/origin/node/node-config.yaml and set the streaming-connection-timeout
|
||||
value like the following in node-config.yaml.
|
||||
|
||||
kubeletArguments:
|
||||
streaming-connection-idle-timeout:
|
||||
- "5m"
|
||||
scored: true
|
||||
|
||||
      - id: 7.7
        text: "Verify the OpenShift defaults for the protect-kernel-defaults argument"
        type: "skip"
        scored: true

      - id: 7.8
        text: "Verify the OpenShift default value of true for the make-iptables-util-chains argument"
        audit: "grep -A1 make-iptables-util-chains /etc/origin/node/node-config.yaml"
        tests:
          bin_op: or
          test_items:
            - flag: "make-iptables-util-chains"
              set: false
            - flag: "make-iptables-util-chains"
              compare:
                op: has
                value: "true"
              set: true
        remediation: |
          Edit the Openshift node config file /etc/origin/node/node-config.yaml and reset make-iptables-util-chains to the OpenShift
          default value of true.
        scored: true

      - id: 7.9
        text: "Verify that the --keep-terminated-pod-volumes argument is set to false"
        audit: "grep -A1 keep-terminated-pod-volumes /etc/origin/node/node-config.yaml"
        tests:
          test_items:
            - flag: "keep-terminated-pod-volumes"
              compare:
                op: has
                value: "false"
              set: true
        remediation: |
          Reset to the OpenShift defaults.
        scored: true

      - id: 7.10
        text: "Verify the OpenShift defaults for the hostname-override argument"
        type: "skip"
        scored: true

      - id: 7.11
        text: "Set the --event-qps argument to 0"
        audit: "grep -A1 event-qps /etc/origin/node/node-config.yaml"
        tests:
          bin_op: or
          test_items:
            - flag: "event-qps"
              set: false
            - flag: "event-qps"
              compare:
                op: has
                value: "0"
              set: true
        remediation: |
          Edit the Openshift node config file /etc/origin/node/node-config.yaml and set the event-qps argument to 0 in
          the kubeletArguments section.
        scored: true

      - id: 7.12
        text: "Verify the OpenShift cert-dir flag for HTTPS traffic"
        audit: "grep -A1 cert-dir /etc/origin/node/node-config.yaml"
        tests:
          test_items:
            - flag: "/etc/origin/node/certificates"
              compare:
                op: has
                value: "/etc/origin/node/certificates"
              set: true
        remediation: |
          Reset to the OpenShift default values.
        scored: true

      - id: 7.13
        text: "Verify the OpenShift default of 0 for the cadvisor-port argument"
        audit: "grep -A1 cadvisor-port /etc/origin/node/node-config.yaml"
        tests:
          bin_op: or
          test_items:
            - flag: "cadvisor-port"
              set: false
            - flag: "cadvisor-port"
              compare:
                op: has
                value: "0"
              set: true
        remediation: |
          Edit the Openshift node config file /etc/origin/node/node-config.yaml and remove the cadvisor-port flag
          if it is set in the kubeletArguments section.
        scored: true

      - id: 7.14
        text: "Verify that the RotateKubeletClientCertificate argument is set to true"
        audit: "grep -B1 RotateKubeletClientCertificate=true /etc/origin/node/node-config.yaml"
        tests:
          test_items:
            - flag: "RotateKubeletClientCertificate=true"
              compare:
                op: has
                value: "true"
              set: true
        remediation: |
          Edit the Openshift node config file /etc/origin/node/node-config.yaml and set RotateKubeletClientCertificate to true.
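          For example (illustrative only; the exact kubeletArguments entries can vary between OpenShift versions):
          kubeletArguments:
            feature-gates:
              - "RotateKubeletClientCertificate=true"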
        scored: true

      - id: 7.15
        text: "Verify that the RotateKubeletServerCertificate argument is set to true"
        audit: "grep -B1 RotateKubeletServerCertificate=true /etc/origin/node/node-config.yaml"
        tests:
          test_items:
            - flag: "RotateKubeletServerCertificate=true"
              compare:
                op: has
                value: "true"
              set: true
        remediation: |
          Edit the Openshift node config file /etc/origin/node/node-config.yaml and set RotateKubeletServerCertificate to true.
        scored: true

  - id: 8
    text: "Configuration Files"
    checks:
      - id: 8.1
        text: "Verify the OpenShift default permissions for the kubelet.conf file"
        audit: "stat -c %a /etc/origin/node/node.kubeconfig"
        tests:
          bin_op: or
          test_items:
            - flag: "644"
              compare:
                op: eq
                value: "644"
              set: true
            - flag: "640"
              compare:
                op: eq
                value: "640"
              set: true
            - flag: "600"
              compare:
                op: eq
                value: "600"
              set: true
            - flag: "444"
              compare:
                op: eq
                value: "444"
              set: true
            - flag: "440"
              compare:
                op: eq
                value: "440"
              set: true
            - flag: "400"
              compare:
                op: eq
                value: "400"
              set: true
            - flag: "000"
              compare:
                op: eq
                value: "000"
              set: true
        remediation: |
          Run the below command on each worker node.
          chmod 644 /etc/origin/node/node.kubeconfig
        scored: true

      - id: 8.2
        text: "Verify the kubeconfig file ownership of root:root"
        audit: "stat -c %U:%G /etc/origin/node/node.kubeconfig"
        tests:
          test_items:
            - flag: "root:root"
              compare:
                op: eq
                value: root:root
              set: true
        remediation: |
          Run the below command on each worker node.
          chown root:root /etc/origin/node/node.kubeconfig
        scored: true

      - id: 8.3
        text: "Verify the kubelet service file permissions of 644"
        audit: "stat -c %a $nodesvc"
        tests:
          bin_op: or
          test_items:
            - flag: "644"
              compare:
                op: eq
                value: "644"
              set: true
            - flag: "640"
              compare:
                op: eq
                value: "640"
              set: true
            - flag: "600"
              compare:
                op: eq
                value: "600"
              set: true
            - flag: "444"
              compare:
                op: eq
                value: "444"
              set: true
            - flag: "440"
              compare:
                op: eq
                value: "440"
              set: true
            - flag: "400"
              compare:
                op: eq
                value: "400"
              set: true
            - flag: "000"
              compare:
                op: eq
                value: "000"
              set: true
        remediation: |
          Run the below command on each worker node.
          chmod 644 $nodesvc
        scored: true

      - id: 8.4
        text: "Verify the kubelet service file ownership of root:root"
        audit: "stat -c %U:%G $nodesvc"
        tests:
          test_items:
            - flag: "root:root"
              compare:
                op: eq
                value: root:root
              set: true
        remediation: |
          Run the below command on each worker node.
          chown root:root $nodesvc
        scored: true

      - id: 8.5
        text: "Verify the OpenShift default permissions for the proxy kubeconfig file"
        audit: "stat -c %a /etc/origin/node/node.kubeconfig"
        tests:
          bin_op: or
          test_items:
            - flag: "644"
              compare:
                op: eq
                value: "644"
              set: true
            - flag: "640"
              compare:
                op: eq
                value: "640"
              set: true
            - flag: "600"
              compare:
                op: eq
                value: "600"
              set: true
            - flag: "444"
              compare:
                op: eq
                value: "444"
              set: true
            - flag: "440"
              compare:
                op: eq
                value: "440"
              set: true
            - flag: "400"
              compare:
                op: eq
                value: "400"
              set: true
            - flag: "000"
              compare:
                op: eq
                value: "000"
              set: true
        remediation: |
          Run the below command on each worker node.
          chmod 644 /etc/origin/node/node.kubeconfig
        scored: true

      - id: 8.6
        text: "Verify the proxy kubeconfig file ownership of root:root"
        audit: "stat -c %U:%G /etc/origin/node/node.kubeconfig"
        tests:
          test_items:
            - flag: "root:root"
              compare:
                op: eq
                value: root:root
              set: true
        remediation: |
          Run the below command on each worker node.
          chown root:root /etc/origin/node/node.kubeconfig
        scored: true

      - id: 8.7
        text: "Verify the OpenShift default permissions for the certificate authorities file."
        audit: "stat -c %a /etc/origin/node/client-ca.crt"
        tests:
          bin_op: or
          test_items:
            - flag: "644"
              compare:
                op: eq
                value: "644"
              set: true
            - flag: "640"
              compare:
                op: eq
                value: "640"
              set: true
            - flag: "600"
              compare:
                op: eq
                value: "600"
              set: true
            - flag: "444"
              compare:
                op: eq
                value: "444"
              set: true
            - flag: "440"
              compare:
                op: eq
                value: "440"
              set: true
            - flag: "400"
              compare:
                op: eq
                value: "400"
              set: true
            - flag: "000"
              compare:
                op: eq
                value: "000"
              set: true
        remediation: |
          Run the below command on each worker node.
          chmod 644 /etc/origin/node/client-ca.crt
        scored: true

      - id: 8.8
        text: "Verify the client certificate authorities file ownership of root:root"
        audit: "stat -c %U:%G /etc/origin/node/client-ca.crt"
        tests:
          test_items:
            - flag: "root:root"
              compare:
                op: eq
                value: root:root
              set: true
        remediation: |
          Run the below command on each worker node.
          chown root:root /etc/origin/node/client-ca.crt
        scored: true

@@ -171,7 +171,6 @@ func (c *Check) run() State {
 			c.State = PASS
 			c.ActualValue = finalOutput.actualResult
 			c.ExpectedResult = finalOutput.ExpectedResult
-			glog.V(3).Infof("Check.ID: %s Command: %q TestResult: %t Score: %q \n", c.ID, lastCommand, finalOutput.testResult, c.State)
 		} else {
 			if c.Scored {
 				c.State = FAIL
@@ -180,7 +179,9 @@ func (c *Check) run() State {
 		}
 	}

-	if finalOutput == nil {
+	if finalOutput != nil {
+		glog.V(3).Infof("Check.ID: %s Command: %q TestResult: %t State: %q \n", c.ID, lastCommand, finalOutput.testResult, c.State)
+	} else {
 		glog.V(3).Infof("Check.ID: %s Command: %q TestResult: <<EMPTY>> \n", c.ID, lastCommand)
 	}

@@ -242,8 +243,7 @@ func isShellCommand(s string) bool {

 	out, err := cmd.Output()
 	if err != nil {
-		fmt.Fprintf(os.Stderr, "%s\n", err)
-		os.Exit(1)
+		exitWithError(fmt.Errorf("failed to check if command: %q is valid %v", s, err))
 	}

 	if strings.Contains(string(out), s) {
@@ -331,6 +331,13 @@ func runExecCommands(audit string, commands []*exec.Cmd, out *bytes.Buffer) (Sta
 		i++
 	}

-	glog.V(3).Infof("Command %q - Output:\n\n %s\n", audit, out.String())
+	glog.V(3).Infof("Command %q - Output:\n\n %q\n - Error Messages:%q \n", audit, out.String(), errmsgs)
 	return "", errmsgs
 }
+
+func exitWithError(err error) {
+	fmt.Fprintf(os.Stderr, "\n%v\n", err)
+	// flush before exit non-zero
+	glog.Flush()
+	os.Exit(1)
+}

@@ -15,6 +15,7 @@
 package check

 import (
+	"os/exec"
 	"testing"
 )

@@ -27,10 +28,27 @@ func TestCheck_Run(t *testing.T) {
 	testCases := []TestCase{
-		{check: Check{Type: MANUAL}, Expected: WARN},
-		{check: Check{Type: "skip"}, Expected: INFO},
-		{check: Check{Type: "", Scored: false}, Expected: WARN}, // Not scored checks with no type should be marked warn
-		{check: Check{Type: "", Scored: true}, Expected: WARN},  // If there are no tests in the check, warn
+		{check: Check{Type: MANUAL, Scored: false}, Expected: WARN},
+		{check: Check{Type: "skip", Scored: false}, Expected: INFO},
+
+		{check: Check{Scored: false}, Expected: WARN}, // Not scored checks with no type, or not scored failing tests are marked warn
+		{
+			check: Check{ // Not scored checks with passing tests are marked pass
+				Scored: false,
+				Audit:  ":", Commands: []*exec.Cmd{exec.Command("")},
+				Tests:  &tests{TestItems: []*testItem{&testItem{}}},
+			},
+			Expected: PASS,
+		},
+
+		{check: Check{Scored: true}, Expected: WARN},                  // If there are no tests in the check, warn
+		{check: Check{Scored: true, Tests: &tests{}}, Expected: FAIL}, // If there are tests that are not passing, fail
+		{
+			check: Check{ // Scored checks with passing tests are marked pass
+				Scored: true,
+				Audit:  ":", Commands: []*exec.Cmd{exec.Command("")},
+				Tests:  &tests{TestItems: []*testItem{&testItem{}}},
+			},
+			Expected: PASS,
+		},
 	}
 	for _, testCase := range testCases {

@@ -165,7 +165,7 @@ func compareOp(tCompareOp string, flagVal string, tCompareValue string) (string,
 	case "gt", "gte", "lt", "lte":
 		a, b, err := toNumeric(flagVal, tCompareValue)
 		if err != nil {
-			fmt.Fprintf(os.Stderr, "%v\n", err)
+			fmt.Fprintf(os.Stderr, "Not numeric value - flag: %q - compareValue: %q %v\n", flagVal, tCompareValue, err)
 			os.Exit(1)
 		}
 		switch tCompareOp {

@@ -90,7 +90,7 @@ func runChecks(nodetype check.NodeType, testYamlFile string) {

 	// Checks that the executables we need for the section are running.
 	if err != nil {
-		exitWithError(err)
+		exitWithError(fmt.Errorf("failed to get a set of executables needed for tests: %v", err))
 	}

 	confmap := getFiles(typeConf, "config")
@@ -229,7 +229,7 @@ func loadConfig(nodetype check.NodeType) string {

 	benchmarkVersion, err := getBenchmarkVersion(kubeVersion, benchmarkVersion, viper.GetViper())
 	if err != nil {
-		exitWithError(err)
+		exitWithError(fmt.Errorf("failed to get benchMark version: %v", err))
 	}

 	path, err := getConfigFilePath(benchmarkVersion, file)
@@ -319,6 +319,7 @@ func getBenchmarkVersion(kubeVersion, benchmarkVersion string, v *viper.Viper) (

 // isMaster verifies if master components are running on the node.
 func isMaster() bool {
+	loadConfig(check.MASTER)
 	return isThisNodeRunning(check.MASTER)
 }

@@ -155,6 +155,20 @@ func TestIsMaster(t *testing.T) {
 			isMaster: false,
 		},
 	}
+	cfgDirOld := cfgDir
+	cfgDir = "../cfg"
+	defer func() {
+		cfgDir = cfgDirOld
+	}()
+
+	execCode := `#!/bin/sh
+echo "Server Version: v1.13.10"
+`
+	restore, err := fakeExecutableInPath("kubectl", execCode)
+	if err != nil {
+		t.Fatal("Failed when calling fakeExecutableInPath ", err)
+	}
+	defer restore()
+
 	for _, tc := range testCases {
 		cfgFile = tc.cfgFile
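The fakeExecutableInPath helper these tests lean on uses a standard trick: write a stub script into a temporary directory and put that directory at the front of PATH, so the code under test resolves the stub instead of the real binary. A minimal self-contained sketch of the same idea (the names and stub contents here are illustrative, not the kube-bench helper itself; a Unix shell is assumed):

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"os/exec"
	"path/filepath"
)

func main() {
	// Create a temp dir to hold the fake "kubectl".
	tmp, err := ioutil.TempDir("", "fakebin")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(tmp)

	// A stub that prints a fixed version string, like the tests above expect.
	stub := "#!/bin/sh\necho \"Server Version: v1.13.10\"\n"
	if err := ioutil.WriteFile(filepath.Join(tmp, "kubectl"), []byte(stub), 0700); err != nil {
		panic(err)
	}

	// Prepend the temp dir so lookups find the stub before any real kubectl.
	os.Setenv("PATH", tmp+string(os.PathListSeparator)+os.Getenv("PATH"))

	out, err := exec.Command("kubectl", "version").Output()
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // Server Version: v1.13.10
}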
@@ -386,6 +400,73 @@ func TestValidTargets(t *testing.T) {
 	}
 }

+func TestIsEtcd(t *testing.T) {
+	testCases := []struct {
+		name            string
+		cfgFile         string
+		getBinariesFunc func(*viper.Viper, check.NodeType) (map[string]string, error)
+		isEtcd          bool
+	}{
+		{
+			name:    "valid config, is etcd and all components are running",
+			cfgFile: "../cfg/config.yaml",
+			getBinariesFunc: func(viper *viper.Viper, nt check.NodeType) (strings map[string]string, i error) {
+				return map[string]string{"etcd": "etcd"}, nil
+			},
+			isEtcd: true,
+		},
+		{
+			name:    "valid config, is etcd but not all components are running",
+			cfgFile: "../cfg/config.yaml",
+			getBinariesFunc: func(viper *viper.Viper, nt check.NodeType) (strings map[string]string, i error) {
+				return map[string]string{}, nil
+			},
+			isEtcd: false,
+		},
+		{
+			name:    "valid config, is etcd, not all components are running and fails to find all binaries",
+			cfgFile: "../cfg/config.yaml",
+			getBinariesFunc: func(viper *viper.Viper, nt check.NodeType) (strings map[string]string, i error) {
+				return map[string]string{}, errors.New("failed to find binaries")
+			},
+			isEtcd: false,
+		},
+		{
+			name:    "valid config, does not include etcd",
+			cfgFile: "../cfg/node_only.yaml",
+			isEtcd:  false,
+		},
+	}
+	cfgDirOld := cfgDir
+	cfgDir = "../cfg"
+	defer func() {
+		cfgDir = cfgDirOld
+	}()
+
+	execCode := `#!/bin/sh
+echo "Server Version: v1.15.03"
+`
+	restore, err := fakeExecutableInPath("kubectl", execCode)
+	if err != nil {
+		t.Fatal("Failed when calling fakeExecutableInPath ", err)
+	}
+	defer restore()
+
+	for _, tc := range testCases {
+		cfgFile = tc.cfgFile
+		initConfig()
+
+		oldGetBinariesFunc := getBinariesFunc
+		getBinariesFunc = tc.getBinariesFunc
+		defer func() {
+			getBinariesFunc = oldGetBinariesFunc
+			cfgFile = ""
+		}()
+
+		assert.Equal(t, tc.isEtcd, isEtcd(), tc.name)
+	}
+}
+
 func loadConfigForTest() (*viper.Viper, error) {
 	viperWithData := viper.New()
 	viperWithData.SetConfigFile(filepath.Join("..", cfgDir, "config.yaml"))
@@ -410,11 +491,6 @@ func fakeExecutableInPath(execFile, execCode string) (restoreFn, error) {
 		return nil, err
 	}

-	err = os.Chdir(tmp)
-	if err != nil {
-		return nil, err
-	}
-
 	if len(execCode) > 0 {
 		ioutil.WriteFile(filepath.Join(tmp, execFile), []byte(execCode), 0700)
 	} else {

@@ -38,7 +38,7 @@ var (
 	kubeVersion      string
 	benchmarkVersion string
 	cfgFile          string
-	cfgDir           string
+	cfgDir           = "./cfg/"
 	jsonFmt          bool
 	junitFmt         bool
 	pgSQL            bool
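A side effect of giving cfgDir a package-level default value is that packagers can also override it at link time with Go's -X flag, without patching the source; a hedged example (the install path is arbitrary):

go build -ldflags "-X github.com/aquasecurity/kube-bench/cmd.cfgDir=/custom/cfg" .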
@@ -64,7 +64,7 @@ var RootCmd = &cobra.Command{
 	Run: func(cmd *cobra.Command, args []string) {
 		benchmarkVersion, err := getBenchmarkVersion(kubeVersion, benchmarkVersion, viper.GetViper())
 		if err != nil {
-			exitWithError(err)
+			exitWithError(fmt.Errorf("unable to determine benchmark version: %v", err))
 		}

 		if isMaster() {
@@ -81,7 +81,7 @@ var RootCmd = &cobra.Command{

 		// Etcd is only valid for CIS 1.5 and later,
 		// this is a gatekeeper for previous versions.
-		if isEtcd() && validTargets(benchmarkVersion, []string{string(check.ETCD)}) {
+		if validTargets(benchmarkVersion, []string{string(check.ETCD)}) && isEtcd() {
 			glog.V(1).Info("== Running etcd checks ==\n")
 			runChecks(check.ETCD, loadConfig(check.ETCD))
 		}
@@ -102,7 +102,6 @@ var RootCmd = &cobra.Command{
 // Execute adds all child commands to the root command sets flags appropriately.
 // This is called by main.main(). It only needs to happen once to the rootCmd.
 func Execute() {
-	goflag.Set("logtostderr", "true")
 	goflag.CommandLine.Parse([]string{})

 	if err := RootCmd.Execute(); err != nil {
@@ -145,7 +144,7 @@ func init() {
 		`Run all the checks under this comma-delimited list of groups. Example --group="1.1"`,
 	)
 	RootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is ./cfg/config.yaml)")
-	RootCmd.PersistentFlags().StringVarP(&cfgDir, "config-dir", "D", "./cfg/", "config directory")
+	RootCmd.PersistentFlags().StringVarP(&cfgDir, "config-dir", "D", cfgDir, "config directory")
 	RootCmd.PersistentFlags().StringVar(&kubeVersion, "version", "", "Manually specify Kubernetes version, automatically detected if unset")
 	RootCmd.PersistentFlags().StringVar(&benchmarkVersion, "benchmark", "", "Manually specify CIS benchmark version. It would be an error to specify both --version and --benchmark flags")

@@ -29,12 +29,12 @@ var runCmd = &cobra.Command{
 	Run: func(cmd *cobra.Command, args []string) {
 		targets, err := cmd.Flags().GetStringSlice("targets")
 		if err != nil {
-			exitWithError(err)
+			exitWithError(fmt.Errorf("unable to get `targets` from command line: %v", err))
 		}

 		benchmarkVersion, err := getBenchmarkVersion(kubeVersion, benchmarkVersion, viper.GetViper())
 		if err != nil {
-			exitWithError(err)
+			exitWithError(fmt.Errorf("unable to get benchmark version. error: %v", err))
 		}

 		glog.V(2).Infof("Checking targets %v for %v", targets, benchmarkVersion)
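With the targets flag wired into the run subcommand as above, a typical invocation looks like this (a sketch; target names and the benchmark ID must match the shipped config):

./kube-bench run --targets master,etcd --benchmark cis-1.5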
@@ -1,3 +1,4 @@
---
# use this pod with: kubectl run ubuntu -it --pid=host -- /bin/bash
# this allows you to debug what is running on the host.
apiVersion: v1
@@ -7,40 +8,40 @@ metadata:
spec:
  hostPID: true
  containers:
    - name: ubuntu
      image: ubuntu
      command: ["/bin/bash", "-c", "--"]
      args: ["while true; do sleep 30; done;"]
      volumeMounts:
        - name: var-lib-kubelet
          mountPath: /var/lib/kubelet
        - name: etc-systemd
          mountPath: /etc/systemd
        - name: etc-kubernetes
          mountPath: /etc/kubernetes
        # /usr/bin is mounted to access kubectl / kubelet, for auto-detecting the Kubernetes version.
        # You can omit this mount if you specify --version as part of the command.
        - name: usr-bin
          mountPath: /usr/bin
        - name: kind-bin
          mountPath: /kind/bin
      resources:
        limits:
          memory: "128Mi"
          cpu: "500m"
  volumes:
    - name: var-lib-kubelet
      hostPath:
        path: "/var/lib/kubelet"
    - name: etc-systemd
      hostPath:
        path: "/etc/systemd"
    - name: etc-kubernetes
      hostPath:
        path: "/etc/kubernetes"
    - name: usr-bin
      hostPath:
        path: "/usr/bin"
    - name: kind-bin
      hostPath:
        path: "/kind/bin"

@@ -1,3 +1,4 @@
---
apiVersion: batch/v1
kind: Job
metadata:
@@ -10,41 +11,41 @@ spec:
    spec:
      hostPID: true
      containers:
        - name: kube-bench
          image: aquasec/kube-bench:${VERSION}
          command: ["kube-bench"]
          volumeMounts:
            - name: var-lib-etcd
              mountPath: /var/lib/etcd
            - name: var-lib-kubelet
              mountPath: /var/lib/kubelet
            - name: etc-systemd
              mountPath: /etc/systemd
            - name: etc-kubernetes
              mountPath: /etc/kubernetes
            # /usr/bin is mounted to access kubectl / kubelet, for auto-detecting the Kubernetes version.
            # You can omit this mount if you specify --version as part of the command.
            - name: usr-bin
              mountPath: /usr/bin
            - name: kind-bin
              mountPath: /kind/bin
      restartPolicy: Never
      volumes:
        - name: var-lib-etcd
          hostPath:
            path: "/var/lib/etcd"
        - name: var-lib-kubelet
          hostPath:
            path: "/var/lib/kubelet"
        - name: etc-systemd
          hostPath:
            path: "/etc/systemd"
        - name: etc-kubernetes
          hostPath:
            path: "/etc/kubernetes"
        - name: usr-bin
          hostPath:
            path: "/usr/bin"
        - name: kind-bin
          hostPath:
            path: "/kind/bin"

@@ -19,60 +19,38 @@ import (
 	"sigs.k8s.io/kind/pkg/cluster/create"
 )

-func runWithKind(clusterName, kindCfg, kubebenchYAML, kubebenchImg string, timeout, ticker time.Duration) (string, error) {
-	options := create.WithConfigFile(kindCfg)
-	ctx := cluster.NewContext(clusterName)
-	if err := ctx.Create(options); err != nil {
-		return "", err
-	}
-	defer func() {
-		ctx.Delete()
-	}()
-
-	clientset, err := getClientSet(ctx.KubeConfigPath())
+func runWithKind(ctx *cluster.Context, clientset *kubernetes.Clientset, jobName, kubebenchYAML, kubebenchImg string, timeout time.Duration) (string, error) {
+	err := deployJob(clientset, kubebenchYAML, kubebenchImg)
 	if err != nil {
 		return "", err
 	}

-	jobYAML, err := ioutil.ReadFile(kubebenchYAML)
-	if err != nil {
-		return "", err
-	}
-
-	decoder := yaml.NewYAMLOrJSONDecoder(bytes.NewReader(jobYAML), len(jobYAML))
-	if err != nil {
-		return "", err
-	}
-
-	job := &batchv1.Job{}
-	if err := decoder.Decode(job); err != nil {
-		return "", err
-	}
-	job.Spec.Template.Spec.Containers[0].Image = kubebenchImg
-
-	if err := loadImageFromDocker(kubebenchImg, ctx); err != nil {
-		return "", err
-	}
-
-	_, err = clientset.BatchV1().Jobs(apiv1.NamespaceDefault).Create(job)
-	if err != nil {
-		return "", err
-	}
-
-	clientset, err = getClientSet(ctx.KubeConfigPath())
-	if err != nil {
-		return "", err
-	}
-
-	p, err := findPodForJob(clientset, "kube-bench", timeout, ticker)
+	p, err := findPodForJob(clientset, jobName, timeout)
 	if err != nil {
 		return "", err
 	}

 	output := getPodLogs(clientset, p)

+	err = clientset.BatchV1().Jobs(apiv1.NamespaceDefault).Delete(jobName, nil)
+	if err != nil {
+		return "", err
+	}
+
 	return output, nil
 }

+func setupCluster(clusterName, kindCfg string, duration time.Duration) (*cluster.Context, error) {
+	options := create.WithConfigFile(kindCfg)
+	toptions := create.WaitForReady(duration)
+	ctx := cluster.NewContext(clusterName)
+	if err := ctx.Create(options, toptions); err != nil {
+		return nil, err
+	}
+
+	return ctx, nil
+}
+
 func getClientSet(configPath string) (*kubernetes.Clientset, error) {
 	config, err := clientcmd.BuildConfigFromFlags("", configPath)
 	if err != nil {
@@ -86,16 +64,38 @@ func getClientSet(configPath string) (*kubernetes.Clientset, error) {
 	return clientset, nil
 }

-func findPodForJob(clientset *kubernetes.Clientset, name string, tout, timer time.Duration) (*apiv1.Pod, error) {
-	timeout := time.After(tout)
+func deployJob(clientset *kubernetes.Clientset, kubebenchYAML, kubebenchImg string) error {
+	jobYAML, err := ioutil.ReadFile(kubebenchYAML)
+	if err != nil {
+		return err
+	}
+
+	decoder := yaml.NewYAMLOrJSONDecoder(bytes.NewReader(jobYAML), len(jobYAML))
+	job := &batchv1.Job{}
+	if err := decoder.Decode(job); err != nil {
+		return err
+	}
+	job.Spec.Template.Spec.Containers[0].Image = kubebenchImg
+
+	_, err = clientset.BatchV1().Jobs(apiv1.NamespaceDefault).Create(job)
+
+	return err
+}
+
+func findPodForJob(clientset *kubernetes.Clientset, jobName string, duration time.Duration) (*apiv1.Pod, error) {
 	failedPods := make(map[string]struct{})
+	selector := fmt.Sprintf("job-name=%s", jobName)
+	timeout := time.After(duration)
 	for {
+		time.Sleep(3 * time.Second)
 	podfailed:
 		select {
 		case <-timeout:
-			return nil, fmt.Errorf("podList - time out: no Pod with %s", name)
+			return nil, fmt.Errorf("podList - timed out: no Pod found for Job %s", jobName)
 		default:
-			pods, err := clientset.CoreV1().Pods(apiv1.NamespaceDefault).List(metav1.ListOptions{})
+			pods, err := clientset.CoreV1().Pods(apiv1.NamespaceDefault).List(metav1.ListOptions{
+				LabelSelector: selector,
+			})
 			if err != nil {
 				return nil, err
 			}
@@ -105,7 +105,7 @@ func findPodForJob(clientset *kubernetes.Clientset, name string, tout, tim
 				continue
 			}

-			if strings.HasPrefix(cp.Name, name) {
+			if strings.HasPrefix(cp.Name, jobName) {
 				fmt.Printf("pod (%s) - %#v\n", cp.Name, cp.Status.Phase)
 				if cp.Status.Phase == apiv1.PodSucceeded {
 					return &cp, nil
@@ -117,48 +117,12 @@ func findPodForJob(clientset *kubernetes.Clientset, name string, tout, tim
 					break podfailed
 				}
-
-				// Pod still working
-				// Wait and try again...
-				ticker := time.NewTicker(timer)
-				for {
-					fmt.Println("using ticker and an timer...")
-					select {
-					case <-ticker.C:
-						thePod, err := clientset.CoreV1().Pods(apiv1.NamespaceDefault).Get(cp.Name, metav1.GetOptions{})
-						if err != nil {
-							return nil, err
-						}
-						fmt.Printf("thePod (%s) - status:%#v \n", thePod.Name, thePod.Status.Phase)
-						if thePod.Status.Phase == apiv1.PodSucceeded {
-							return thePod, nil
-						}
-
-						if thePod.Status.Phase == apiv1.PodFailed {
-							fmt.Printf("thePod (%s) - %s - retrying...\n", thePod.Name, thePod.Status.Phase)
-							failedPods[thePod.Name] = struct{}{}
-							ticker.Stop()
-							break podfailed
-						}
-
-						if thePod.Status.Phase == apiv1.PodPending && strings.Contains(thePod.Status.Reason, "Failed") {
-							fmt.Printf("thePod (%s) - %s - retrying...\n", thePod.Name, thePod.Status.Reason)
-							failedPods[thePod.Name] = struct{}{}
-							ticker.Stop()
-							break podfailed
-						}
-
-					case <-timeout:
-						ticker.Stop()
-						return nil, fmt.Errorf("getPod time out: no Pod with %s", name)
-					}
-				}
 			}
 			}
 		}
+		time.Sleep(1 * time.Second)
 	}

-	return nil, fmt.Errorf("no Pod with %s", name)
+	return nil, fmt.Errorf("no Pod found for Job %q", jobName)
 }
func getPodLogs(clientset *kubernetes.Clientset, pod *apiv1.Pod) string {
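The rewritten findPodForJob polls inside a select against a single time.After channel instead of nesting a ticker loop. The shape of that poll-with-timeout pattern, reduced to a self-contained sketch (pollUntil and its check function are illustrative stand-ins, not kube-bench code):

package main

import (
	"fmt"
	"time"
)

// pollUntil retries check every interval until it returns true or the
// deadline passes, the same poll-with-timeout shape findPodForJob uses.
func pollUntil(interval, limit time.Duration, check func() bool) error {
	timeout := time.After(limit)
	for {
		select {
		case <-timeout:
			return fmt.Errorf("timed out after %v", limit)
		default:
			if check() {
				return nil
			}
		}
		time.Sleep(interval)
	}
}

func main() {
	start := time.Now()
	err := pollUntil(100*time.Millisecond, time.Second, func() bool {
		// Stand-in for "the pod reached PodSucceeded".
		return time.Since(start) > 300*time.Millisecond
	})
	fmt.Println(err) // <nil>
}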
@@ -3,6 +3,8 @@
 package integration

 import (
+	"bufio"
+	"bytes"
 	"flag"
 	"fmt"
 	"io/ioutil"
@@ -12,59 +14,129 @@ import (
 )

 var kubebenchImg = flag.String("kubebenchImg", "aquasec/kube-bench:latest", "kube-bench image used as part of this test")
+var timeout = flag.Duration("timeout", 10*time.Minute, "Test Timeout")

 func TestRunWithKind(t *testing.T) {
 	flag.Parse()
 	fmt.Printf("kube-bench Container Image: %s\n", *kubebenchImg)
-	timeout := time.Duration(10 * time.Minute)
-	ticker := time.Duration(2 * time.Second)
-
-	mustMatch := func(expFname, data string) {
-		d, err := ioutil.ReadFile(expFname)
-		if err != nil {
-			t.Error(err)
-		}
-		expectedData := strings.TrimSpace(string(d))
-		data = strings.TrimSpace(data)
-		if expectedData != data {
-			t.Errorf("expected: %q\n\n Got %q\n\n", expectedData, data)
-		}
-	}

 	cases := []struct {
 		TestName      string
-		KindCfg       string
 		KubebenchYAML string
 		ExpectedFile  string
 		ExpectError   bool
 	}{
 		{
-			TestName:      "job",
-			KindCfg:       "./testdata/add-tls-kind-k8s114.yaml",
+			TestName:      "kube-bench",
 			KubebenchYAML: "../job.yaml",
 			ExpectedFile:  "./testdata/job.data",
 		},
 		{
-			TestName:      "job-node",
-			KindCfg:       "./testdata/add-tls-kind-k8s114.yaml",
+			TestName:      "kube-bench-node",
 			KubebenchYAML: "../job-node.yaml",
 			ExpectedFile:  "./testdata/job-node.data",
 		},
 		{
-			TestName:      "job-master",
-			KindCfg:       "./testdata/add-tls-kind-k8s114.yaml",
+			TestName:      "kube-bench-master",
 			KubebenchYAML: "../job-master.yaml",
 			ExpectedFile:  "./testdata/job-master.data",
 		},
 	}
+	ctx, err := setupCluster("kube-bench", "./testdata/add-tls-kind-k8s114.yaml", *timeout)
+	if err != nil {
+		t.Fatalf("failed to setup KIND cluster error: %v", err)
+	}
+	defer func() {
+		ctx.Delete()
+	}()
+
+	if err := loadImageFromDocker(*kubebenchImg, ctx); err != nil {
+		t.Fatalf("failed to load kube-bench image from Docker to KIND error: %v", err)
+	}
+
+	clientset, err := getClientSet(ctx.KubeConfigPath())
+	if err != nil {
+		t.Fatalf("failed to connect to Kubernetes cluster error: %v", err)
+	}
+
 	for _, c := range cases {
 		t.Run(c.TestName, func(t *testing.T) {
-			data, err := runWithKind(c.TestName, c.KindCfg, c.KubebenchYAML, *kubebenchImg, timeout, ticker)
+			resultData, err := runWithKind(ctx, clientset, c.TestName, c.KubebenchYAML, *kubebenchImg, *timeout)
 			if err != nil {
-				t.Fatalf("unexpected error: %v", err)
-				return
+				t.Errorf("unexpected error: %v", err)
 			}
+
+			c, err := ioutil.ReadFile(c.ExpectedFile)
+			if err != nil {
+				t.Error(err)
+			}
+
+			expectedData := strings.TrimSpace(string(c))
+			resultData = strings.TrimSpace(resultData)
+			if expectedData != resultData {
+				t.Errorf("expected results\n\nExpected\t(<)\nResult\t(>)\n\n%s\n\n", generateDiff(expectedData, resultData))
+			}
-			mustMatch(c.ExpectedFile, data)
 		})
 	}
 }

+// This is a simple "diff" between 2 strings containing multiple lines.
+// It's not a comprehensive diff between the 2 strings.
+// It does not indicate when lines are deleted.
+func generateDiff(source, target string) string {
+	buf := new(bytes.Buffer)
+	ss := bufio.NewScanner(strings.NewReader(source))
+	ts := bufio.NewScanner(strings.NewReader(target))
+
+	emptySource := false
+	emptyTarget := false
+
+loop:
+	for ln := 1; ; ln++ {
+		var ll, rl string
+
+		sourceScan := ss.Scan()
+		if sourceScan {
+			ll = ss.Text()
+		}
+
+		targetScan := ts.Scan()
+		if targetScan {
+			rl = ts.Text()
+		}
+
+		switch {
+		case !sourceScan && !targetScan:
+			// no more lines
+			break loop
+		case sourceScan && targetScan:
+			if ll != rl {
+				fmt.Fprintf(buf, "line: %d\n", ln)
+				fmt.Fprintf(buf, "< %s\n", ll)
+				fmt.Fprintf(buf, "> %s\n", rl)
+			}
+		case !targetScan:
+			if !emptyTarget {
+				fmt.Fprintf(buf, "line: %d\n", ln)
+			}
+			fmt.Fprintf(buf, "< %s\n", ll)
+			emptyTarget = true
+		case !sourceScan:
+			if !emptySource {
+				fmt.Fprintf(buf, "line: %d\n", ln)
+			}
+			fmt.Fprintf(buf, "> %s\n", rl)
+			emptySource = true
+		}
+	}
+
+	if emptySource {
+		fmt.Fprintf(buf, "< [[NO MORE DATA]]")
+	}
+
+	if emptyTarget {
+		fmt.Fprintf(buf, "> [[NO MORE DATA]]")
+	}
+
+	return buf.String()
+}
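generateDiff is easiest to understand from its output: for each differing line it emits the line number followed by the < (expected) and > (actual) variants, plus a [[NO MORE DATA]] marker when one side runs out first. A hypothetical driver, assuming it is compiled in the same package as generateDiff:

package integration

import "fmt"

// exampleGenerateDiff is illustrative only, not part of the test suite.
func exampleGenerateDiff() {
	source := "alpha\nbeta\ngamma"
	target := "alpha\nBETA\ngamma\ndelta"
	fmt.Print(generateDiff(source, target))
	// Prints:
	// line: 2
	// < beta
	// > BETA
	// line: 4
	// > delta
	// < [[NO MORE DATA]]
}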
integration/testdata/add-tls-kind-k8s114.yaml (vendored)
@@ -1,19 +1,19 @@
---
apiVersion: kind.sigs.k8s.io/v1alpha3
kind: Cluster
networking:
  apiServerAddress: "0.0.0.0"

kubeadmConfigPatchesJson6902:
  - group: kubelet.config.k8s.io
    version: v1beta1
    kind: KubeletConfiguration
    patch: |
      - op: add
        path: /tlsCipherSuites
        value: ["TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256","TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"]

nodes:
  # the control plane node config
  - role: control-plane
    image: "kindest/node:v1.14.6"

integration/testdata/job-master.data (vendored)
@@ -29,8 +29,8 @@
 [PASS] 1.1.27 Ensure that the admission control plugin ServiceAccount is set(Scored)
 [FAIL] 1.1.28 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Scored)
 [FAIL] 1.1.29 Ensure that the --client-ca-file argument is set as appropriate (Scored)
-[WARN] 1.1.30 Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Not Scored)
-[FAIL] 1.1.31 Ensure that the --etcd-cafile argument is set as appropriate (Scored)
+[FAIL] 1.1.30 Ensure that the --etcd-cafile argument is set as appropriate (Scored)
+[WARN] 1.1.31 Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Not Scored)
 [FAIL] 1.1.32 Ensure that the --authorization-mode argument is set to Node (Scored)
 [FAIL] 1.1.33 Ensure that the admission control plugin NodeRestriction is set (Scored)
 [FAIL] 1.1.34 Ensure that the --encryption-provider-config argument is set as appropriate (Scored)
@@ -92,10 +92,10 @@
 [WARN] 1.6.8 Place compensating controls in the form of PSP and RBAC for privileged containers usage (Not Scored)
 [INFO] 1.7 PodSecurityPolicies
 [WARN] 1.7.1 Do not admit privileged containers (Not Scored)
-[WARN] 1.7.2 Do not admit containers wishing to share the host process ID namespace (Not Scored)
-[WARN] 1.7.3 Do not admit containers wishing to share the host IPC namespace (Not Scored)
-[WARN] 1.7.4 Do not admit containers wishing to share the host network namespace (Not Scored)
-[WARN] 1.7.5 Do not admit containers with allowPrivilegeEscalation (Not Scored)
+[WARN] 1.7.2 Do not admit containers wishing to share the host process ID namespace (Scored)
+[WARN] 1.7.3 Do not admit containers wishing to share the host IPC namespace (Scored)
+[WARN] 1.7.4 Do not admit containers wishing to share the host network namespace (Scored)
+[WARN] 1.7.5 Do not admit containers with allowPrivilegeEscalation (Scored)
 [WARN] 1.7.6 Do not admit root containers (Not Scored)
 [WARN] 1.7.7 Do not admit containers with dangerous capabilities (Not Scored)

@@ -194,16 +194,16 @@ Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-
 on the master node and set the client certificate authority file.
 --client-ca-file=<path/to/client-ca-file>

-1.1.30 Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml
-on the master node and set the below parameter.
---tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
-
-1.1.31 Follow the Kubernetes documentation and set up the TLS connection between the
+1.1.30 Follow the Kubernetes documentation and set up the TLS connection between the
 apiserver and etcd. Then, edit the API server pod specification file
 /etc/kubernetes/manifests/kube-apiserver.yaml on the master node and set the etcd
 certificate authority file parameter.
 --etcd-cafile=<path/to/ca-file>

+1.1.31 Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml
+on the master node and set the below parameter.
+--tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+
 1.1.32 Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml
 on the master node and set the --authorization-mode parameter to a
 value that includes Node.

integration/testdata/job.data (vendored)
@@ -29,8 +29,8 @@
 [PASS] 1.1.27 Ensure that the admission control plugin ServiceAccount is set(Scored)
 [FAIL] 1.1.28 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Scored)
 [FAIL] 1.1.29 Ensure that the --client-ca-file argument is set as appropriate (Scored)
-[WARN] 1.1.30 Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Not Scored)
-[FAIL] 1.1.31 Ensure that the --etcd-cafile argument is set as appropriate (Scored)
+[FAIL] 1.1.30 Ensure that the --etcd-cafile argument is set as appropriate (Scored)
+[WARN] 1.1.31 Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Not Scored)
 [FAIL] 1.1.32 Ensure that the --authorization-mode argument is set to Node (Scored)
 [FAIL] 1.1.33 Ensure that the admission control plugin NodeRestriction is set (Scored)
 [FAIL] 1.1.34 Ensure that the --encryption-provider-config argument is set as appropriate (Scored)
@@ -92,10 +92,10 @@
 [WARN] 1.6.8 Place compensating controls in the form of PSP and RBAC for privileged containers usage (Not Scored)
 [INFO] 1.7 PodSecurityPolicies
 [WARN] 1.7.1 Do not admit privileged containers (Not Scored)
-[WARN] 1.7.2 Do not admit containers wishing to share the host process ID namespace (Not Scored)
-[WARN] 1.7.3 Do not admit containers wishing to share the host IPC namespace (Not Scored)
-[WARN] 1.7.4 Do not admit containers wishing to share the host network namespace (Not Scored)
-[WARN] 1.7.5 Do not admit containers with allowPrivilegeEscalation (Not Scored)
+[WARN] 1.7.2 Do not admit containers wishing to share the host process ID namespace (Scored)
+[WARN] 1.7.3 Do not admit containers wishing to share the host IPC namespace (Scored)
+[WARN] 1.7.4 Do not admit containers wishing to share the host network namespace (Scored)
+[WARN] 1.7.5 Do not admit containers with allowPrivilegeEscalation (Scored)
 [WARN] 1.7.6 Do not admit root containers (Not Scored)
 [WARN] 1.7.7 Do not admit containers with dangerous capabilities (Not Scored)

@@ -194,16 +194,16 @@ Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-
 on the master node and set the client certificate authority file.
 --client-ca-file=<path/to/client-ca-file>

-1.1.30 Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml
-on the master node and set the below parameter.
---tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
-
-1.1.31 Follow the Kubernetes documentation and set up the TLS connection between the
+1.1.30 Follow the Kubernetes documentation and set up the TLS connection between the
 apiserver and etcd. Then, edit the API server pod specification file
 /etc/kubernetes/manifests/kube-apiserver.yaml on the master node and set the etcd
 certificate authority file parameter.
 --etcd-cafile=<path/to/ca-file>

+1.1.31 Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml
+on the master node and set the below parameter.
+--tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+
 1.1.32 Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml
 on the master node and set the --authorization-mode parameter to a
 value that includes Node.

job-eks.yaml
@@ -1,3 +1,4 @@
---
apiVersion: batch/v1
kind: Job
metadata:
@@ -7,26 +8,25 @@ spec:
    spec:
      hostPID: true
      containers:
        - name: kube-bench
          # Push the image to your ECR and then refer to it here
          image: <ID.dkr.ecr.region.amazonaws.com/aquasec/kube-bench:ref>
          command: ["kube-bench", "--version", "1.11"]
          volumeMounts:
            - name: var-lib-kubelet
              mountPath: /var/lib/kubelet
            - name: etc-systemd
              mountPath: /etc/systemd
            - name: etc-kubernetes
              mountPath: /etc/kubernetes
      restartPolicy: Never
      volumes:
        - name: var-lib-kubelet
          hostPath:
            path: "/var/lib/kubelet"
        - name: etc-systemd
          hostPath:
            path: "/etc/systemd"
        - name: etc-kubernetes
          hostPath:
            path: "/etc/kubernetes"

job-iks.yaml
@@ -1,3 +1,4 @@
---
apiVersion: batch/v1
kind: Job
metadata:
@@ -7,27 +8,27 @@ spec:
    spec:
      hostPID: true
      containers:
        - name: kube-bench
          image: aquasec/kube-bench:latest
          command: ["kube-bench", "--version", "1.13", "node"]
          volumeMounts:
            - name: var-lib-kubelet
              mountPath: /var/lib/kubelet
            - name: etc-systemd
              mountPath: /etc/systemd
            - name: etc-kubernetes
              mountPath: /etc/kubernetes
      restartPolicy: Never
      volumes:
        - name: var-lib-kubelet
          hostPath:
            path: "/var/lib/kubelet"
        - name: etc-systemd
          hostPath:
            path: "/lib/systemd"
        - name: etc-kubernetes
          hostPath:
            path: "/etc/kubernetes"
        - name: usr-bin
          hostPath:
            path: "/usr/bin"

@@ -1,3 +1,4 @@
---
apiVersion: batch/v1
kind: Job
metadata:
@@ -6,33 +7,33 @@ spec:
  template:
    spec:
      hostPID: true
      nodeSelector:
        node-role.kubernetes.io/master: ""
      tolerations:
        - key: node-role.kubernetes.io/master
          operator: Exists
          effect: NoSchedule
      containers:
        - name: kube-bench
          image: aquasec/kube-bench:latest
          command: ["kube-bench", "master"]
          volumeMounts:
            - name: var-lib-etcd
              mountPath: /var/lib/etcd
            - name: etc-kubernetes
              mountPath: /etc/kubernetes
            # /usr/bin is mounted to access kubectl / kubelet, for auto-detecting the Kubernetes version.
            # You can omit this mount if you specify --version as part of the command.
            - name: usr-bin
              mountPath: /usr/bin
      restartPolicy: Never
      volumes:
        - name: var-lib-etcd
          hostPath:
            path: "/var/lib/etcd"
        - name: etc-kubernetes
          hostPath:
            path: "/etc/kubernetes"
        - name: usr-bin
          hostPath:
            path: "/usr/bin"

@@ -1,3 +1,4 @@
---
apiVersion: batch/v1
kind: Job
metadata:
@@ -7,31 +8,31 @@ spec:
    spec:
      hostPID: true
      containers:
        - name: kube-bench
          image: aquasec/kube-bench:latest
          command: ["kube-bench", "node"]
          volumeMounts:
            - name: var-lib-kubelet
              mountPath: /var/lib/kubelet
            - name: etc-systemd
              mountPath: /etc/systemd
            - name: etc-kubernetes
              mountPath: /etc/kubernetes
            # /usr/bin is mounted to access kubectl / kubelet, for auto-detecting the Kubernetes version.
            # You can omit this mount if you specify --version as part of the command.
            - name: usr-bin
              mountPath: /usr/bin
      restartPolicy: Never
      volumes:
        - name: var-lib-kubelet
          hostPath:
            path: "/var/lib/kubelet"
        - name: etc-systemd
          hostPath:
            path: "/etc/systemd"
        - name: etc-kubernetes
          hostPath:
            path: "/etc/kubernetes"
        - name: usr-bin
          hostPath:
            path: "/usr/bin"

job.yaml
@@ -1,3 +1,4 @@
---
apiVersion: batch/v1
kind: Job
metadata:
@@ -10,36 +11,36 @@ spec:
    spec:
      hostPID: true
      containers:
        - name: kube-bench
          image: aquasec/kube-bench:latest
          command: ["kube-bench"]
          volumeMounts:
            - name: var-lib-etcd
              mountPath: /var/lib/etcd
            - name: var-lib-kubelet
              mountPath: /var/lib/kubelet
            - name: etc-systemd
              mountPath: /etc/systemd
            - name: etc-kubernetes
              mountPath: /etc/kubernetes
            # /usr/bin is mounted to access kubectl / kubelet, for auto-detecting the Kubernetes version.
            # You can omit this mount if you specify --version as part of the command.
            - name: usr-bin
              mountPath: /usr/bin
      restartPolicy: Never
      volumes:
        - name: var-lib-etcd
          hostPath:
            path: "/var/lib/etcd"
        - name: var-lib-kubelet
          hostPath:
            path: "/var/lib/kubelet"
        - name: etc-systemd
          hostPath:
            path: "/etc/systemd"
        - name: etc-kubernetes
          hostPath:
            path: "/etc/kubernetes"
        - name: usr-bin
          hostPath:
            path: "/usr/bin"

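For reference, a common way to run one of these Job manifests and collect the report (assuming the Job is named kube-bench, as in the manifests above, and kubectl already points at the target cluster):

kubectl apply -f job.yaml
kubectl wait --for=condition=complete job/kube-bench --timeout=120s
kubectl logs job/kube-bench
kubectl delete -f job.yaml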