Mirror of https://github.com/aquasecurity/kube-bench.git, synced 2026-02-19 20:40:17 +00:00

Compare commits (16 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 277ec9c823 |  |
|  | b1369832bc |  |
|  | 934b4aef96 |  |
|  | e85de9e8af |  |
|  | ded5aff482 |  |
|  | b3a115963b |  |
|  | e5c05a97f7 |  |
|  | ba5ec8d4be |  |
|  | d56afd4104 |  |
|  | 8894b1dc4f |  |
|  | ff59938f94 |  |
|  | cc43fcbb7e |  |
|  | 2f4f55a363 |  |
|  | e9076233dd |  |
|  | b1e41d345f |  |
|  | ccc2b6c9ae |  |
README.md (38 lines changed)
@@ -5,22 +5,22 @@
 <img src="images/kube-bench.png" width="200" alt="kube-bench logo">

-kube-bench is a Go application that checks whether Kubernetes is deployed securely by running the checks documented in the CIS Kubernetes Benchmark.
+kube-bench is a Go application that checks whether Kubernetes is deployed securely by running the checks documented in the [CIS Kubernetes Benchmark](https://www.cisecurity.org/benchmark/kubernetes/).

 Tests are configured with YAML files, making this tool easy to update as test specifications evolve.

 

 ## CIS Kubernetes Benchmark support

-kube-bench supports the tests for multiple versions of Kubernetes (1.6, 1.7 and 1.8) as defined in the CIS Benchmarks 1.0.0, 1.1.0 and 1.2.0 respectively. It will determine the test set to run based on the Kubernetes version running on the machine.
+kube-bench supports the tests for multiple versions of Kubernetes (1.6, 1.7, 1.8, and 1.11) as defined in the CIS Benchmarks 1.0.0, 1.1.0, 1.2.0, and 1.3.0 respectively. It will determine the test set to run based on the Kubernetes version running on the machine.

 ## Installation

 You can choose to
 * run kube-bench from inside a container (sharing PID namespace with the host)
 * run a container that installs kube-bench on the host, and then run kube-bench directly on the host
 * install the latest binaries from the [Releases page](https://github.com/aquasecurity/kube-bench/releases),
 * compile it from source.

 ### Running inside a container

@@ -28,26 +28,28 @@ You can choose to
 You can avoid installing kube-bench on the host by running it inside a container using the host PID namespace.

 ```
-docker run --pid=host aquasec/kube-bench:latest <master|node>
+docker run --pid=host -t aquasec/kube-bench:latest <master|node>
 ```

 You can even use your own configs by mounting them over the default ones in `/opt/kube-bench/cfg/`

 ```
-docker run --pid=host -v path/to/my-config.yaml:/opt/kube-bench/cfg/config.yaml aquasec/kube-bench:latest <master|node>
+docker run --pid=host -t -v path/to/my-config.yaml:/opt/kube-bench/cfg/config.yaml aquasec/kube-bench:latest <master|node>
 ```

 > Note: the tests require either the kubelet or kubectl binary in the path in order to know the Kubernetes version. You can pass `-v $(which kubectl):/usr/bin/kubectl` to the above invocations to resolve this.

 ### Running in a kubernetes cluster
 Run the master check

 ```
-kubectl run --rm -i -t kube-bench-master --image=aquasec/kube-bench:latest --restart=Never --overrides="{ \"apiVersion\": \"v1\", \"spec\": { \"hostPID\": true, \"nodeSelector\": { \"kubernetes.io/role\": \"master\" }, \"tolerations\": [ { \"key\": \"node-role.kubernetes.io/master\", \"operator\": \"Exists\", \"effect\": \"NoSchedule\" } ] } }" -- master --version 1.8
+kubectl run --rm -i -t kube-bench-master --image=aquasec/kube-bench:latest --restart=Never --overrides="{ \"apiVersion\": \"v1\", \"spec\": { \"hostPID\": true, \"nodeSelector\": { \"kubernetes.io/role\": \"master\" }, \"tolerations\": [ { \"key\": \"node-role.kubernetes.io/master\", \"operator\": \"Exists\", \"effect\": \"NoSchedule\" } ] } }" -- master --version 1.11
 ```

 Run the node check

 ```
-kubectl run --rm -i -t kube-bench-node --image=aquasec/kube-bench:latest --restart=Never --overrides="{ \"apiVersion\": \"v1\", \"spec\": { \"hostPID\": true } }" -- node --version 1.8
+kubectl run --rm -i -t kube-bench-node --image=aquasec/kube-bench:latest --restart=Never --overrides="{ \"apiVersion\": \"v1\", \"spec\": { \"hostPID\": true } }" -- node --version 1.11
 ```

 ### Installing from a container

@@ -57,7 +59,7 @@ This command copies the kube-bench binary and configuration files to your host f
 docker run --rm -v `pwd`:/host aquasec/kube-bench:latest install
 ```

 You can then run `./kube-bench <master|node>`.

 ### Installing from sources

@@ -79,13 +81,13 @@ go build -o kube-bench .
 ```

 ## Configuration
 Kubernetes config and binary file locations and names can vary from installation to installation, so these are configurable in the `cfg/config.yaml` file.

 For each type of node (*master*, *node* or *federated*) there is a list of components, and for each component there is a set of binaries (*bins*) and config files (*confs*) that kube-bench will look for (in the order they are listed). If your installation uses a different binary name or config file location for a Kubernetes component, you can add it to `cfg/config.yaml`.

 * **bins** - If there is a *bins* list for a component, at least one of these binaries must be running. The tests will consider the parameters for the first binary in the list found to be running.
 * **podspecs** - From version 1.2.0 of the benchmark (tests for Kubernetes 1.8), the remediation instructions were updated to assume that the configuration for several kubernetes components is defined in a pod YAML file, and podspec settings define where to look for that configuration.
 * **confs** - If one of the listed config files is found, this will be considered for the test. Tests can continue even if no config file is found. If no file is found at any of the listed locations, and a *defaultconf* location is given for the component, the test will give remediation advice using the *defaultconf* location.
+* **unitfiles** - From version 1.2.0 of the benchmark (tests for Kubernetes 1.8), the remediation instructions were updated to assume that kubelet configuration is defined in a service file, and this setting defines where to look for that configuration.

 ## Test config YAML representation

@@ -145,7 +147,7 @@ These operations are:
 - `has`: tests if the flag value contains the compared value.
 - `nothave`: tests if the flag value does not contain the compared value.

 # Roadmap
 Going forward we plan to release updates to kube-bench to add support for new releases of the Benchmark, which in turn we can anticipate being made for each new Kubernetes release.

 We welcome PRs and issue reports.
cfg/1.11/config.yaml (new file, 47 lines)

@@ -0,0 +1,47 @@
```
---
## Controls Files.
# These are YAML files that hold all the details for running checks.
#
## Uncomment to use different control file paths.
# masterControls: ./cfg/master.yaml
# nodeControls: ./cfg/node.yaml
# federatedControls: ./cfg/federated.yaml

master:
  apiserver:
    confs:
      - /etc/kubernetes/manifests/kube-apiserver.yaml
      - /etc/kubernetes/manifests/kube-apiserver.manifest
    defaultconf: /etc/kubernetes/manifests/kube-apiserver.yaml

  scheduler:
    confs:
      - /etc/kubernetes/manifests/kube-scheduler.yaml
      - /etc/kubernetes/manifests/kube-scheduler.manifest
    defaultconf: /etc/kubernetes/manifests/kube-scheduler.yaml

  controllermanager:
    confs:
      - /etc/kubernetes/manifests/kube-controller-manager.yaml
      - /etc/kubernetes/manifests/kube-controller-manager.manifest
    defaultconf: /etc/kubernetes/manifests/kube-controller-manager.yaml

  etcd:
    confs:
      - /etc/kubernetes/manifests/etcd.yaml
      - /etc/kubernetes/manifests/etcd.manifest
    defaultconf: /etc/kubernetes/manifests/etcd.yaml

node:
  kubelet:
    confs:
      - /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
      - /etc/kubernetes/kubelet.conf
    defaultconf: /etc/systemd/system/kubelet.service.d/10-kubeadm.conf

  proxy:
    confs:
      - /etc/kubernetes/addons/kube-proxy-daemonset.yaml
    defaultconf: /etc/kubernetes/addons/kube-proxy-daemonset.yaml
```
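The *confs*/*defaultconf* behaviour described in the README diff above amounts to: use the first listed config file that exists on disk, otherwise fall back to *defaultconf*, which is only used when giving remediation advice. A minimal standalone sketch of that lookup, with illustrative names rather than the actual kube-bench code:

```go
package main

import (
	"fmt"
	"os"
)

// firstExistingConf mimics the documented behaviour of the confs list: return
// the first configured file that exists on disk; if none exist, fall back to
// defaultconf, which kube-bench only uses for remediation advice.
func firstExistingConf(confs []string, defaultconf string) (string, bool) {
	for _, c := range confs {
		if _, err := os.Stat(c); err == nil {
			return c, true
		}
	}
	return defaultconf, false
}

func main() {
	confs := []string{
		"/etc/kubernetes/manifests/kube-apiserver.yaml",
		"/etc/kubernetes/manifests/kube-apiserver.manifest",
	}
	conf, found := firstExistingConf(confs, "/etc/kubernetes/manifests/kube-apiserver.yaml")
	fmt.Printf("using %s (found on disk: %v)\n", conf, found)
}
```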
cfg/1.11/master.yaml (new file, 1442 lines)

File diff suppressed because it is too large.
cfg/1.11/node.yaml (new file, 497 lines)

@@ -0,0 +1,497 @@
```
---
controls:
version: 1.11
id: 2
text: "Worker Node Security Configuration"
type: "node"
groups:
- id: 2.1
  text: "Kubelet"
  checks:
  - id: 2.1.1
    text: "Ensure that the --allow-privileged argument is set to false (Scored)"
    audit: "ps -ef | grep $kubeletbin | grep -v grep"
    tests:
      test_items:
      - flag: "--allow-privileged"
        compare:
          op: eq
          value: false
        set: true
    remediation: |
      Edit the kubelet service file $kubeletconf
      on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
      --allow-privileged=false
      Based on your system, restart the kubelet service. For example:
      systemctl daemon-reload
      systemctl restart kubelet.service
    scored: true

  - id: 2.1.2
    text: "Ensure that the --anonymous-auth argument is set to false (Scored)"
    audit: "ps -ef | grep $kubeletbin | grep -v grep"
    tests:
      test_items:
      - flag: "--anonymous-auth"
        compare:
          op: eq
          value: false
        set: true
    remediation: |
      If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to
      false.
      If using executable arguments, edit the kubelet service file
      $kubeletconf on each worker node and
      set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
      --anonymous-auth=false
      Based on your system, restart the kubelet service. For example:
      systemctl daemon-reload
      systemctl restart kubelet.service
    scored: true

  - id: 2.1.3
    text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Scored)"
    audit: "ps -ef | grep $kubeletbin | grep -v grep"
    tests:
      test_items:
      - flag: "--authorization-mode"
        compare:
          op: nothave
          value: "AlwaysAllow"
        set: true
    remediation: |
      If using a Kubelet config file, edit the file to set authorization: mode to Webhook.
      If using executable arguments, edit the kubelet service file
      $kubeletconf on each worker node and
      set the below parameter in KUBELET_AUTHZ_ARGS variable.
      --authorization-mode=Webhook
      Based on your system, restart the kubelet service. For example:
      systemctl daemon-reload
      systemctl restart kubelet.service
    scored: true

  - id: 2.1.4
    text: "Ensure that the --client-ca-file argument is set as appropriate (Scored)"
    audit: "ps -ef | grep $kubeletbin | grep -v grep"
    tests:
      test_items:
      - flag: "--client-ca-file"
        set: true
    remediation: |
      If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to
      the location of the client CA file.
      If using command line arguments, edit the kubelet service file
      $kubeletconf on each worker node and
      set the below parameter in KUBELET_AUTHZ_ARGS variable.
      --client-ca-file=<path/to/client-ca-file>
      Based on your system, restart the kubelet service. For example:
      systemctl daemon-reload
      systemctl restart kubelet.service
    scored: true

  - id: 2.1.5
    text: "Ensure that the --read-only-port argument is set to 0 (Scored)"
    audit: "ps -ef | grep $kubeletbin | grep -v grep"
    tests:
      test_items:
      - flag: "--read-only-port"
        compare:
          op: eq
          value: 0
        set: true
    remediation: |
      If using a Kubelet config file, edit the file to set readOnlyPort to 0.
      If using command line arguments, edit the kubelet service file
      $kubeletconf on each worker node and
      set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
      --read-only-port=0
      Based on your system, restart the kubelet service. For example:
      systemctl daemon-reload
      systemctl restart kubelet.service
    scored: true

  - id: 2.1.6
    text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Scored)"
    audit: "ps -ef | grep $kubeletbin | grep -v grep"
    tests:
      test_items:
      - flag: "--streaming-connection-idle-timeout"
        compare:
          op: noteq
          value: 0
        set: true
    remediation: |
      If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a
      value other than 0.
      If using command line arguments, edit the kubelet service file
      $kubeletconf on each worker node and
      set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
      --streaming-connection-idle-timeout=5m
      Based on your system, restart the kubelet service. For example:
      systemctl daemon-reload
      systemctl restart kubelet.service
    scored: true

  - id: 2.1.7
    text: "Ensure that the --protect-kernel-defaults argument is set to true (Scored)"
    audit: "ps -ef | grep $kubeletbin | grep -v grep"
    tests:
      test_items:
      - flag: "--protect-kernel-defaults"
        compare:
          op: eq
          value: true
        set: true
    remediation: |
      If using a Kubelet config file, edit the file to set protectKernelDefaults: true.
      If using command line arguments, edit the kubelet service file
      $kubeletconf on each worker node and
      set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
      --protect-kernel-defaults=true
      Based on your system, restart the kubelet service. For example:
      systemctl daemon-reload
      systemctl restart kubelet.service
    scored: true

  - id: 2.1.8
    text: "Ensure that the --make-iptables-util-chains argument is set to true (Scored)"
    audit: "ps -ef | grep $kubeletbin | grep -v grep"
    tests:
      bin_op: or
      test_items:
      - flag: "--make-iptables-util-chains"
        compare:
          op: eq
          value: true
        set: true
    remediation: |
      If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true.
      If using command line arguments, edit the kubelet service file
      $kubeletconf on each worker node and
      remove the --make-iptables-util-chains argument from the
      KUBELET_SYSTEM_PODS_ARGS variable.
      Based on your system, restart the kubelet service. For example:
      systemctl daemon-reload
      systemctl restart kubelet.service
    scored: true

  - id: 2.1.9
    text: "Ensure that the --hostname-override argument is not set (Scored)"
    audit: "ps -ef | grep $kubeletbin | grep -v grep"
    tests:
      test_items:
      - flag: "--hostname-override"
        set: false
    remediation: |
      Edit the kubelet service file $kubeletconf
      on each worker node and remove the --hostname-override argument from the
      KUBELET_SYSTEM_PODS_ARGS variable.
      Based on your system, restart the kubelet service. For example:
      systemctl daemon-reload
      systemctl restart kubelet.service
    scored: true

  - id: 2.1.10
    text: "Ensure that the --event-qps argument is set to 0 (Scored)"
    audit: "ps -ef | grep $kubeletbin | grep -v grep"
    tests:
      test_items:
      - flag: "--event-qps"
        compare:
          op: eq
          value: 0
        set: true
    remediation: |
      If using a Kubelet config file, edit the file to set eventRecordQPS: 0.
      If using command line arguments, edit the kubelet service file
      $kubeletconf on each worker node and
      set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
      --event-qps=0
      Based on your system, restart the kubelet service. For example:
      systemctl daemon-reload
      systemctl restart kubelet.service
    scored: true

  - id: 2.1.11
    text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Scored)"
    audit: "ps -ef | grep $kubeletbin | grep -v grep"
    tests:
      bin_op: and
      test_items:
      - flag: "--tls-cert-file"
        set: true
      - flag: "--tls-private-key-file"
        set: true
    remediation: |
      If using a Kubelet config file, edit the file to set tlsCertFile to the location of the certificate
      file to use to identify this Kubelet, and tlsPrivateKeyFile to the location of the
      corresponding private key file.
      If using command line arguments, edit the kubelet service file
      $kubeletconf on each worker node and
      set the below parameters in KUBELET_CERTIFICATE_ARGS variable.
      --tls-cert-file=<path/to/tls-certificate-file>
      --tls-private-key-file=<path/to/tls-key-file>
      Based on your system, restart the kubelet service. For example:
      systemctl daemon-reload
      systemctl restart kubelet.service
    scored: true

  - id: 2.1.12
    text: "Ensure that the --cadvisor-port argument is set to 0 (Scored)"
    audit: "ps -ef | grep $kubeletbin | grep -v grep"
    tests:
      bin_op: or
      test_items:
      - flag: "--cadvisor-port"
        compare:
          op: eq
          value: 0
        set: true
      - flag: "--cadvisor-port"
        set: false
    remediation: |
      Edit the kubelet service file $kubeletconf
      on each worker node and set the below parameter in KUBELET_CADVISOR_ARGS variable.
      --cadvisor-port=0
      Based on your system, restart the kubelet service. For example:
      systemctl daemon-reload
      systemctl restart kubelet.service
    scored: true

  - id: 2.1.13
    text: "Ensure that the --rotate-certificates argument is not set to false (Scored)"
    audit: "ps -ef | grep $kubeletbin | grep -v grep"
    tests:
      test_items:
      - flag: "--rotate-certificates"
        compare:
          op: eq
          value: true
        set: true
    remediation: |
      If using a Kubelet config file, edit the file to add the line rotateCertificates: true.
      If using command line arguments, edit the kubelet service file $kubeletconf
      on each worker node and add --rotate-certificates=true argument to the KUBELET_CERTIFICATE_ARGS variable.
      Based on your system, restart the kubelet service. For example:
      systemctl daemon-reload
      systemctl restart kubelet.service
    scored: true

  - id: 2.1.14
    text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Scored)"
    audit: "ps -ef | grep $kubeletbin | grep -v grep"
    tests:
      test_items:
      - flag: "RotateKubeletServerCertificate"
        compare:
          op: eq
          value: true
        set: true
    remediation: |
      Edit the kubelet service file $kubeletconf
      on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable.
      --feature-gates=RotateKubeletServerCertificate=true
      Based on your system, restart the kubelet service. For example:
      systemctl daemon-reload
      systemctl restart kubelet.service
    scored: true

  - id: 2.1.15
    text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Not Scored)"
    audit: "ps -ef | grep $kubeletbin | grep -v grep"
    tests:
      test_items:
      - flag: "--tls-cipher-suites"
        compare:
          op: eq
          value: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"
        set: true
    remediation: |
      If using a Kubelet config file, edit the file to set TLSCipherSuites: to TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
      If using executable arguments, edit the kubelet service file $kubeletconf on each worker node and set the below parameter.
      --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
    scored: false

- id: 2.2
  text: "Configuration Files"
  checks:
  - id: 2.2.1
    text: "Ensure that the kubelet.conf file permissions are set to 644 or
    more restrictive (Scored)"
    audit: "/bin/sh -c 'if test -e $kubeletconf; then stat -c %a $kubeletconf; fi'"
    tests:
      bin_op: or
      test_items:
      - flag: "644"
        compare:
          op: eq
          value: "644"
        set: true
      - flag: "640"
        compare:
          op: eq
          value: "640"
        set: true
      - flag: "600"
        compare:
          op: eq
          value: "600"
        set: true
    remediation: |
      Run the below command (based on the file location on your system) on each worker
      node. For example,
      chmod 644 $kubeletconf
    scored: true

  - id: 2.2.2
    text: "Ensure that the kubelet.conf file ownership is set to root:root (Scored)"
    audit: "/bin/sh -c 'if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'"
    tests:
      test_items:
      - flag: "root:root"
        compare:
          op: eq
          value: root:root
        set: true
    remediation: |
      Run the below command (based on the file location on your system) on each worker
      node. For example,
      chown root:root $kubeletconf
    scored: true

  - id: 2.2.3
    text: "Ensure that the kubelet service file permissions are set to 644 or
    more restrictive (Scored)"
    audit: "/bin/sh -c 'if test -e $kubeletconf; then stat -c %a $kubeletconf; fi'"
    tests:
      bin_op: or
      test_items:
      - flag: "644"
        compare:
          op: eq
          value: 644
        set: true
      - flag: "640"
        compare:
          op: eq
          value: "640"
        set: true
      - flag: "600"
        compare:
          op: eq
          value: "600"
        set: true
    remediation: |
      Run the below command (based on the file location on your system) on each worker
      node. For example,
      chmod 755 $kubeletconf
    scored: true

  - id: 2.2.4
    text: "Ensure that the kubelet service file ownership is set to root:root (Scored)"
    audit: "/bin/sh -c 'if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'"
    tests:
      test_items:
      - flag: "root:root"
        set: true
    remediation: |
      Run the below command (based on the file location on your system) on each worker
      node. For example,
      chown root:root $kubeletconf
    scored: true

  - id: 2.2.5
    text: "Ensure that the proxy kubeconfig file permissions are set to 644 or more restrictive (Scored)"
    audit: "/bin/sh -c 'if test -e $proxyconf; then stat -c %a $proxyconf; fi'"
    tests:
      bin_op: or
      test_items:
      - flag: "644"
        compare:
          op: eq
          value: "644"
        set: true
      - flag: "640"
        compare:
          op: eq
          value: "640"
        set: true
      - flag: "600"
        compare:
          op: eq
          value: "600"
        set: true
    remediation: |
      Run the below command (based on the file location on your system) on each worker
      node. For example,
      chmod 644 $proxyconf
    scored: true

  - id: 2.2.6
    text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Scored)"
    audit: "/bin/sh -c 'if test -e $proxyconf; then stat -c %U:%G $proxyconf; fi'"
    tests:
      test_items:
      - flag: "root:root"
        set: true
    remediation: |
      Run the below command (based on the file location on your system) on each worker
      node. For example,
      chown root:root $proxyconf
    scored: true

  - id: 2.2.7
    text: "Ensure that the certificate authorities file permissions are set to
    644 or more restrictive (Scored)"
    type: manual
    remediation: |
      Run the following command to modify the file permissions of the --client-ca-file
      chmod 644 <filename>
    scored: true

  - id: 2.2.8
    text: "Ensure that the client certificate authorities file ownership is set to root:root (Scored)"
    audit: "/bin/sh -c 'if test -e $ca-file; then stat -c %U:%G $ca-file; fi'"
    type: manual
    remediation: |
      Run the following command to modify the ownership of the --client-ca-file.
      chown root:root <filename>
    scored: true

  - id: 2.2.9
    text: "Ensure that the kubelet configuration file ownership is set to root:root (Scored)"
    audit: "/bin/sh -c 'if test -e $/var/lib/kubelet/config.yaml; then stat -c %U:%G $/var/lib/kubelet/config.yaml; fi'"
    tests:
      test_items:
      - flag: "root:root"
        set: true
    remediation: |
      Run the following command (using the config file location identified in the Audit step)
      chown root:root /etc/kubernetes/kubelet.conf
    scored: true

  - id: 2.2.10
    text: "Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Scored)"
    audit: "/bin/sh -c 'if test -e $/var/lib/kubelet/config.yaml; then stat -c %a $/var/lib/kubelet/config.yaml; fi'"
    tests:
      bin_op: or
      test_items:
      - flag: "644"
        compare:
          op: eq
          value: "644"
        set: true
      - flag: "640"
        compare:
          op: eq
          value: "640"
        set: true
      - flag: "600"
        compare:
          op: eq
          value: "600"
        set: true
    remediation: |
      Run the following command (using the config file location identified in the Audit step)
      chmod 644 /var/lib/kubelet/config.yaml
    scored: true
```
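Each check above is unmarshalled into Go structures like the ones touched further down in this compare (compare, testItem, tests). A rough sketch of that mapping, using gopkg.in/yaml.v2 and struct definitions trimmed down from the yaml tags visible in the diffs; the real types carry more fields:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// Trimmed-down stand-ins for the check types; the field names follow the
// YAML keys used in the controls files above.
type compare struct {
	Op    string `yaml:"op"`
	Value string `yaml:"value"`
}

type testItem struct {
	Flag    string  `yaml:"flag"`
	Set     bool    `yaml:"set"`
	Compare compare `yaml:"compare"`
}

type tests struct {
	TestItems []testItem `yaml:"test_items"`
	BinOp     string     `yaml:"bin_op"`
}

const checkYAML = `
test_items:
- flag: "--allow-privileged"
  compare:
    op: eq
    value: false
  set: true
`

func main() {
	var ts tests
	if err := yaml.Unmarshal([]byte(checkYAML), &ts); err != nil {
		panic(err)
	}
	// Prints the parsed flag, comparison operator and expected value.
	fmt.Printf("%+v\n", ts)
}
```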
@@ -60,16 +60,17 @@ func handleError(err error, context string) (errmsg string) {
 // Check contains information about a recommendation in the
 // CIS Kubernetes 1.6+ document.
 type Check struct {
 	ID          string      `yaml:"id" json:"test_number"`
 	Text        string      `json:"test_desc"`
 	Audit       string      `json:"omit"`
 	Type        string      `json:"type"`
 	Commands    []*exec.Cmd `json:"omit"`
 	Tests       *tests      `json:"omit"`
 	Set         bool        `json:"omit"`
 	Remediation string      `json:"-"`
 	TestInfo    []string    `json:"test_info"`
 	State       `json:"status"`
+	ActualValue string      `json:"actual_value"`
 }

 // Run executes the audit commands specified in a check and outputs

@@ -157,15 +158,25 @@ func (c *Check) Run() {
         i++
     }

-    if errmsgs != "" {
-        glog.V(2).Info(errmsgs)
-    }
-
-    res := c.Tests.execute(out.String())
-    if res {
-        c.State = PASS
-    } else {
-        c.State = FAIL
-    }
+    finalOutput := c.Tests.execute(out.String())
+    if finalOutput != nil {
+        c.ActualValue = finalOutput.actualResult
+        if finalOutput.testResult {
+            c.State = PASS
+        } else {
+            c.State = FAIL
+        }
+    } else {
+        errmsgs += handleError(
+            fmt.Errorf("final output is nil"),
+            fmt.Sprintf("failed to run: %s\n",
+                c.Audit,
+            ),
+        )
+    }
+
+    if errmsgs != "" {
+        glog.V(2).Info(errmsgs)
+    }
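The rewritten Run records the first test item's observed value on the check and derives PASS or FAIL from testOutput.testResult. A condensed, self-contained model of that decision flow (local stand-in types and an illustrative method name, not the full check package):

```go
package main

import "fmt"

type State string

const (
	PASS State = "PASS"
	FAIL State = "FAIL"
)

type testOutput struct {
	testResult   bool
	actualResult string
}

type Check struct {
	State       State
	ActualValue string
}

// apply mirrors the logic added to Run: a nil output is an error condition;
// otherwise testResult selects PASS or FAIL and the observed value is kept on
// the check for reporting.
func (c *Check) apply(out *testOutput) error {
	if out == nil {
		return fmt.Errorf("final output is nil")
	}
	c.ActualValue = out.actualResult
	if out.testResult {
		c.State = PASS
	} else {
		c.State = FAIL
	}
	return nil
}

func main() {
	c := &Check{}
	if err := c.apply(&testOutput{testResult: true, actualResult: "false"}); err == nil {
		fmt.Println(c.State, c.ActualValue)
	}
}
```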
@@ -23,9 +23,9 @@ import (

 // Controls holds all controls to check for master nodes.
 type Controls struct {
 	ID      string   `yaml:"id" json:"id"`
 	Version string   `json:"version"`
 	Text    string   `json:"text"`
 	Type    NodeType `json:"node_type"`
 	Groups  []*Group `json:"tests"`
 	Summary

@@ -43,9 +43,9 @@ type Group struct {

 // Summary is a summary of the results of control checks run.
 type Summary struct {
 	Pass int `json:"total_pass"`
 	Fail int `json:"total_fail"`
 	Warn int `json:"total_warn"`
 }

 // NewControls instantiates a new master Controls object.
@@ -17,7 +17,7 @@ groups:
 - id: 1
   text: "flag is not set"
   tests:
-    test_item:
+    test_items:
     - flag: "--basic-auth"
       set: false
@@ -49,8 +49,13 @@ type compare struct {
     Value string
 }

-func (t *testItem) execute(s string) (result bool) {
-    result = false
+type testOutput struct {
+    testResult   bool
+    actualResult string
+}
+
+func (t *testItem) execute(s string) *testOutput {
+    result := &testOutput{}
     match := strings.Contains(s, t.Flag)

     if t.Set {

@@ -78,57 +83,57 @@ func (t *testItem) execute(s string) (result bool) {
                 os.Exit(1)
             }

+            result.actualResult = strings.ToLower(flagVal)
             switch t.Compare.Op {
             case "eq":
                 value := strings.ToLower(flagVal)
                 // Do case insensitive comparaison for booleans ...
                 if value == "false" || value == "true" {
-                    result = value == t.Compare.Value
+                    result.testResult = value == t.Compare.Value
                 } else {
-                    result = flagVal == t.Compare.Value
+                    result.testResult = flagVal == t.Compare.Value
                 }

             case "noteq":
                 value := strings.ToLower(flagVal)
                 // Do case insensitive comparaison for booleans ...
                 if value == "false" || value == "true" {
-                    result = !(value == t.Compare.Value)
+                    result.testResult = !(value == t.Compare.Value)
                 } else {
-                    result = !(flagVal == t.Compare.Value)
+                    result.testResult = !(flagVal == t.Compare.Value)
                 }

             case "gt":
                 a, b := toNumeric(flagVal, t.Compare.Value)
-                result = a > b
+                result.testResult = a > b

             case "gte":
                 a, b := toNumeric(flagVal, t.Compare.Value)
-                result = a >= b
+                result.testResult = a >= b

             case "lt":
                 a, b := toNumeric(flagVal, t.Compare.Value)
-                result = a < b
+                result.testResult = a < b

             case "lte":
                 a, b := toNumeric(flagVal, t.Compare.Value)
-                result = a <= b
+                result.testResult = a <= b

             case "has":
-                result = strings.Contains(flagVal, t.Compare.Value)
+                result.testResult = strings.Contains(flagVal, t.Compare.Value)

             case "nothave":
-                result = !strings.Contains(flagVal, t.Compare.Value)
+                result.testResult = !strings.Contains(flagVal, t.Compare.Value)
             }
         } else {
-            result = isset
+            result.testResult = isset
         }

     } else {
         notset := !match
-        result = notset
+        result.testResult = notset
     }

-    return
+    return result
 }

 type tests struct {

@@ -136,13 +141,19 @@ type tests struct {
     BinOp     binOp `yaml:"bin_op"`
 }

-func (ts *tests) execute(s string) (result bool) {
-    res := make([]bool, len(ts.TestItems))
-
-    for i, t := range ts.TestItems {
-        res[i] = t.execute(s)
+func (ts *tests) execute(s string) *testOutput {
+    finalOutput := &testOutput{}
+
+    res := make([]testOutput, len(ts.TestItems))
+    if len(res) == 0 {
+        return finalOutput
+    }
+
+    for i, t := range ts.TestItems {
+        res[i] = *(t.execute(s))
     }

+    var result bool
     // If no binary operation is specified, default to AND
     switch ts.BinOp {
     default:

@@ -151,16 +162,19 @@ func (ts *tests) execute(s string) (result bool) {
     case and, "":
         result = true
         for i := range res {
-            result = result && res[i]
+            result = result && res[i].testResult
         }
     case or:
         result = false
         for i := range res {
-            result = result || res[i]
+            result = result || res[i].testResult
         }
     }

-    return
+    finalOutput.testResult = result
+    finalOutput.actualResult = res[0].actualResult
+
+    return finalOutput
 }

 func toNumeric(a, b string) (c, d int) {
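With execute returning a *testOutput per item, the group verdict is folded according to bin_op (AND by default, OR when requested) and the reported actual value is taken from the first item. A standalone sketch of that aggregation with simplified types and an assumed helper name:

```go
package main

import "fmt"

type testOutput struct {
	testResult   bool
	actualResult string
}

// aggregate sketches the bin_op fold performed by (*tests).execute: AND over
// all items unless "or" is requested, with the reported actual value taken
// from the first item. The function name is illustrative.
func aggregate(binOp string, items []testOutput) *testOutput {
	final := &testOutput{}
	if len(items) == 0 {
		return final
	}

	result := true
	switch binOp {
	case "or":
		result = false
		for _, it := range items {
			result = result || it.testResult
		}
	default: // "and" or unspecified
		for _, it := range items {
			result = result && it.testResult
		}
	}

	final.testResult = result
	final.actualResult = items[0].actualResult
	return final
}

func main() {
	// Mirrors a check like 2.1.12, where either of two test items may satisfy it.
	out := aggregate("or", []testOutput{
		{testResult: false, actualResult: "--cadvisor-port=8080"},
		{testResult: true, actualResult: ""},
	})
	fmt.Println(out.testResult, out.actualResult)
}
```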
@@ -113,7 +113,7 @@ func TestTestExecute(t *testing.T) {
     }

     for _, c := range cases {
-        res := c.Tests.execute(c.str)
+        res := c.Tests.execute(c.str).testResult
         if !res {
             t.Errorf("%s, expected:%v, got:%v\n", c.Text, true, res)
         }
@@ -44,7 +44,11 @@ func runChecks(nodetype check.NodeType) {
         file = federatedFile
     }

-    path, err := getConfigFilePath(kubeVersion, getKubeVersion(), file)
+    runningVersion, err := getKubeVersion()
+    if err != nil && kubeVersion == "" {
+        exitWithError(fmt.Errorf("Version check failed: %s\nAlternatively, you can specify the version with --version", err))
+    }
+    path, err := getConfigFilePath(kubeVersion, runningVersion, file)
     if err != nil {
         exitWithError(fmt.Errorf("can't find %s controls file in %s: %v", nodetype, cfgDir, err))
     }
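The runChecks change above makes version detection non-fatal when --version is supplied: detection runs first, and its error only aborts the run if no version was given explicitly. A small sketch of that precedence rule (helper names here are hypothetical):

```go
package main

import (
	"errors"
	"fmt"
)

// resolveVersion applies the precedence used by runChecks after this change:
// an explicitly supplied --version always wins; otherwise the detected version
// is used, and a detection error is only fatal when nothing was supplied.
func resolveVersion(specified string, detect func() (string, error)) (string, error) {
	detected, err := detect()
	if err != nil && specified == "" {
		return "", fmt.Errorf("version check failed: %v; alternatively, specify the version with --version", err)
	}
	if specified != "" {
		return specified, nil
	}
	return detected, nil
}

func main() {
	// Simulate a host where neither kubectl nor kubelet is on the PATH.
	detect := func() (string, error) { return "", errors.New("need kubectl or kubelet binaries") }

	if v, err := resolveVersion("1.11", detect); err == nil {
		fmt.Println("using version", v) // the explicit flag wins
	}
	if _, err := resolveVersion("", detect); err != nil {
		fmt.Println("error:", err)
	}
}
```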
@@ -46,7 +46,7 @@ var (
 var RootCmd = &cobra.Command{
     Use:   os.Args[0],
     Short: "Run CIS Benchmarks checks against a Kubernetes deployment",
-    Long:  `This tool runs the CIS Kubernetes Benchmark (http://www.cisecurity.org/benchmark/kubernetes/)`,
+    Long:  `This tool runs the CIS Kubernetes Benchmark (https://www.cisecurity.org/benchmark/kubernetes/)`,
 }

 // Execute adds all child commands to the root command sets flags appropriately.
cmd/util.go (10 lines changed)

@@ -129,6 +129,8 @@ func getConfigFilePath(specifiedVersion string, runningVersion string, filename
         fileVersion = runningVersion
     }

+    glog.V(2).Info(fmt.Sprintf("Looking for config for version %s", fileVersion))
+
     for {
         path = filepath.Join(cfgDir, fileVersion)
         file := filepath.Join(path, string(filename))

@@ -265,19 +267,19 @@ func multiWordReplace(s string, subname string, sub string) string {
     return strings.Replace(s, subname, sub, -1)
 }

-func getKubeVersion() string {
+func getKubeVersion() (string, error) {
     // These executables might not be on the user's path.
     _, err := exec.LookPath("kubectl")

     if err != nil {
         _, err = exec.LookPath("kubelet")
         if err != nil {
-            exitWithError(fmt.Errorf("Version check failed: need kubectl or kubelet binaries to get kubernetes version.\nAlternately, you can specify the version with --version"))
+            return "", fmt.Errorf("need kubectl or kubelet binaries to get kubernetes version")
         }
-        return getKubeVersionFromKubelet()
+        return getKubeVersionFromKubelet(), nil
     }

-    return getKubeVersionFromKubectl()
+    return getKubeVersionFromKubectl(), nil
 }

 func getKubeVersionFromKubectl() string {
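getKubeVersion now reports failure to its caller instead of exiting, which is what allows runChecks to fall back to --version. A self-contained sketch of the same pattern, probing the PATH with exec.LookPath and returning an error; the FromKubectl/FromKubelet helpers are stand-ins for the real version parsers:

```go
package main

import (
	"fmt"
	"os/exec"
)

// detectKubeVersion follows the same shape as the refactored getKubeVersion:
// prefer kubectl, fall back to kubelet, and return an error rather than
// exiting when neither binary is on the PATH.
func detectKubeVersion(fromKubectl, fromKubelet func() string) (string, error) {
	if _, err := exec.LookPath("kubectl"); err == nil {
		return fromKubectl(), nil
	}
	if _, err := exec.LookPath("kubelet"); err == nil {
		return fromKubelet(), nil
	}
	return "", fmt.Errorf("need kubectl or kubelet binaries to get kubernetes version")
}

func main() {
	// Stand-in helpers; the real ones shell out and parse the reported version.
	kubectlVer := func() string { return "1.11" }
	kubeletVer := func() string { return "1.11" }

	v, err := detectKubeVersion(kubectlVer, kubeletVer)
	if err != nil {
		fmt.Println("falling back to --version:", err)
		return
	}
	fmt.Println("detected version", v)
}
```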