add checks for CIS benchmarks of rh-1.8 (#1945)

Co-authored-by: afdesk <work@afdesk.com>
Authored by LaibaBareera on 2025-09-16 13:00:14 +05:00, committed by GitHub
parent e3becc9f19
commit 21dd168736
9 changed files with 2522 additions and 3 deletions

cfg/config.yaml

@@ -298,7 +298,8 @@ version_mapping:
"ocp-3.10": "rh-0.7"
"ocp-3.11": "rh-0.7"
"ocp-4.0": "rh-1.0"
"ocp-4.17": "rh-1.4"
"ocp-4.11": "rh-1.4"
"ocp-4.13": "rh-1.8"
"aks-1.0": "aks-1.0"
"aks-1.7": "aks-1.7"
"ack-1.0": "ack-1.0"
@@ -471,6 +472,12 @@ target_mapping:
- "controlplane"
- "policies"
- "etcd"
"rh-1.8":
- "master"
- "node"
- "controlplane"
- "policies"
- "etcd"
"eks-stig-kubernetes-v1r6":
- "node"
- "controlplane"

cfg/rh-1.8/config.yaml (new file, 2 lines)

@@ -0,0 +1,2 @@
---
## Version-specific settings that override the values in cfg/config.yaml

cfg/rh-1.8/controlplane.yaml (new file, 62 lines)

@@ -0,0 +1,62 @@
---
controls:
version: rh-1.8
id: 3
text: "Control Plane Configuration"
type: "controlplane"
groups:
- id: 3.1
text: "Authentication and Authorization"
checks:
- id: 3.1.1
text: "Client certificate authentication should not be used for users (Manual)"
audit: |
# To verify user authentication is enabled
oc describe authentication
# To verify that an identity provider is configured
oc get identity
# To verify that a custom cluster-admin user exists
oc get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].kind | grep cluster-admin | grep User
# To verify that kubeadmin is removed, no results should be returned
oc get secrets kubeadmin -n kube-system
type: manual
remediation: |
Configure an identity provider for the OpenShift cluster. See "Understanding
identity provider configuration" in the OpenShift Container Platform
authentication documentation. Once an identity provider has been defined,
you can use RBAC to define and apply permissions.
After you define an identity provider and create a new cluster-admin user,
remove the kubeadmin user to improve cluster security.
scored: false
- id: 3.2
text: "Logging"
checks:
- id: 3.2.1
text: "Ensure that a minimal audit policy is created (Manual)"
audit: |
#To view kube apiserver log files
oc adm node-logs --role=master --path=kube-apiserver/
#To view openshift apiserver log files
oc adm node-logs --role=master --path=openshift-apiserver/
#To verify kube apiserver audit config
oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig[]?'
#To verify openshift apiserver audit config
oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig[]?'
type: manual
remediation: |
No remediation required.
scored: false
- id: 3.2.2
text: "Ensure that the audit policy covers key security concerns (Manual)"
audit: |
#To verify kube apiserver audit config
oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]?'
#To verify openshift apiserver audit config
oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]?'
type: manual
remediation: |
In OpenShift 4.6 and higher, if appropriate for your needs,
modify the audit policy.
scored: false
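All three checks in this group are manual (scored: false), so kube-bench reports the audit commands and remediation text rather than a pass/fail result. A short sketch of exercising just this file, assuming the existing --check flag for selecting individual checks:

kube-bench run --benchmark rh-1.8 --targets controlplane --check 3.1.1,3.2.1,3.2.2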

cfg/rh-1.8/etcd.yaml (new file, 183 lines)

@@ -0,0 +1,183 @@
---
controls:
version: rh-1.8
id: 2
text: "Etcd"
type: "etcd"
groups:
- id: 2
text: "Etcd"
checks:
- id: 2.1
text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Manual)"
audit: |
# Get the node name where the pod is running
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# Get the pod name in the openshift-etcd namespace
POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching file found on the current node."
else
# Execute the stat command
oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--cert-file=[^ ]*\).*/\1/'
oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--key-file=[^ ]*\).*/\1/'
fi
use_multiple_values: true
tests:
test_items:
- flag: "file"
compare:
op: regex
value: '\/etc\/kubernetes\/static-pod-certs\/secrets\/etcd-all-(serving|certs)\/etcd-serving-.*\.(?:crt|key)'
remediation: |
OpenShift does not use the etcd-certfile or etcd-keyfile flags.
Certificates for etcd are managed by the etcd cluster operator.
scored: true
- id: 2.2
text: "Ensure that the --client-cert-auth argument is set to true (Manual)"
audit: |
# Get the node name where the pod is running
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# Get the pod name in the openshift-etcd namespace
POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching file found on the current node."
else
# Execute the stat command
oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--client-cert-auth=[^ ]*\).*/\1/'
fi
use_multiple_values: true
tests:
test_items:
- flag: "--client-cert-auth"
compare:
op: eq
value: true
remediation: |
This setting is managed by the cluster etcd operator. No remediation required.
scored: true
- id: 2.3
text: "Ensure that the --auto-tls argument is not set to true (Manual)"
audit: |
# Returns 0 if found, 1 if not found
# Get the node name where the pod is running
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# Get the pod name in the openshift-etcd namespace
POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching file found on the current node."
else
# Execute the stat command
oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | grep -- --auto-tls=true 2>/dev/null ; echo exit_code=$?
fi
use_multiple_values: true
tests:
test_items:
- flag: "exit_code"
compare:
op: eq
value: "1"
remediation: |
This setting is managed by the cluster etcd operator. No remediation required.
scored: true
- id: 2.4
text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Manual)"
audit: |
# Get the node name where the pod is running
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# Get the pod name in the openshift-etcd namespace
POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching file found on the current node."
else
# Execute the stat command
oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-cert-file=[^ ]*\).*/\1/'
oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-key-file=[^ ]*\).*/\1/'
fi
use_multiple_values: true
tests:
test_items:
- flag: "file"
compare:
op: regex
value: '\/etc\/kubernetes\/static-pod-certs\/secrets\/etcd-all-(peer|certs)\/etcd-peer-.*\.(?:crt|key)'
remediation: |
None. This configuration is managed by the etcd operator.
scored: true
- id: 2.5
text: "Ensure that the --peer-client-cert-auth argument is set to true (Manual)"
audit: |
# Get the node name where the pod is running
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# Get the pod name in the openshift-etcd namespace
POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching file found on the current node."
else
# Execute the stat command
oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-client-cert-auth=[^ ]*\).*/\1/'
fi
use_multiple_values: true
tests:
test_items:
- flag: "--peer-client-cert-auth"
compare:
op: eq
value: true
remediation: |
This setting is managed by the cluster etcd operator. No remediation required.
scored: true
- id: 2.6
text: "Ensure that the --peer-auto-tls argument is not set to true (Manual)"
audit: |
# Returns 0 if found, 1 if not found
# Get the node name where the pod is running
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# Get the pod name in the openshift-etcd namespace
POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching file found on the current node."
else
# Execute the stat command
oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | grep -- --peer-auto-tls=true 2>/dev/null ; echo exit_code=$?
fi
use_multiple_values: true
tests:
test_items:
- flag: "exit_code"
compare:
op: eq
value: "1"
remediation: |
This setting is managed by the cluster etcd operator. No remediation required.
scored: true
- id: 2.7
text: "Ensure that a unique Certificate Authority is used for etcd (Manual)"
audit: |
# Get the node name where the pod is running
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# Get the pod name in the openshift-etcd namespace
POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching file found on the current node."
else
# Execute the stat command
oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--trusted-ca-file=[^ ]*\).*/\1/'
oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-trusted-ca-file=[^ ]*\).*/\1/'
fi
use_multiple_values: true
tests:
test_items:
- flag: "file"
compare:
op: regex
value: '\/etc\/kubernetes\/static-pod-certs\/configmaps\/(?:etcd-(?:serving|peer-client)-ca\/ca-bundle\.crt|etcd-all-bundles\/server-ca-bundle\.crt)'
remediation: |
None required. Certificates for etcd are managed by the OpenShift cluster etcd operator.
scored: true

cfg/rh-1.8/master.yaml (new file, 1285 lines; diff not shown because it is too large)

cfg/rh-1.8/node.yaml (new file, 485 lines)

@@ -0,0 +1,485 @@
---
controls:
version: rh-1.8
id: 4
text: "Worker Nodes"
type: "node"
groups:
- id: 4.1
text: "Worker Node Configuration Files"
checks:
- id: 4.1.1
text: "Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated)"
audit: |
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/systemd/system/kubelet.service 2> /dev/null
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "644"
remediation: |
By default, the kubelet service file has permissions of 644.
scored: true
- id: 4.1.2
text: "Ensure that the kubelet service file ownership is set to root:root (Automated)"
audit: |
# Should return root:root for each node
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/systemd/system/kubelet.service 2> /dev/null
tests:
test_items:
- flag: root:root
remediation: |
By default, the kubelet service file has ownership of root:root.
scored: true
- id: 4.1.3
text: "If proxy kube proxy configuration file exists ensure permissions are set to 644 or more restrictive (Manual)"
audit: |
# Get the node name where the pod is running
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# Get the pod name in the openshift-sdn namespace
POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Execute the stat command
oc exec -n openshift-sdn "$POD_NAME" -- stat -Lc "$NODE_NAME %n permissions=%a" /config/kube-proxy-config.yaml 2>/dev/null
fi
tests:
test_items:
- flag: "permissions"
set: true
compare:
op: bitmask
value: "644"
remediation: |
None needed.
scored: true
- id: 4.1.4
text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)"
audit: |
# Get the node name where the pod is running
NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
# Get the pod name in the openshift-sdn namespace
POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
if [ -z "$POD_NAME" ]; then
echo "No matching pods found on the current node."
else
# Execute the stat command
oc exec -n openshift-sdn "$POD_NAME" -- stat -Lc "$NODE_NAME %n %U:%G" /config/kube-proxy-config.yaml 2>/dev/null
fi
use_multiple_values: true
tests:
test_items:
- flag: root:root
remediation: |
None required. The configuration is managed by OpenShift operators.
scored: true
- id: 4.1.5
text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Manual)"
audit: |
# Check permissions
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/kubernetes/kubelet.conf 2> /dev/null
use_multiple_values: true
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "644"
remediation: |
None required.
scored: true
- id: 4.1.6
text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Manual)"
audit: |
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubelet.conf 2> /dev/null
use_multiple_values: true
tests:
test_items:
- flag: root:root
remediation: |
None required.
scored: true
- id: 4.1.7
text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive"
audit: |
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig.authentication.x509.clientCAFile'
oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME permissions=%a" /etc/kubernetes/kubelet-ca.crt 2> /dev/null
tests:
test_items:
- flag: "/etc/kubernetes/kubelet-ca.crt"
- flag: "permissions"
compare:
op: bitmask
value: "644"
remediation: |
No remediation required. OpenShift sets /etc/kubernetes/kubelet-ca.crt to 644 by default.
If permissions are more permissive than 644, update with: chmod 644 /etc/kubernetes/kubelet-ca.crt
scored: true
- id: 4.1.8
text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)"
audit: |
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubelet-ca.crt 2> /dev/null
use_multiple_values: true
tests:
test_items:
- flag: root:root
remediation: |
None required.
scored: true
- id: 4.1.9
text: "Ensure that the kubelet --config configuration file has permissions set to 600 or more restrictive (Automated)"
audit: |
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /var/data/kubelet/config.json 2> /dev/null
use_multiple_values: true
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
None required.
scored: true
- id: 4.1.10
text: "Ensure that the kubelet configuration file ownership is set to root:root (Automated)"
audit: |
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /var/data/kubelet/config.json 2> /dev/null
use_multiple_values: true
tests:
test_items:
- flag: root:root
remediation: |
None required.
scored: true
- id: 4.2
text: "Kubelet"
checks:
- id: 4.2.1
text: "Activate Garbage collection in OpenShift Container Platform 4, as appropriate (Manual)"
audit: |
echo "Retrieving and inspecting garbage collection configuration from node-local kubelet configz..."
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig'
tests:
test_items:
- flag: "evictionHard"
- flag: "imageGCHighThresholdPercent"
- flag: "imageGCLowThresholdPercent"
- flag: "imageMinimumGCAge"
remediation: |
OpenShift manages node garbage collection through KubeletConfig custom resources per MachineConfigPool.
To configure or adjust garbage collection thresholds, follow the documentation:
https://docs.openshift.com/container-platform/latest/nodes/nodes/nodes-nodes-garbage-collection.html
Example: Create or modify a KubeletConfig object to include:
---
evictionHard:
"memory.available": "200Mi"
"nodefs.available": "10%"
"imagefs.available": "15%"
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
imageMinimumGCAge: "2m0s"
Then apply the `KubeletConfig` to the appropriate `MachineConfigPool`.
scored: true
- id: 4.2.2
text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
audit: |
echo "Checking if anonymous-auth is disabled in kubelet configuration on the current node..."
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig.authentication.anonymous.enabled'
tests:
test_items:
- flag: "false"
remediation: |
By default, OpenShift sets anonymous-auth to false in Kubelet configuration.
If this value is found to be true, create or patch a KubeletConfig object with:
---
kind: KubeletConfig
apiVersion: machineconfiguration.openshift.io/v1
metadata:
name: disable-anonymous-auth
spec:
kubeletConfig:
authentication:
anonymous:
enabled: false
Then apply this KubeletConfig to the appropriate MachineConfigPool.
See OpenShift documentation on configuring node-level security settings.
scored: true
- id: 4.2.3
text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
audit: |
echo "Checking kubelet authorization mode on the current node..."
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig.authorization.mode'
tests:
test_items:
- flag: AlwaysAllow
set: false
remediation: |
No remediation required. By default, OpenShift uses secure authorization modes such as 'Webhook' and does not allow AlwaysAllow.
If AlwaysAllow is found, the node must be reconfigured using a KubeletConfig applied through the appropriate MachineConfigPool.
scored: true
- id: 4.2.4
text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
audit: |
echo "Checking Kubelet 'clientCAFile' setting on current node..."
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz \
| jq '.kubeletconfig.authentication.x509.clientCAFile'
tests:
test_items:
- flag: "/etc/kubernetes/kubelet-ca.crt"
remediation: |
No remediation required. OpenShift sets the clientCAFile by default to /etc/kubernetes/kubelet-ca.crt.
Manual modification is unsupported and unnecessary as OpenShift manages Kubelet certificate authentication via the Machine Config Operator.
scored: true
- id: 4.2.5
text: "Verify that the read only port is not used or is set to 0 (Automated)"
audit: |
echo "Checking 'kubelet-read-only-port' argument in openshift-kube-apiserver config..."
oc -n openshift-kube-apiserver get configmap config -o json \
| jq -r '.data["config.yaml"]' \
| yq '.apiServerArguments."kubelet-read-only-port"[0]'
tests:
test_items:
- flag: "0"
remediation: |
No remediation is required if the read-only port is set to 0.
If this value is not set to 0 (or the argument is missing), create a KubeletConfig object and apply it to the appropriate MachineConfigPool to disable the read-only port.
Example KubeletConfig:
---
apiVersion: machineconfiguration.openshift.io/v1
kind: KubeletConfig
metadata:
name: disable-readonly-port
spec:
kubeletConfig:
readOnlyPort: 0
scored: true
- id: 4.2.6
text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (automated)"
audit: |
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz \
| jq '.kubeletconfig'
tests:
test_items:
- path: ".streamingConnectionIdleTimeout"
compare:
op: noteq
value: "0s"
remediation: |
By default, OpenShift sets streamingConnectionIdleTimeout to 4h0m0s.
If it is manually set to "0s", this disables timeouts — which is insecure.
To remediate, create a `KubeletConfig` CR with a safer timeout (e.g., 1h0m0s):
---
apiVersion: machineconfiguration.openshift.io/v1
kind: KubeletConfig
metadata:
name: set-streaming-timeout
spec:
kubeletConfig:
streamingConnectionIdleTimeout: "1h0m0s"
scored: true
- id: 4.2.7
text: "Ensure that the --make-iptables-util-chains argument is set to true (manual)"
audit: |
echo "Checking 'makeIPTablesUtilChains' setting in Kubelet config on current node..."
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz \
| jq '.kubeletconfig'
tests:
test_items:
- path: ".makeIPTablesUtilChains"
compare:
op: eq
value: true
remediation: |
No remediation is required.
By default, OpenShift sets makeIPTablesUtilChains to true.
This allows Kubelet to manage iptables rules and keep them in sync with the dynamic pod network configuration.
scored: true
- id: 4.2.8
text: "Ensure that the kubeAPIQPS [--event-qps] argument is set to 0 or a level which ensures appropriate event capture (manual)"
audit: |
echo "Checking 'kubeAPIQPS' setting in Kubelet config on current node..."
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz \
| jq '.kubeletconfig'
tests:
test_items:
- path: ".kubeAPIQPS"
compare:
op: gte
value: 1
remediation: |
OpenShift sets kubeAPIQPS to a default of 50, which is appropriate in most environments.
If kubeAPIQPS is set to 0, event rate limiting is disabled, which can overwhelm the kubelet with excessive events.
To configure a proper limit, create or modify a `KubeletConfig` resource with an appropriate value:
---
apiVersion: machineconfiguration.openshift.io/v1
kind: KubeletConfig
metadata:
name: set-kubeapiqps
spec:
kubeletConfig:
kubeAPIQPS: 50
scored: true
- id: 4.2.9
text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (manual)"
audit: |
oc get configmap config -n openshift-kube-apiserver -ojson \
| jq -r '.data["config.yaml"]' \
| jq -r '.apiServerArguments["kubelet-client-certificate"][]?'
oc get configmap config -n openshift-kube-apiserver -ojson \
| jq -r '.data["config.yaml"]' \
| jq -r '.apiServerArguments["kubelet-client-key"][]?'
tests:
bin_op: and
test_items:
- flag: "/etc/kubernetes/static-pod-certs/secrets/kubelet-client/tls.crt"
- flag: "/etc/kubernetes/static-pod-certs/secrets/kubelet-client/tls.key"
remediation: |
No remediation is required. OpenShift manages secure TLS connections to kubelets by default using its internal certificate authority.
These X.509 certificates are rotated and validated automatically by the platform.
Manual modifications to the TLS paths or keys are not supported and can lead to cluster issues.
scored: true
- id: 4.2.10
text: "Ensure that the --rotate-certificates argument is not set to false (manual)"
audit: |
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz \
| jq '.kubeletconfig'
tests:
test_items:
- path: ".rotateCertificates"
compare:
op: eq
value: true
remediation: |
No remediation required. By default, OpenShift enables certificate rotation via rotateCertificates=true.
If disabled, you must either enable rotation via KubeletConfig or implement external certificate renewal.
Example remediation using KubeletConfig:
---
apiVersion: machineconfiguration.openshift.io/v1
kind: KubeletConfig
metadata:
name: enable-cert-rotation
spec:
kubeletConfig:
rotateCertificates: true
scored: true
- id: 4.2.11
text: "Verify that the RotateKubeletServerCertificate argument is set to true (manual)"
audit: |
echo "Checking that RotateKubeletServerCertificate is enabled in kubelet config on current node..."
NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
echo "Verifying feature gate: RotateKubeletServerCertificate"
oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz \
| jq '.kubeletconfig.featureGates.RotateKubeletServerCertificate'
echo "Verifying that certificate rotation is enabled"
oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz \
| jq '.kubeletconfig.rotateCertificates'
tests:
bin_op: and
test_items:
- flag: "RotateKubeletServerCertificate"
compare:
op: eq
value: true
- flag: "rotateCertificates"
compare:
op: eq
value: true
remediation: |
No remediation is required. OpenShift enables RotateKubeletServerCertificate by default and manages certificate rotation automatically.
If the feature gate or rotation setting is disabled, configure a `KubeletConfig` CR and apply it to the MachineConfigPool:
---
apiVersion: machineconfiguration.openshift.io/v1
kind: KubeletConfig
metadata:
name: enable-server-cert-rotation
spec:
kubeletConfig:
rotateCertificates: true
featureGates:
RotateKubeletServerCertificate: true
scored: true
- id: 4.2.13
text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)"
audit: |
# needs verification
# verify cipher suites
oc describe --namespace=openshift-ingress-operator ingresscontroller/default
oc get kubeapiservers.operator.openshift.io cluster -o json |jq .spec.observedConfig.servingInfo
oc get openshiftapiservers.operator.openshift.io cluster -o json |jq .spec.observedConfig.servingInfo
oc get cm -n openshift-authentication v4-0-config-system-cliconfig -o jsonpath='{.data.v4\-0\-config\-system\-cliconfig}' | jq .servingInfo
#check value for tlsSecurityProfile; null is returned if default is used
oc get kubeapiservers.operator.openshift.io cluster -o json |jq .spec.tlsSecurityProfile
type: manual
remediation: |
Follow the directions above and in the OpenShift documentation to configure the tlsSecurityProfile.
See "Configuring Ingress" in the OpenShift Container Platform documentation.
scored: false
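Several remediations above include a KubeletConfig body but omit how it is bound to nodes. A hedged sketch of the usual flow; the pool label and the machineConfigPoolSelector stanza are illustrative additions, not taken from the file above:

# Label the MachineConfigPool that the KubeletConfig should target
oc label machineconfigpool worker custom-kubelet=rh-1-8-hardening --overwrite
# Apply one of the KubeletConfig examples with a selector for that pool
cat <<'EOF' | oc apply -f -
apiVersion: machineconfiguration.openshift.io/v1
kind: KubeletConfig
metadata:
  name: disable-readonly-port
spec:
  machineConfigPoolSelector:
    matchLabels:
      custom-kubelet: rh-1-8-hardening
  kubeletConfig:
    readOnlyPort: 0
EOF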

cfg/rh-1.8/policies.yaml (new file, 486 lines)

@@ -0,0 +1,486 @@
---
controls:
version: rh-1.8
id: 5
text: "Policies"
type: "policies"
groups:
- id: 5.1
text: "RBAC and Service Accounts"
checks:
- id: 5.1.1
text: "Ensure that the cluster-admin role is only used where required (Manual)"
type: "manual"
audit: |
#To get a list of users and service accounts with the cluster-admin role
oc get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].kind |
grep cluster-admin
#To verify that kubeadmin is removed, no results should be returned
oc get secrets kubeadmin -n kube-system
remediation: |
Identify all clusterrolebindings to the cluster-admin role. Check if they are used and if they need this role or if they could use a role with fewer privileges.
Where possible, first bind users to a lower privileged role and then remove the clusterrolebinding to the cluster-admin role :
oc delete clusterrolebinding [name]
scored: false
- id: 5.1.2
text: "Minimize access to secrets (Manual)"
type: "manual"
remediation: |
Where possible, remove get, list and watch access to secret objects in the cluster.
scored: false
- id: 5.1.3
text: "Minimize wildcard use in Roles and ClusterRoles (Manual)"
type: "manual"
remediation: |
Where possible replace any use of wildcards in clusterroles and roles with specific
objects or actions.
scored: false
- id: 5.1.4
text: "Minimize access to create pods (Manual)"
type: "manual"
remediation: |
Where possible, remove create access to pod objects in the cluster.
scored: false
- id: 5.1.5
text: "Ensure that default service accounts are not actively used. (Manual)"
type: "manual"
remediation: |
None required.
scored: false
- id: 5.1.6
text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)"
type: "manual"
remediation: |
Modify the definition of pods and service accounts which do not need to mount service
account tokens to disable it.
scored: false
- id: 5.2
text: "Security Context Constraints (SCCs)"
checks:
- id: 5.2.1
text: "Minimize the admission of privileged containers (Manual)"
audit: |
oc get scc -o json \
| jq -r '[.items[] | select(.allowPrivilegedContainer==false) | .metadata.name]
| length
| if . > 0 then "pass" else "fail" end'
tests:
test_items:
- flag: "pass"
remediation: |
If no SCCs exist that restrict privileged containers, create one by running:
oc create -f - <<EOF
apiVersion: security.openshift.io/v1
kind: SecurityContextConstraints
metadata:
name: restricted-no-priv
allowPrivilegedContainer: false
runAsUser:
type: MustRunAsRange
seLinuxContext:
type: MustRunAs
users: []
groups:
- system:authenticated
EOF
Then apply appropriate RBAC to assign this SCC only to necessary service accounts, groups, or users.
Carefully avoid assigning `allowPrivilegedContainer: true` in any SCC that is broadly bound.
scored: true
- id: 5.2.2
text: "Minimize the admission of containers wishing to share the host process ID namespace (Manual)"
audit: |
oc get scc -o json \
| jq -r '[.items[] | select(.allowHostPID==false) | .metadata.name]
| length
| if . > 0 then "pass" else "fail" end'
tests:
test_items:
- flag: "pass"
remediation: |
If SCCs with `allowHostPID: true` exist, ensure they are restricted to trusted service accounts only.
To create a restrictive SCC that prevents host PID sharing:
---
apiVersion: security.openshift.io/v1
kind: SecurityContextConstraints
metadata:
name: restricted-no-hostpid
allowHostPID: false
runAsUser:
type: MustRunAsRange
seLinuxContext:
type: MustRunAs
users: []
groups:
- system:authenticated
---
Apply the SCC and bind it only to users or groups that do **not** need hostPID access.
scored: true
- id: 5.2.3
text: "Minimize the admission of containers wishing to share the host IPC namespace (Manual)"
audit: |
oc get scc -o json \
| jq -r '[.items[] | select(.allowHostIPC==false) | .metadata.name]
| length
| if . > 0 then "pass" else "fail" end'
tests:
test_items:
- flag: "pass"
remediation: |
If no SCCs restrict hostIPC usage, create one that explicitly sets allowHostIPC: false:
---
apiVersion: security.openshift.io/v1
kind: SecurityContextConstraints
metadata:
name: restricted-no-hostipc
allowHostIPC: false
runAsUser:
type: MustRunAsRange
seLinuxContext:
type: MustRunAs
users: []
groups:
- system:authenticated
---
Then assign this SCC to general workloads and ensure any SCCs allowing hostIPC are tightly scoped via RBAC.
scored: true
- id: 5.2.4
text: "Minimize the admission of containers wishing to share the host network namespace (manual)"
audit: |
oc get scc -o json \
| jq -r '[.items[] | select(.allowHostNetwork==false) | .metadata.name]
| length
| if . > 0 then "pass" else "fail" end'
tests:
test_items:
- flag: "pass"
remediation: |
If no SCCs restrict host networking, create one by running:
---
apiVersion: security.openshift.io/v1
kind: SecurityContextConstraints
metadata:
name: restricted-no-hostnetwork
allowHostNetwork: false
runAsUser:
type: MustRunAsRange
seLinuxContext:
type: MustRunAs
users: []
groups:
- system:authenticated
---
Ensure only workloads that require `hostNetwork: true` (e.g., CNI, infra pods) are allowed to use SCCs where it is explicitly enabled. Restrict access to such SCCs using RBAC.
scored: true
- id: 5.2.5
text: "Minimize the admission of containers with allowPrivilegeEscalation (manual)"
audit: |
oc get scc -o json \
| jq -r '[.items[] | select(.allowPrivilegeEscalation==false) | .metadata.name]
| length
| if . > 0 then "pass" else "fail" end'
tests:
test_items:
- flag: "pass"
remediation: |
If no SCCs exist that restrict the use of privilege escalation, create a custom SCC:
---
apiVersion: security.openshift.io/v1
kind: SecurityContextConstraints
metadata:
name: restricted-no-priv-escalation
allowPrivilegeEscalation: false
runAsUser:
type: MustRunAsRange
seLinuxContext:
type: MustRunAs
users: []
groups:
- system:authenticated
---
Assign this SCC only to workloads and users that **do not require** the ability to escalate privileges.
Use RBAC to restrict access to SCCs where `allowPrivilegeEscalation` is `true` to only trusted service accounts or admin roles.
scored: true
- id: 5.2.6
text: "Minimize the admission of root containers (manual)"
audit: |
sccs=$(oc get scc -o json | jq -r '.items[] | select(.runAsUser.type == "MustRunAsNonRoot") | .metadata.name')
if [[ -n "$sccs" ]]; then
echo "pass"
else
echo "fail"
fi
tests:
test_items:
- flag: "pass"
remediation: |
If no SCC is found with `runAsUser.type: MustRunAsNonRoot`, create one as follows:
---
apiVersion: security.openshift.io/v1
kind: SecurityContextConstraints
metadata:
name: restricted-nonroot
allowPrivilegeEscalation: false
runAsUser:
type: MustRunAsNonRoot
seLinuxContext:
type: MustRunAs
users: []
groups:
- system:authenticated
---
Assign this SCC only to workloads that must not run as root.
If an SCC allows `RunAsAny`, audit and restrict access using RBAC to prevent misuse.
scored: true
- id: 5.2.7
text: "Minimize the admission of containers with the NET_RAW capability (manual)"
audit: |
oc get scc -o json \
| jq -r '[.items[]
| select((.requiredDropCapabilities // []) | index("ALL"))
| .metadata.name]
| length
| if . > 0 then "pass" else "fail" end'
tests:
test_items:
- flag: "pass"
remediation: |
If no SCCs drop ALL capabilities, create a custom SCC that explicitly drops NET_RAW:
---
apiVersion: security.openshift.io/v1
kind: SecurityContextConstraints
metadata:
name: restricted-no-netraw
requiredDropCapabilities:
- NET_RAW
allowPrivilegedContainer: false
runAsUser:
type: MustRunAsRange
seLinuxContext:
type: MustRunAs
users: []
groups:
- system:authenticated
---
Apply this SCC to workloads that do not require NET_RAW.
If NET_RAW is required (e.g., for low-level networking apps), isolate those workloads with a specific SCC and restrict access via RBAC.
scored: true
- id: 5.2.8
text: "Minimize the admission of containers with added capabilities (manual)"
audit: |
oc get scc -o json \
| jq -r '[.items[]
| select(.allowedCapabilities == null)
| .metadata.name]
| length
| if . > 0 then "pass" else "fail" end'
oc get scc -o json \
| jq -r '[.items[]
| select(.defaultAddCapabilities == null)
| .metadata.name]
| length
| if . > 0 then "true" else "false" end'
tests:
test_items:
- flag: "pass"
- flag: "true"
remediation: |
If no SCCs restrict added capabilities, create a custom SCC as shown below:
---
apiVersion: security.openshift.io/v1
kind: SecurityContextConstraints
metadata:
name: restricted-no-added-caps
allowPrivilegedContainer: false
allowedCapabilities: []
defaultAddCapabilities: []
runAsUser:
type: MustRunAsRange
seLinuxContext:
type: MustRunAs
users: []
groups:
- system:authenticated
---
Assign this SCC to workloads that do **not** require elevated capabilities.
Create separate SCCs for workloads that require specific capabilities, and use RBAC to tightly restrict access to them.
scored: true
- id: 5.2.9
text: "Minimize the admission of containers with capabilities assigned (manual)"
audit: |
oc get scc -o json \
| jq -r '[.items[]
| select((.requiredDropCapabilities // []) | index("ALL"))
| .metadata.name]
| length
| if . > 0 then "true" else "false" end'
tests:
test_items:
- flag: "true"
remediation: |
If no SCCs drop all capabilities, create one that sets 'requiredDropCapabilities: [ALL]':
---
apiVersion: security.openshift.io/v1
kind: SecurityContextConstraints
metadata:
name: restricted-drop-all-capabilities
requiredDropCapabilities:
- ALL
allowPrivilegedContainer: false
runAsUser:
type: MustRunAsRange
seLinuxContext:
type: MustRunAs
users: []
groups:
- system:authenticated
---
Apply this SCC to general-purpose workloads that do not require elevated Linux capabilities.
If certain workloads require capabilities, create a separate SCC with minimal permissions and scope it using RBAC.
scored: true
- id: 5.2.10
text: "Minimize access to privileged Security Context Constraints (Manual)"
type: "manual"
remediation: |
Remove any users and groups who do not need access to an SCC, following the
principle of least privilege.
You can remove users and groups from an SCC using the oc edit scc $NAME
command.
Additionally, you can create your own SCCs that contain the container functionality you
need for a particular use case and assign that SCC to users and groups if the default
SCCs are not appropriate for your use case.
scored: false
- id: 5.3
text: "Network Policies and CNI"
checks:
- id: 5.3.1
text: "Ensure that the CNI in use supports Network Policies (Manual)"
type: "manual"
remediation: |
None required.
scored: false
- id: 5.3.2
text: "Ensure that all Namespaces have Network Policies defined (Manual)"
type: "manual"
audit: |
#Run the following command and review the NetworkPolicy objects created in the cluster.
oc get networkpolicy --all-namespaces
remediation: |
Follow the documentation and create NetworkPolicy objects as you need them.
scored: false
- id: 5.4
text: "Secrets Management"
checks:
- id: 5.4.1
text: "Prefer using secrets as files over secrets as environment variables (Manual)"
type: "manual"
audit: |
#Run the following command to find references to objects which use environment variables defined from secrets.
oc get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind}
{.metadata.name} {"\n"}{end}' -A
remediation: |
If possible, rewrite application code to read secrets from mounted secret files, rather than
from environment variables.
scored: false
- id: 5.4.2
text: "Consider external secret storage (Manual)"
type: "manual"
remediation: |
Refer to the secrets management options offered by your cloud provider or a third-party
secrets management solution.
scored: false
- id: 5.5
text: "Extensible Admission Control"
checks:
- id: 5.5.1
text: "Configure Image Provenance using image controller configuration parameters (Manual)"
type: "manual"
remediation: |
Follow the OpenShift documentation: [Image configuration resources](https://docs.openshift.com/container-platform/4.5/openshift_images/image-configuration.html)
scored: false
- id: 5.7
text: "General Policies"
checks:
- id: 5.7.1
text: "Create administrative boundaries between resources using namespaces (Manual)"
type: "manual"
audit: |
#Run the following command and review the namespaces created in the cluster.
oc get namespaces
#Ensure that these namespaces are the ones you need and are adequately administered as per your requirements.
remediation: |
Follow the documentation and create namespaces for objects in your deployment as you need
them.
scored: false
- id: 5.7.2
text: "Ensure that the seccomp profile is set to docker/default in your pod definitions (Manual)"
type: "manual"
remediation: |
To enable the default seccomp profile, use the reserved value /runtime/default that will
make sure that the pod uses the default policy available on the host.
scored: false
- id: 5.7.3
text: "Apply Security Context to Your Pods and Containers (Manual)"
type: "manual"
remediation: |
Follow the Kubernetes documentation and apply security contexts to your pods. For a
suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker
Containers.
scored: false
- id: 5.7.4
text: "The default namespace should not be used (Manual)"
type: "manual"
audit: |
#Run this command to list objects in default namespace
oc project default
oc get all
#The only entries there should be system managed resources such as the kubernetes and openshift service
remediation: |
Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
resources and that all new resources are created in a specific namespace.
scored: false
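For the section 5.2 SCC checks, and 5.2.10 in particular, it is useful to see who can actually use a permissive SCC. A short sketch using standard oc commands; note that RBAC grants of the "use" verb on securitycontextconstraints also confer access and should be reviewed separately:

# Users and groups assigned directly on the privileged SCC
oc get scc privileged -o jsonpath='{.users}{"\n"}{.groups}{"\n"}'
# Full definition, including allowPrivilegedContainer, allowHost* and capability fields
oc describe scc privileged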

cmd/util.go

@@ -546,8 +546,10 @@ func getPlatformBenchmarkVersion(platform Platform) string {
return "rh-0.7"
case "4.1":
return "rh-1.0"
case "4.11", "4.12", "4.13":
case "4.11":
return "rh-1.4"
case "4.13":
return "rh-1.8"
}
case "vmware":
return "tkgi-1.2.53"
@@ -623,7 +625,7 @@ func getOpenShiftInfo() Platform {
func getOcpValidVersion(ocpVer string) (string, error) {
ocpOriginal := ocpVer
valid := []string{"3.10", "4.1", "4.11", "4.12", "4.13"}
valid := []string{"3.10", "4.1", "4.11", "4.13"}
for !isEmpty(ocpVer) {
glog.V(3).Info(fmt.Sprintf("getOcpBenchmarkVersion check for ocp: %q \n", ocpVer))
if slices.Contains(valid, ocpVer) {
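The net effect of this hunk is that a detected OpenShift 4.13 control plane now maps to rh-1.8, 4.11 stays on rh-1.4, and 4.12 is dropped from both the switch and the valid-version list. One way to see the cluster version that feeds this mapping, assuming an authenticated oc session (kube-bench's own detection path may use a different API call):

oc get clusterversion version -o jsonpath='{.status.desired.version}'; echo
# e.g. 4.13.30; only the major.minor prefix (4.13) is relevant to the mapping above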

cmd/util_test.go

@@ -720,6 +720,13 @@ func Test_getPlatformBenchmarkVersion(t *testing.T) {
},
want: "rh-1.4",
},
{
name: "openshift4",
args: args{
platform: Platform{Name: "ocp", Version: "4.13"},
},
want: "rh-1.8",
},
{
name: "openshift4",
args: args{