From 21dd168736bbd75ae395f632dfdffba469cc4655 Mon Sep 17 00:00:00 2001 From: LaibaBareera <89480808+LaibaBareera@users.noreply.github.com> Date: Tue, 16 Sep 2025 13:00:14 +0500 Subject: [PATCH] add checks for cis benchmarks of rh-1.8 (#1945) Co-authored-by: afdesk --- cfg/config.yaml | 9 +- cfg/rh-1.8/config.yaml | 2 + cfg/rh-1.8/controlplane.yaml | 62 ++ cfg/rh-1.8/etcd.yaml | 183 +++++ cfg/rh-1.8/master.yaml | 1285 ++++++++++++++++++++++++++++++++++ cfg/rh-1.8/node.yaml | 485 +++++++++++++ cfg/rh-1.8/policies.yaml | 486 +++++++++++++ cmd/util.go | 6 +- cmd/util_test.go | 7 + 9 files changed, 2522 insertions(+), 3 deletions(-) create mode 100644 cfg/rh-1.8/config.yaml create mode 100644 cfg/rh-1.8/controlplane.yaml create mode 100644 cfg/rh-1.8/etcd.yaml create mode 100644 cfg/rh-1.8/master.yaml create mode 100644 cfg/rh-1.8/node.yaml create mode 100644 cfg/rh-1.8/policies.yaml diff --git a/cfg/config.yaml b/cfg/config.yaml index 19db2887..f0d97cc4 100644 --- a/cfg/config.yaml +++ b/cfg/config.yaml @@ -298,7 +298,8 @@ version_mapping: "ocp-3.10": "rh-0.7" "ocp-3.11": "rh-0.7" "ocp-4.0": "rh-1.0" - "ocp-4.17": "rh-1.4" + "ocp-4.11": "rh-1.4" + "ocp-4.13": "rh-1.8" "aks-1.0": "aks-1.0" "aks-1.7": "aks-1.7" "ack-1.0": "ack-1.0" @@ -471,6 +472,12 @@ target_mapping: - "controlplane" - "policies" - "etcd" + "rh-1.8": + - "master" + - "node" + - "controlplane" + - "policies" + - "etcd" "eks-stig-kubernetes-v1r6": - "node" - "controlplane" diff --git a/cfg/rh-1.8/config.yaml b/cfg/rh-1.8/config.yaml new file mode 100644 index 00000000..b7839455 --- /dev/null +++ b/cfg/rh-1.8/config.yaml @@ -0,0 +1,2 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml diff --git a/cfg/rh-1.8/controlplane.yaml b/cfg/rh-1.8/controlplane.yaml new file mode 100644 index 00000000..0d5def4d --- /dev/null +++ b/cfg/rh-1.8/controlplane.yaml @@ -0,0 +1,62 @@ +--- +controls: +version: rh-1.8 +id: 3 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 3.1 + text: "Authentication and Authorization" + checks: + - id: 3.1.1 + text: "Client certificate authentication should not be used for users (Manual)" + audit: | + # To verify user authentication is enabled + oc describe authentication + # To verify that an identity provider is configured + oc get identity + # To verify that a custom cluster-admin user exists + oc get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].kind | grep cluster-admin | grep User + # To verity that kbueadmin is removed, no results should be returned + oc get secrets kubeadmin -n kube-system + type: manual + remediation: | + Configure an identity provider for the OpenShift cluster. + Understanding identity provider configuration | Authentication | OpenShift + Container Platform 4.5. Once an identity provider has been defined, + you can use RBAC to define and apply permissions. + After you define an identity provider and create a new cluster-admin user, + remove the kubeadmin user to improve cluster security. + scored: false + + - id: 3.2 + text: "Logging" + checks: + - id: 3.2.1 + text: "Ensure that a minimal audit policy is created (Manual)" + audit: | + #To view kube apiserver log files + oc adm node-logs --role=master --path=kube-apiserver/ + #To view openshift apiserver log files + oc adm node-logs --role=master --path=openshift-apiserver/ + #To verify kube apiserver audit config + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig[]?' 
+ #To verify openshift apiserver audit config + oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig[]?' + type: manual + remediation: | + No remediation required. + scored: false + + - id: 3.2.2 + text: "Ensure that the audit policy covers key security concerns (Manual)" + audit: | + #To verify openshift apiserver audit config + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]?' + #To verify kube apiserver audit config + oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]?' + type: manual + remediation: | + In OpenShift 4.6 and higher, if appropriate for your needs, + modify the audit policy. + scored: false diff --git a/cfg/rh-1.8/etcd.yaml b/cfg/rh-1.8/etcd.yaml new file mode 100644 index 00000000..8e4aa0db --- /dev/null +++ b/cfg/rh-1.8/etcd.yaml @@ -0,0 +1,183 @@ +--- +controls: +version: rh-1.8 +id: 2 +text: "Etcd" +type: "etcd" +groups: + - id: 2 + text: "Etcd" + checks: + - id: 2.1 + text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + if [ -z "$POD_NAME" ]; then + echo "No matching file found on the current node." + else + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--cert-file=[^ ]*\).*/\1/' + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--key-file=[^ ]*\).*/\1/' + fi + use_multiple_values: true + tests: + test_items: + - flag: "file" + compare: + op: regex + value: '\/etc\/kubernetes\/static-pod-certs\/secrets\/etcd-all-(serving|certs)\/etcd-serving-.*\.(?:crt|key)' + remediation: | + OpenShift does not use the etcd-certfile or etcd-keyfile flags. + Certificates for etcd are managed by the etcd cluster operator. + scored: true + + - id: 2.2 + text: "Ensure that the --client-cert-auth argument is set to true (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + if [ -z "$POD_NAME" ]; then + echo "No matching file found on the current node." + else + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--client-cert-auth=[^ ]*\).*/\1/' + fi + use_multiple_values: true + tests: + test_items: + - flag: "--client-cert-auth" + compare: + op: eq + value: true + remediation: | + This setting is managed by the cluster etcd operator. No remediation required." 
+ scored: true + + - id: 2.3 + text: "Ensure that the --auto-tls argument is not set to true (Manual)" + audit: | + # Returns 0 if found, 1 if not found + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + if [ -z "$POD_NAME" ]; then + echo "No matching file found on the current node." + else + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | grep -- --auto-tls=true 2>/dev/null ; echo exit_code=$? + fi + use_multiple_values: true + tests: + test_items: + - flag: "exit_code" + compare: + op: eq + value: "1" + remediation: | + This setting is managed by the cluster etcd operator. No remediation required. + scored: true + + - id: 2.4 + text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + if [ -z "$POD_NAME" ]; then + echo "No matching file found on the current node." + else + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-cert-file=[^ ]*\).*/\1/' + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-key-file=[^ ]*\).*/\1/' + fi + use_multiple_values: true + tests: + test_items: + - flag: "file" + compare: + op: regex + value: '\/etc\/kubernetes\/static-pod-certs\/secrets\/etcd-all-(peer|certs)\/etcd-peer-.*\.(?:crt|key)' + remediation: | + None. This configuration is managed by the etcd operator. + scored: true + + - id: 2.5 + text: "Ensure that the --peer-client-cert-auth argument is set to true (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + if [ -z "$POD_NAME" ]; then + echo "No matching file found on the current node." + else + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-client-cert-auth=[^ ]*\).*/\1/' + fi + use_multiple_values: true + tests: + test_items: + - flag: "--peer-client-cert-auth" + compare: + op: eq + value: true + remediation: | + This setting is managed by the cluster etcd operator. No remediation required. + scored: true + + - id: 2.6 + text: "Ensure that the --peer-auto-tls argument is not set to true (Manual)" + audit: | + # Returns 0 if found, 1 if not found + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + if [ -z "$POD_NAME" ]; then + echo "No matching file found on the current node." 
+ else + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | grep -- --peer-auto-tls=true 2>/dev/null ; echo exit_code=$? + fi + use_multiple_values: true + tests: + test_items: + - flag: "exit_code" + compare: + op: eq + value: "1" + remediation: | + This setting is managed by the cluster etcd operator. No remediation required. + scored: true + + - id: 2.7 + text: "Ensure that a unique Certificate Authority is used for etcd (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + if [ -z "$POD_NAME" ]; then + echo "No matching file found on the current node." + else + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--trusted-ca-file=[^ ]*\).*/\1/' + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-trusted-ca-file=[^ ]*\).*/\1/' + fi + use_multiple_values: true + tests: + test_items: + - flag: "file" + compare: + op: regex + value: '\/etc\/kubernetes\/static-pod-certs\/configmaps\/(?:etcd-(?:serving|peer-client)-ca\/ca-bundle\.crt|etcd-all-bundles\/server-ca-bundle\.crt)' + remediation: | + None required. Certificates for etcd are managed by the OpenShift cluster etcd operator. + scored: true diff --git a/cfg/rh-1.8/master.yaml b/cfg/rh-1.8/master.yaml new file mode 100644 index 00000000..25a3ac00 --- /dev/null +++ b/cfg/rh-1.8/master.yaml @@ -0,0 +1,1285 @@ +--- +controls: +version: rh-1.8 +id: 1 +text: "Control Plane Components" +type: "master" +groups: + - id: 1.1 + text: "Master Node Configuration Files" + checks: + - id: 1.1.1 + text: "Ensure that the API server pod specification file permissions are set to 600 or more restrictive (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-apiserver namespace + POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-apiserver "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-apiserver-pod.yaml + fi + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: true + + - id: 1.1.2 + text: "Ensure that the API server pod specification file ownership is set to root:root (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-apiserver namespace + POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." 
+ else + # Execute the stat command + oc exec -n openshift-kube-apiserver "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/kube-apiserver-pod.yaml + fi + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: true + + - id: 1.1.3 + text: "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-controller-manager namespace + POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-controller-manager-pod.yaml + fi + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: true + + - id: 1.1.4 + text: "Ensure that the controller manager pod specification file ownership is set to root:root (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-controller-manager namespace + POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/kube-controller-manager-pod.yaml + fi + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: true + + - id: 1.1.5 + text: "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-scheduler namespace + POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-scheduler-pod.yaml + fi + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + No remediation required; file permissions are managed by the operator. 
+ scored: true + + - id: 1.1.6 + text: "Ensure that the scheduler pod specification file ownership is set to root:root (Manual))" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-scheduler namespace + POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/kube-scheduler-pod.yaml + fi + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: true + + - id: 1.1.7 + text: "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Manual))" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc rsh -n openshift-etcd "$POD_NAME" stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/manifests/etcd-pod.yaml + fi + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: true + + - id: 1.1.8 + text: "Ensure that the etcd pod specification file ownership is set to root:root (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc rsh -n openshift-etcd "$POD_NAME" stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/manifests/etcd-pod.yaml + fi + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: true + + - id: 1.1.9 + text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # For CNI multus + # Get the pod name in the openshift-multus namespace + POD_NAME=$(oc get pods -n openshift-multus -l app=multus --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." 
+ else + # Execute the stat command + oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c \"$i %n permissions=%a\" /host/etc/cni/net.d/*.conf"; 2>/dev/null + oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c \"$i %n permissions=%a\" /host/var/run/multus/cni/net.d/*.conf"; 2>/dev/null + fi + # For SDN pods + POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-sdn "$POD_NAME" -- find /var/lib/cni/networks/openshift-sdn -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null + oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openshift-sdn -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null + fi + + # For OVS pods + POD_NAME=$(oc get pods -n openshift-sdn -l app=ovs --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null + oc exec -n openshift-sdn "$POD_NAME" -- find /etc/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null + oc exec -n openshift-sdn "$POD_NAME" -- find /run/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null + fi + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: true + + - id: 1.1.10 + text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # For CNI multus + # Get the pod name in the openshift-multus namespace + POD_NAME=$(oc get pods -n openshift-multus -l app=multus --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c '$i %n %U:%G' /host/etc/cni/net.d/*.conf" 2>/dev/null + oc exec -n openshift-multus $i -- /bin/bash -c "stat -c '$i %n %U:%G' /host/var/run/multus/cni/net.d/*.conf" 2>/dev/null + fi + # For SDN pods + POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-sdn "$POD_NAME" -- find /var/lib/cni/networks/openshift-sdn -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null + oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openshift-sdn -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null + fi + # For OVS pods in 4.5 + POD_NAME=$(oc get pods -n openshift-sdn -l app=ovs --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." 
+ else + # Execute the stat command + oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null + oc exec -n openshift-sdn "$POD_NAME" -- find /etc/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null + oc exec -n openshift-sdn "$POD_NAME" -- find /run/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null + fi + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: true + + - id: 1.1.11 + text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-etcd "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /var/lib/etcd/member + fi + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "700" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: true + + - id: 1.1.12 + text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-etcd "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /var/lib/etcd/member + fi + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: true + + - id: 1.1.13 + text: "Ensure that the kubeconfig file permissions are set to 600 or more restrictive (Manual)" + audit: | + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/kubernetes/kubeconfig 2> /dev/null + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: true + + - id: 1.1.14 + text: "Ensure that the kubeconfig file ownership is set to root:root (Manual)" + audit: | + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubeconfig 2> /dev/null + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + No remediation required; file permissions are managed by the operator. 
+ scored: true + + - id: 1.1.15 + text: "Ensure that the scheduler kubeconfig file permissions are set to 600 or more restrictive (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-scheduler namespace + POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/configmaps/scheduler-kubeconfig/kubeconfig + fi + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: true + + - id: 1.1.16 + text: "Ensure that the scheduler kubeconfig file ownership is set to root:root (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-scheduler namespace + POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/configmaps/scheduler-kubeconfig/kubeconfig + fi + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: true + + - id: 1.1.17 + text: "Ensure that the controller-manager kubeconfig file permissions are set to 600 or more restrictive (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-controller-manager namespace + POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig + fi + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + No remediation required; file permissions are managed by the operator. 
+ scored: true + + - id: 1.1.18 + text: "Ensure that the controller-manager kubeconfig file ownership is set to root:root (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-controller-manager namespace + POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig + fi + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: true + + - id: 1.1.19 + text: "Ensure that the OpenShift PKI directory and file ownership is set to root:root (Manual)" + audit: | + # Should return root:root for all files and directories + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-controller-manager namespace + POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # echo $i static-pod-certs + oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type d -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; + oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; + # echo $i static-pod-resources + oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-resources -type d -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; + oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-resources -type f -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; + fi + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: true + + - id: 1.1.20 + text: "Ensure that the OpenShift PKI certificate file permissions are set to 600 or more restrictive (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-apiserver namespace + POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." 
+ else + # Execute the stat command + oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets/*.crt' -exec stat -c "$POD_NAME %n permissions=%a" {} \; + fi + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: true + + - id: 1.1.21 + text: "Ensure that the OpenShift PKI key file permissions are set to 600 (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-apiserver namespace + POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets/*.key' -exec stat -c "$POD_NAME %n permissions=%a" {} \; + fi + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + No remediation required; file permissions are managed by the operator. + scored: true + + - id: 1.2 + text: "API Server" + checks: + - id: 1.2.1 + text: "Ensure that anonymous requests are authorized (Manual)" + audit: | + found=0 + + echo "# ClusterRoleBindings granting permissions to system:unauthenticated" + crb_out="$(oc get clusterrolebindings -o json 2>/dev/null \ + | jq -r '.items[] + | select(.subjects[]? | select(.kind=="Group" and .name=="system:unauthenticated")) + | .metadata.name + " -> " + .roleRef.kind + "/" + .roleRef.name' \ + | sort -u)" + if [ -n "$crb_out" ]; then + echo "$crb_out" + found=1 + else + echo "(none)" + fi + + echo + echo "# Namespaced RoleBindings granting permissions to system:unauthenticated" + rb_out="$(oc get rolebindings -A -o json 2>/dev/null \ + | jq -r '.items[] + | select(.subjects[]? | select(.kind=="Group" and .name=="system:unauthenticated")) + | (.metadata.namespace + "/" + .metadata.name) + " -> " + .roleRef.kind + "/" + .roleRef.name' \ + | sort -u)" + if [ -n "$rb_out" ]; then + echo "$rb_out" + found=1 + else + echo "(none)" + fi + + # Provide a simple flag for the test harness + if [ $found -eq 1 ]; then + echo "unauthenticated_bindings_present" + else + echo "unauthenticated_bindings_missing" + fi + tests: + test_items: + - flag: "unauthenticated_bindings_present" + set: true + remediation: | + None required. The default configuration should not be modified. 
+ scored: true + + - id: 1.2.2 + text: "Use HTTPS for kubelet connections (Manual)" + audit: | + CFG=$(oc -n openshift-kube-apiserver get cm config -o jsonpath='{.data.config\.yaml}') + + # Extract kubelet client cert/key paths (support both layouts) + CERT_FILE=$(printf '%s\n' "$CFG" \ + | grep -Eo '/etc/kubernetes/static-pod-(resources/kube-apiserver-certs|certs)/secrets/kubelet-client/tls\.crt' \ + | head -n1) + + KEY_FILE=$(printf '%s\n' "$CFG" \ + | grep -Eo '/etc/kubernetes/static-pod-(resources/kube-apiserver-certs|certs)/secrets/kubelet-client/tls\.key' \ + | head -n1) + + # 1) pass/fail on presence of both files + if [ -n "$CERT_FILE" ] && [ -n "$KEY_FILE" ]; then + echo "pass" + else + echo "fail" + fi + KUBELET_HTTPS=$(printf '%s\n' "$CFG" \ + | grep -Eo '(^|[[:space:]])kubelet-https:[[:space:]]*(true|false)' \ + | awk -F: '{print $2}' \ + | tr -d '[:space:]' \ + | head -n1) + + if [ "$KUBELET_HTTPS" = "false" ]; then + echo "false" + else + echo "true" + fi + + oc -n openshift-apiserver describe secret serving-cert | grep -E 'tls\.crt|tls\.key|Type:' + tests: + bin_op: and + test_items: + - flag: "pass" + - flag: "true" + - flag: "kubernetes.io/tls" + - flag: "tls.crt" + - flag: "tls.key" + remediation: | + OpenShift does not use the legacy --kubelet-https flag; TLS is enforced via + kubelet client cert/key arguments and cluster CAs. Ensure: + - apiServerArguments.kubelet-client-certificate[0] points to a real file + - apiServerArguments.kubelet-client-key[0] points to a real file + - The openshift-apiserver 'serving-cert' secret is type kubernetes.io/tls and contains tls.crt and tls.key + scored: false + + - id: 1.2.3 + text: "Ensure that the kubelet uses certificates to authenticate (Manual)" + audit: | + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments["kubelet-client-certificate"]' + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r'.data["config.yaml"]' | jq '.apiServerArguments["kubelet-client-key"]' + oc -n openshift-apiserver describe secret serving-cert + tests: + test_items: + - flag: "/etc/kubernetes/static-pod-certs/secrets/kubelet-client/tls.crt" + - flag: "/etc/kubernetes/static-pod-certs/secrets/kubelet-client/tls.key" + - flag: "kubernetes.io/tls" + remediation: | + No remediation is required. + OpenShift automatically manages kubelet authentication using X.509 certificates issued by the internal platform CA. + Manual modification of these certificates is not supported and can disrupt platform components. + scored: true + + - id: 1.2.4 + text: "Verify that the kubelet certificate authority is set as appropriate (Manual)" + audit: | + oc get configmap config -n openshift-kube-apiserver -ojson \ + | jq -r '.data["config.yaml"]' \ + | jq '.apiServerArguments["kubelet-certificate-authority"]' + tests: + test_items: + - flag: "/etc/kubernetes/static-pod-resources/configmaps/kubelet-serving-ca/ca-bundle.crt" + remediation: | + No remediation is required. + OpenShift uses internal X.509 certificates and platform-managed CAs to verify kubelet server identities. + This is not user-configurable and should not be modified. 
+ scored: true + + - id: 1.2.5 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Manual)" + audit: | + oc get configmap config -n openshift-kube-apiserver -o json \ + | jq -r '.data["config.yaml"]' \ + | jq '.apiServerArguments["authorization-mode"]' + audit_config: | + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' + tests: + bin_op: or + test_items: + - path: "{.authorization-mode}" + compare: + op: nothave + value: "AlwaysAllow" + - path: "{.authorization-mode}" + flag: "authorization-mode" + set: false + remediation: | + No remediation required. + OpenShift does not support the 'AlwaysAllow' authorization mode. + The API server is bootstrapped with secure authorization mechanisms including RBAC and Node by default. + scored: true + + - id: 1.2.6 + text: "Verify that RBAC is enabled (Manual)" + audit: | + oc get configmap config -n openshift-kube-apiserver -o json \ + | jq -r '.data["config.yaml"]' \ + | jq '.apiServerArguments["authorization-mode"]' + audit_config: | + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' + tests: + bin_op: or + test_items: + - path: "{.authorization-mode}" + compare: + op: has + value: "RBAC" + - path: "{.authorization-mode}" + flag: "authorization-mode" + set: false + remediation: | + No remediation is required. + OpenShift is configured at bootstrap time to use Role-Based Access Control (RBAC) as the default authorization mode. + RBAC is always enabled, and cannot be disabled through configuration. + scored: true + + + - id: 1.2.7 + text: "Ensure that the APIPriorityAndFairness feature gate is enabled (Manual)" + audit: | + oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.apiServerArguments' + tests: + test_items: + - flag: "APIPriorityAndFairness=true" + remediation: | + No remediation is required + scored: true + + - id: 1.2.8 + text: "Ensure that the admission control plugin AlwaysAdmit is not set (Manual)" + audit: | + oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' + tests: + test_items: + - flag: "AlwaysAdmit" + set: false + remediation: | + No remediation is required. The AlwaysAdmit admission controller cannot be enabled in OpenShift. + scored: true + + - id: 1.2.9 + text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" + audit: | + oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' + tests: + test_items: + - flag: "AlwaysPullImages" + set: false + remediation: | + None required. + scored: true + + - id: 1.2.10 + text: "Ensure that the admission control plugin ServiceAccount is set (Manual)" + audit: | + oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' + tests: + test_items: + - flag: "ServiceAccount" + set: true + remediation: | + None required. OpenShift is configured to use service accounts by default. 
+ scored: true + + - id: 1.2.11 + text: "Ensure that the admission control plugin NamespaceLifecycle is set (Manual)" + audit: | + oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' + output=$(oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"') + [ "$output" == "null" ] && echo "ocp 4.5 has NamespaceLifecycle compiled" || echo $output + tests: + test_items: + - flag: "NamespaceLifecycle" + remediation: | + Ensure that the --disable-admission-plugins parameter does not include NamespaceLifecycle. + scored: true + + - id: 1.2.12 + text: "Ensure that the admission control plugin SecurityContextConstraint is set (Manual)" + audit: | + oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' + output=$(oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"') + [ "$output" == "null" ] && echo "ocp 4.5 has SecurityContextConstraint compiled" || echo $output + tests: + test_items: + - flag: "security.openshift.io/SecurityContextConstraint" + remediation: | + None required. Security Context Constraints are enabled by default in OpenShift and cannot be disabled. + scored: true + + - id: 1.2.13 + text: "Ensure that the admission control plugin NodeRestriction is set (Manual)" + audit: | + oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' + output=$(oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"') + [ "$output" == "null" ] && echo "ocp 4.5 has NodeRestriction compiled" || echo $output + tests: + test_items: + - flag: "NodeRestriction" + remediation: | + The NodeRestriction plugin cannot be disabled. + scored: true + + - id: 1.2.14 + text: "Ensure that the --insecure-bind-address argument is not set (manual)" + audit: | + # Get the insecure-bind-address value + insecure_bind_address=$(oc get kubeapiservers.operator.openshift.io cluster -ojson \ + | jq -r '.spec.observedConfig.apiServerArguments["insecure-bind-address"][]?') + + # Get port from openshift-kube-apiserver + kube_api_port=$(oc -n openshift-kube-apiserver get endpoints -o jsonpath='{.items[*].subsets[*].ports[*].port}') + + # Get port from openshift-apiserver + openshift_api_port=$(oc -n openshift-apiserver get endpoints -o jsonpath='{.items[*].subsets[*].ports[*].port}') + + # Evaluate logic + [[ -z "$insecure_bind_address" ]] && \ + [[ "$kube_api_port" == *"6443"* ]] && \ + [[ "$openshift_api_port" == *"8443"* ]] && echo "pass" || echo "fail" + tests: + test_items: + - flag: "pass" + remediation: | + No remediation is required. + By default, OpenShift uses secure HTTPS ports (6443 and 8443) for all API communications. + The API servers are not configured to expose insecure ports and are isolated within the pod network. + scored: true + + + - id: 1.2.15 + text: "Ensure that the --insecure-port argument is set to 0 (Manual)" + audit: | + oc -n openshift-kube-apiserver get endpoints -o jsonpath='{.items[*].subsets[*].ports[*].port}' + tests: + test_items: + - flag: "6443" + remediation: | + None required. The configuration is managed by the API server operator. 
+ scored: true + + - id: 1.2.16 + text: "Ensure that the --secure-port argument is not set to 0 (Manual)" + audit: | + BIND_ADDR=$(oc get kubeapiservers.operator.openshift.io cluster -o json \ + | jq -r '.spec.observedConfig.servingInfo.bindAddress') + + PORTS=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver \ + -o jsonpath='{.items[*].spec.containers[?(@.name=="kube-apiserver")].ports[*].containerPort}') + + if [ "$BIND_ADDR" = "0.0.0.0:6443" ] && echo "$PORTS" | grep -q '\b6443\b'; then + echo "pass" + else + echo "fail" + fi + tests: + test_items: + - flag: "pass" + remediation: | + None required. OpenShift serves the API securely over port 6443 with TLS, authentication, and authorization. + The insecure API port is not exposed or configurable by default. + scored: true + + - id: 1.2.17 + text: "Ensure that the healthz endpoint is protected by RBAC (Manual)" + type: manual + remediation: | + None required as profiling data is protected by RBAC. + scored: false + + - id: 1.2.18 + text: "Ensure that the --audit-log-path argument is set (Manual)" + audit: | + # Get kube-apiserver audit log path + kube_path=$(oc get configmap config -n openshift-kube-apiserver -ojson \ + | jq -r '.data["config.yaml"]' \ + | jq -r '.apiServerArguments["audit-log-path"][]?') + + # Get OpenShift apiserver audit log path + os_path=$(oc get configmap config -n openshift-apiserver -ojson \ + | jq -r '.data["config.yaml"]' \ + | jq -r '.apiServerArguments["audit-log-path"][]?') + + # Check if log file exists in kube-apiserver pod + kube_pod=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o jsonpath='{.items[0].metadata.name}') + oc rsh -n openshift-kube-apiserver -c kube-apiserver $kube_pod ls "$kube_path" >/dev/null 2>&1 + kube_exists=$? + + # Check if log file exists in openshift-apiserver pod + os_pod=$(oc get pods -n openshift-apiserver -l apiserver=true -o jsonpath='{.items[0].metadata.name}') + oc rsh -n openshift-apiserver $os_pod ls "$os_path" >/dev/null 2>&1 + os_exists=$? + + # Evaluate all conditions + [[ "$kube_path" == "/var/log/kube-apiserver/audit.log" ]] && \ + [[ "$os_path" == "/var/log/openshift-apiserver/audit.log" ]] && \ + [[ $kube_exists -eq 0 ]] && \ + [[ $os_exists -eq 0 ]] && echo "pass" || echo "fail" + tests: + test_items: + - flag: "pass" + remediation: | + No remediation is required. + OpenShift manages audit logging automatically via the apiserver configuration. + By default, the audit log paths are: + - /var/log/kube-apiserver/audit.log + - /var/log/openshift-apiserver/audit.log + scored: true + + - id: 1.2.19 + text: "Ensure that the audit logs are forwarded off the cluster for retention (Manual)" + type: "manual" + remediation: | + Follow the documentation for log forwarding. Forwarding logs to third party systems + https://docs.openshift.com/container-platform/4.5/logging/cluster-logging-external.html + scored: false + + - id: 1.2.20 + text: "Ensure that the maximumRetainedFiles argument is set to 10 or as appropriate (Manual)" + audit: | + VALUE=$(oc get configmap config -n openshift-kube-apiserver -ojson \ + | jq -r '.data["config.yaml"]' \ + | jq -r '.apiServerArguments["audit-log-maxbackup"][0] // empty') + + if [ -n "$VALUE" ] && [ "$VALUE" -ge 10 ]; then + echo "pass (current=$VALUE)" + else + echo "fail (current=$VALUE)" + fi + tests: + test_items: + - flag: "pass" + remediation: | + No remediation required. + By default, OpenShift retains 10 audit log backup files. 
+ This provides sufficient log history for incident investigation and audit review. + scored: true + + - id: 1.2.21 + text: "Configure Kubernetes API Server Maximum Audit Log Size (Manual)" + audit: | + VALUE=$(oc get configmap config -n openshift-kube-apiserver -ojson \ + | jq -r '.data["config.yaml"]' \ + | jq -r '.apiServerArguments["audit-log-maxsize"][0] // empty') + + if [ -n "$VALUE" ] && [ "$VALUE" -ge 100 ]; then + echo "pass (current=$VALUE)" + else + echo "fail (current=$VALUE)" + fi + tests: + test_items: + - flag: "pass" + remediation: | + Set the audit-log-maxsize parameter to 100 or as an appropriate number. + maximumFileSizeMegabytes: 100 + scored: true + + - id: 1.2.22 + text: "Ensure that the --request-timeout argument is set (Manual)" + audit: | + VALUE=$(oc get configmap config -n openshift-kube-apiserver -ojson \ + | jq -r '.data["config.yaml"]' \ + | jq -r '.apiServerArguments["min-request-timeout"][0] // empty') + + if [ -n "$VALUE" ] && [ "$VALUE" -eq 3600 ]; then + echo "pass (current=$VALUE)" + else + echo "fail (current=$VALUE)" + fi + tests: + test_items: + - flag: "pass" + remediation: | + TBD + scored: true + + - id: 1.2.23 + text: "Ensure that the --service-account-lookup argument is set to true (Manual)" + audit: | + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r + '.data["config.yaml"]' | jq '.apiServerArguments."service-account-lookup"[]' + tests: + test_items: + - flag: "true" + remediation: | + TBD + scored: true + + - id: 1.2.24 + text: "Ensure that the --service-account-key-file argument is set as appropriate (Manual)" + audit: | + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .serviceAccountPublicKeyFiles[] + tests: + bin_op: and + test_items: + - flag: "/etc/kubernetes/static-pod-resources/configmaps/sa-token-signing-certs" + - flag: "/etc/kubernetes/static-pod-resources/configmaps/bound-sa-token-signing-certs" + remediation: | + The OpenShift API server does not use the service-account-key-file argument. + The ServiceAccount token authenticator is configured with serviceAccountConfig.publicKeyFiles. + OpenShift does not reuse the apiserver TLS key. This is not configurable. + scored: true + + - id: 1.2.25 + text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Manual)" + audit: | + oc get configmap config -n openshift-kube-apiserver -ojson \ + | jq -r '.data["config.yaml"]' \ + | jq -r '.apiServerArguments["etcd-certfile"][]?' + + oc get configmap config -n openshift-kube-apiserver -ojson \ + | jq -r '.data["config.yaml"]' \ + | jq -r '.apiServerArguments["etcd-keyfile"][]?' + tests: + bin_op: and + test_items: + - flag: "/etc/kubernetes/static-pod-resources/secrets/etcd-client/tls.crt" + - flag: "/etc/kubernetes/static-pod-resources/secrets/etcd-client/tls.key" + remediation: | + No remediation is required. + OpenShift automatically manages X.509 client certificates and TLS encryption for secure communication with etcd. + These settings are handled by the platform and should not be manually modified. + scored: true + + - id: 1.2.26 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)" + audit: | + oc get configmap config -n openshift-kube-apiserver -ojson \ + | jq -r '.data["config.yaml"]' \ + | jq -r '.apiServerArguments["tls-cert-file"][]?' 
+
+          oc get configmap config -n openshift-kube-apiserver -ojson \
+            | jq -r '.data["config.yaml"]' \
+            | jq -r '.apiServerArguments["tls-private-key-file"][]?'
+
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "/etc/kubernetes/static-pod-certs/secrets/service-network-serving-certkey/tls.crt"
+            - flag: "/etc/kubernetes/static-pod-certs/secrets/service-network-serving-certkey/tls.key"
+        remediation: |
+          No remediation is required. OpenShift automatically configures the API server with valid X.509 certificates and TLS keys.
+          These are used to encrypt traffic between the API server and clients, including kubelets and users.
+          Certificate rotation and lifecycle management are handled by the OpenShift platform.
+        scored: true
+
+      - id: 1.2.27
+        text: "Ensure that the --client-ca-file argument is set as appropriate (Manual)"
+        audit: |
+          oc get configmap config -n openshift-kube-apiserver -ojson | \
+            jq -r '.data["config.yaml"]' | \
+            jq -r .servingInfo.clientCA
+        tests:
+          test_items:
+            - flag: "/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt"
+        remediation: |
+          OpenShift automatically manages TLS authentication for the API server communication with the node/kubelet.
+          This is not configurable. You may optionally set a custom default certificate to be used by the API
+          server when serving content in order to enable clients to access the API server at a different host name
+          or without the need to distribute the cluster-managed certificate authority (CA) certificates to the clients.
+
+          User-provided certificates must be provided in a kubernetes.io/tls type Secret in the openshift-config namespace.
+          Update the API server cluster configuration,
+          the apiserver/cluster resource, to enable the use of the user-provided certificate.
+        scored: true
+
+      - id: 1.2.28
+        text: "Ensure that the --etcd-cafile argument is set as appropriate (Manual)"
+        audit: |
+          oc get configmap config -n openshift-kube-apiserver -ojson | \
+            jq -r '.data["config.yaml"]' | \
+            jq -r '.apiServerArguments["etcd-cafile"]'
+        tests:
+          test_items:
+            - flag: "/etc/kubernetes/static-pod-resources/configmaps/etcd-serving-ca/ca-bundle.crt"
+        remediation: |
+          None required. OpenShift generates the etcd-cafile and sets the arguments appropriately in the API server. Communication with etcd is secured by the etcd serving CA.
+        scored: true
+
+      - id: 1.2.29
+        text: "Ensure that encryption providers are appropriately configured (Manual)"
+        audit: |
+          oc get openshiftapiserver -o=jsonpath='{range .items[0].status.conditions[?(@.type=="Encrypted")]}{.reason}{"\n"}{.message}{"\n"}'
+        tests:
+          test_items:
+            - flag: "EncryptionCompleted"
+        remediation: |
+          Follow the Kubernetes documentation and configure an EncryptionConfig file.
+          In this file, choose aescbc, kms or secretbox as the encryption provider.
+        scored: true
+
+      - id: 1.2.30
+        text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)"
+        type: manual
+        audit: |
+          oc get cm -n openshift-authentication v4-0-config-system-cliconfig -o jsonpath='{.data.v4\-0\-config\-system\-cliconfig}' | jq .servingInfo
+          oc get kubeapiservers.operator.openshift.io cluster -o json | jq .spec.observedConfig.servingInfo
+          oc get openshiftapiservers.operator.openshift.io cluster -o json | jq .spec.observedConfig.servingInfo
+          oc describe --namespace=openshift-ingress-operator ingresscontroller/default
+        remediation: |
+          Verify that the tlsSecurityProfile is set to the value you chose.
+          Note: The HAProxy Ingress controller image does not support TLS 1.3
+          and because the Modern profile requires TLS 1.3, it is not supported.
+          The Ingress Operator converts the Modern profile to Intermediate.
+          The Ingress Operator also converts the TLS 1.0 of an Old or Custom profile to 1.1,
+          and TLS 1.3 of a Custom profile to 1.2.
+        scored: false
+
+      - id: 1.2.31
+        text: "Ensure unsupported configuration overrides are not used (Manual)"
+        audit: |
+          oc get kubeapiserver/cluster -o jsonpath='{.spec.unsupportedConfigOverrides}'
+        tests:
+          test_items:
+            - flag: "null"
+        remediation: |
+          No remediation is required.
+          OpenShift has deprecated and disabled unsupportedConfigOverrides.
+          This field should remain null and must not be used in any supported configuration.
+        scored: true
+
+  - id: 1.3
+    text: "Controller Manager"
+    checks:
+      - id: 1.3.1
+        text: "Ensure that controller manager healthz endpoints are protected by RBAC (Manual)"
+        type: manual
+        audit: |
+          # Verify configuration for ports, livenessProbe, readinessProbe, healthz
+          oc -n openshift-kube-controller-manager get cm kube-controller-manager-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers'
+          # Verify endpoints
+          oc -n openshift-kube-controller-manager describe endpoints
+          # Test to validate RBAC enabled on the controller endpoint; check with non-admin role
+          oc project openshift-kube-controller-manager
+          POD=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager -o jsonpath='{.items[0].metadata.name}')
+          PORT=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager -o jsonpath='{.items[0].spec.containers[0].ports[0].hostPort}')
+          # Following should return 403 Forbidden
+          oc rsh -n openshift-kube-controller-manager ${POD} curl https://localhost:${PORT}/metrics -k
+          # Create a service account to test RBAC
+          oc create -n openshift-kube-controller-manager sa permission-test-sa
+          # Should return 403 Forbidden
+          SA_TOKEN=$(oc sa -n openshift-kube-controller-manager get-token permission-test-sa)
+          oc rsh -n openshift-kube-controller-manager ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $SA_TOKEN" -k
+          # Cleanup
+          oc delete -n openshift-kube-controller-manager sa permission-test-sa
+          # As cluster admin, should succeed
+          CLUSTER_ADMIN_TOKEN=$(oc whoami -t)
+          oc rsh -n openshift-kube-controller-manager ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $CLUSTER_ADMIN_TOKEN" -k
+        remediation: |
+          None required; profiling is protected by RBAC.
+        scored: false
+
+      - id: 1.3.2
+        text: "Ensure that the --use-service-account-credentials argument is set to true (Manual)"
+        audit: |
+          oc get configmaps config -n openshift-kube-controller-manager -ojson | \
+            jq -r '.data["config.yaml"]' | \
+            jq -r '.extendedArguments["use-service-account-credentials"][]'
+        tests:
+          test_items:
+            - flag: "true"
+        remediation: |
+          The OpenShift Controller Manager operator manages and updates the OpenShift Controller Manager.
+          The Kubernetes Controller Manager operator manages and updates the Kubernetes Controller Manager deployed on top of OpenShift.
+          This operator is configured via KubeControllerManager custom resource.
+ scored: true
+
+ - id: 1.3.3
+ text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Manual)"
+ audit: |
+ oc get configmaps config -n openshift-kube-controller-manager -ojson | \
+ jq -r '.data["config.yaml"]' | \
+ jq -r '.extendedArguments["service-account-private-key-file"][]'
+ tests:
+ test_items:
+ - flag: "/etc/kubernetes/static-pod-resources/secrets/service-account-private-key/service-account.key"
+ remediation: |
+ None required.
+ OpenShift manages the service account credentials for the controller manager automatically.
+ scored: true
+
+ - id: 1.3.4
+ text: "Ensure that the --root-ca-file argument is set as appropriate (Manual)"
+ audit: |
+ oc get configmaps config -n openshift-kube-controller-manager -ojson | \
+ jq -r '.data["config.yaml"]' | \
+ jq -r '.extendedArguments["root-ca-file"][]'
+ tests:
+ test_items:
+ - flag: "/etc/kubernetes/static-pod-resources/configmaps/serviceaccount-ca/ca-bundle.crt"
+ remediation: |
+ None required.
+ Certificates for OpenShift platform components are automatically created and rotated by the OpenShift Container Platform.
+ scored: true
+
+
+ - id: 1.4
+ text: "Scheduler"
+ checks:
+ - id: 1.4.1
+ text: "Ensure that the healthz endpoints for the scheduler are protected by RBAC (Manual)"
+ type: manual
+ audit: |
+ # check configuration for ports, livenessProbe, readinessProbe, healthz
+ oc -n openshift-kube-scheduler get cm kube-scheduler-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers'
+ # Test to verify endpoints
+ oc -n openshift-kube-scheduler describe endpoints
+ # Test to validate RBAC enabled on the scheduler endpoint; check with non-admin role
+ oc project openshift-kube-scheduler
+ POD=$(oc get pods -l app=openshift-kube-scheduler -o jsonpath='{.items[0].metadata.name}')
+ PORT=$(oc get pod $POD -o jsonpath='{.spec.containers[0].livenessProbe.httpGet.port}')
+ # Should return 403 Forbidden
+ oc rsh ${POD} curl http://localhost:${PORT}/metrics -k
+ # Create a service account to test RBAC
+ oc create sa permission-test-sa
+ # Should return 403 Forbidden
+ SA_TOKEN=$(oc sa get-token permission-test-sa)
+ oc rsh ${POD} curl http://localhost:${PORT}/metrics -H "Authorization: Bearer $SA_TOKEN" -k
+ # Cleanup
+ oc delete sa permission-test-sa
+ # As cluster admin, should succeed
+ CLUSTER_ADMIN_TOKEN=$(oc whoami -t)
+ oc rsh ${POD} curl http://localhost:${PORT}/metrics -H "Authorization: Bearer $CLUSTER_ADMIN_TOKEN" -k
+ remediation: |
+ None required. Profiling is protected by RBAC and cannot be disabled.
+ A fix for the underlying issue is tracked at https://bugzilla.redhat.com/show_bug.cgi?id=1889488.
+ scored: false
+
+ - id: 1.4.2
+ text: "Verify that the scheduler API service is protected by RBAC (Manual)"
+ type: manual
+ audit: |
+ echo "Describing kube-scheduler endpoints..."
+ oc -n openshift-kube-scheduler describe endpoints
+
+ echo "Checking pod configuration for kube-scheduler to confirm no --bind-address or insecure arguments..."
+ oc -n openshift-kube-scheduler get cm kube-scheduler-pod -o json \
+ | jq -r '.data["pod.yaml"]' \
+ | jq '.spec.containers[] | select(.name=="kube-scheduler") | .args'
+
+ echo "Testing access to metrics endpoint as unauthenticated user..."
+ oc project openshift-kube-scheduler + export POD=$(oc get pods -l app=openshift-kube-scheduler -o jsonpath='{.items[0].metadata.name}') + export POD_IP=$(oc get pods -l app=openshift-kube-scheduler -o jsonpath='{.items[0].status.podIP}') + export PORT=$(oc get pod $POD -o jsonpath='{.spec.containers[0].livenessProbe.httpGet.port}') + oc rsh $POD curl -k -s -o /dev/null -w "%{http_code}" https://$POD_IP:$PORT/metrics + + echo "Testing access with unprivileged service account..." + oc create sa permission-test-sa + export SA_TOKEN=$(oc create token permission-test-sa) + oc rsh $POD curl -k -s -o /dev/null -w "%{http_code}" https://$POD_IP:$PORT/metrics -H "Authorization: Bearer $SA_TOKEN" + + echo "Testing access with cluster-admin..." + export CLUSTER_ADMIN_TOKEN=$(oc whoami -t) + oc rsh $POD curl -k -s -o /dev/null -w "%{http_code}" https://$POD_IP:$PORT/metrics -H "Authorization: Bearer $CLUSTER_ADMIN_TOKEN" + + # Cleanup + unset CLUSTER_ADMIN_TOKEN POD PORT SA_TOKEN POD_IP + oc delete sa permission-test-sa + remediation: | + By default, the --bind-address argument is not present, + the readinessProbe and livenessProbe arguments are set to 10251 and the port argument is set to 0. + Check the status of this issue: https://bugzilla.redhat.com/show_bug.cgi?id=1889488 + scored: false diff --git a/cfg/rh-1.8/node.yaml b/cfg/rh-1.8/node.yaml new file mode 100644 index 00000000..6327efbe --- /dev/null +++ b/cfg/rh-1.8/node.yaml @@ -0,0 +1,485 @@ +--- +controls: +version: rh-1.8 +id: 4 +text: "Worker Nodes" +type: "node" +groups: + - id: 4.1 + text: "Worker Node Configuration Files" + checks: + - id: 4.1.1 + text: "Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated)" + audit: | + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/systemd/system/kubelet.service 2> /dev/null + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + By default, the kubelet service file has permissions of 644. + scored: true + + - id: 4.1.2 + text: "Ensure that the kubelet service file ownership is set to root:root (Automated)" + audit: | + # Should return root:root for each node + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/systemd/system/kubelet.service 2> /dev/null + tests: + test_items: + - flag: root:root + remediation: | + By default, the kubelet service file has ownership of root:root. + scored: true + + - id: 4.1.3 + text: "If proxy kube proxy configuration file exists ensure permissions are set to 644 or more restrictive (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # Get the pod name in the openshift-sdn namespace + POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-sdn "$POD_NAME" -- stat -Lc "$i %n permissions=%a" /config/kube-proxy-config.yaml 2>/dev/null + fi + tests: + test_items: + - flag: "permissions" + set: true + compare: + op: bitmask + value: "644" + remediation: | + None needed. 
+ scored: true + + - id: 4.1.4 + text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)" + audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # Get the pod name in the openshift-sdn namespace + POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-sdn "$POD_NAME" -- stat -Lc "$i %n %U:%G" /config/kube-proxy-config.yaml 2>/dev/null + fi + use_multiple_values: true + tests: + test_items: + - flag: root:root + remediation: | + None required. The configuration is managed by OpenShift operators. + scored: true + + - id: 4.1.5 + text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Manual)" + audit: | + # Check permissions + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/kubernetes/kubelet.conf 2> /dev/null + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + None required. + scored: true + + - id: 4.1.6 + text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Manual)" + audit: | + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubelet.conf 2> /dev/null + use_multiple_values: true + tests: + test_items: + - flag: root:root + remediation: | + None required. + scored: true + + - id: 4.1.7 + text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive" + audit: | + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig.authentication.x509.clientCAFile' + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME permissions=%a" /etc/kubernetes/kubelet-ca.crt 2> /dev/null + tests: + test_items: + - flag: "/etc/kubernetes/kubelet-ca.crt" + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + No remediation required. OpenShift sets /etc/kubernetes/kubelet-ca.crt to 644 by default. + If permissions are more permissive than 644, update with: chmod 644 /etc/kubernetes/kubelet-ca.crt + scored: true + + + - id: 4.1.8 + text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)" + audit: | + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubelet-ca.crt 2> /dev/null + use_multiple_values: true + tests: + test_items: + - flag: root:root + remediation: | + None required. + scored: true + + - id: 4.1.9 + text: "Ensure that the kubelet --config configuration file has permissions set to 600 or more restrictive (Automated)" + audit: | + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /var/data/kubelet/config.json 2> /dev/null + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + None required. 
+ scored: true + + - id: 4.1.10 + text: "Ensure that the kubelet configuration file ownership is set to root:root (Automated)" + audit: | + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /var/data/kubelet/config.json 2> /dev/null + use_multiple_values: true + tests: + test_items: + - flag: root:root + remediation: | + None required. + scored: true + + - id: 4.2 + text: "Kubelet" + checks: + - id: 4.2.1 + text: "Activate Garbage collection in OpenShift Container Platform 4, as appropriate (Manual)" + audit: | + echo "Retrieving and inspecting garbage collection configuration from node-local kubelet configz..." + + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig' + tests: + test_items: + - flag: "evictionHard" + - flag: "imageGCHighThresholdPercent" + - flag: "imageGCLowThresholdPercent" + - flag: "imageMinimumGCAge" + remediation: | + OpenShift manages node garbage collection through KubeletConfig custom resources per MachineConfigPool. + To configure or adjust garbage collection thresholds, follow the documentation: + https://docs.openshift.com/container-platform/latest/nodes/nodes/nodes-nodes-garbage-collection.html + + Example: Create or modify a KubeletConfig object to include: + --- + evictionHard: + "memory.available": "200Mi" + "nodefs.available": "10%" + "imagefs.available": "15%" + imageGCHighThresholdPercent: 85 + imageGCLowThresholdPercent: 80 + imageMinimumGCAge: "2m0s" + + Then apply the `KubeletConfig` to the appropriate `MachineConfigPool`. + scored: true + + - id: 4.2.2 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: | + echo "Checking if anonymous-auth is disabled in kubelet configuration on the current node..." + + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig.authentication.anonymous.enabled' + tests: + test_items: + - flag: "false" + remediation: | + By default, OpenShift sets anonymous-auth to false in Kubelet configuration. + If this value is found to be true, create or patch a KubeletConfig object with: + + --- + kind: KubeletConfig + apiVersion: machineconfiguration.openshift.io/v1 + metadata: + name: disable-anonymous-auth + spec: + kubeletConfig: + authentication: + anonymous: + enabled: false + + Then apply this KubeletConfig to the appropriate MachineConfigPool. + See OpenShift documentation on configuring node-level security settings. + scored: true + + - id: 4.2.3 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: | + echo "Checking kubelet authorization mode on the current node..." + + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig.authorization.mode' + tests: + test_items: + - flag: AlwaysAllow + set: false + remediation: | + No remediation required. By default, OpenShift uses secure authorization modes such as 'Webhook' and does not allow AlwaysAllow. + If AlwaysAllow is found, the node must be reconfigured using a KubeletConfig applied through the appropriate MachineConfigPool. + scored: true + + + - id: 4.2.4 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: | + echo "Checking Kubelet 'clientCAFile' setting on current node..." 
+ + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz \ + | jq '.kubeletconfig.authentication.x509.clientCAFile' + tests: + test_items: + - flag: "/etc/kubernetes/kubelet-ca.crt" + remediation: | + No remediation required. OpenShift sets the clientCAFile by default to /etc/kubernetes/kubelet-ca.crt. + Manual modification is unsupported and unnecessary as OpenShift manages Kubelet certificate authentication via the Machine Config Operator. + scored: true + + + - id: 4.2.5 + text: "Verify that the read only port is not used or is set to 0 (Automated)" + audit: | + echo "Checking 'kubelet-read-only-port' argument in openshift-kube-apiserver config..." + + oc -n openshift-kube-apiserver get configmap config -o json \ + | jq -r '.data["config.yaml"]' \ + | yq '.apiServerArguments."kubelet-read-only-port"[0]' + tests: + test_items: + - flag: "0" + remediation: | + No remediation is required if the read-only port is set to 0. + If this value is not set to 0 (or the argument is missing), create a KubeletConfig object and apply it to the appropriate MachineConfigPool to disable the read-only port. + + Example KubeletConfig: + --- + apiVersion: machineconfiguration.openshift.io/v1 + kind: KubeletConfig + metadata: + name: disable-readonly-port + spec: + kubeletConfig: + readOnlyPort: 0 + scored: true + + + - id: 4.2.6 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (automated)" + audit: | + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz \ + | jq '.kubeletconfig' + tests: + test_items: + - path: ".streamingConnectionIdleTimeout" + compare: + op: noteq + value: "0s" + remediation: | + By default, OpenShift sets streamingConnectionIdleTimeout to 4h0m0s. + If it is manually set to "0s", this disables timeouts — which is insecure. + + To remediate, create a `KubeletConfig` CR with a safer timeout (e.g., 1h0m0s): + --- + apiVersion: machineconfiguration.openshift.io/v1 + kind: KubeletConfig + metadata: + name: set-streaming-timeout + spec: + kubeletConfig: + streamingConnectionIdleTimeout: "1h0m0s" + scored: true + + - id: 4.2.7 + text: "Ensure that the --make-iptables-util-chains argument is set to true (manual)" + audit: | + echo "Checking 'makeIPTablesUtilChains' setting in Kubelet config on current node..." + + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz \ + | jq '.kubeletconfig' + tests: + test_items: + - path: ".makeIPTablesUtilChains" + compare: + op: eq + value: true + remediation: | + No remediation is required. + By default, OpenShift sets makeIPTablesUtilChains to true. + This allows Kubelet to manage iptables rules and keep them in sync with the dynamic pod network configuration. + scored: true + + + - id: 4.2.8 + text: "Ensure that the kubeAPIQPS [--event-qps] argument is set to 0 or a level which ensures appropriate event capture (manual)" + audit: | + echo "Checking 'kubeAPIQPS' setting in Kubelet config on current node..." + + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz \ + | jq '.kubeletconfig' + tests: + test_items: + - path: ".kubeAPIQPS" + compare: + op: gte + value: 1 + remediation: | + OpenShift sets kubeAPIQPS to a default of 50, which is appropriate in most environments. 
+ If kubeAPIQPS is set to 0, event rate limiting is disabled, which can overwhelm the kubelet with excessive events. + + To configure a proper limit, create or modify a `KubeletConfig` resource with an appropriate value: + + --- + apiVersion: machineconfiguration.openshift.io/v1 + kind: KubeletConfig + metadata: + name: set-kubeapiqps + spec: + kubeletConfig: + kubeAPIQPS: 50 + scored: true + + + - id: 4.2.9 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (manual)" + audit: | + oc get configmap config -n openshift-kube-apiserver -ojson \ + | jq -r '.data["config.yaml"]' \ + | jq -r '.apiServerArguments["kubelet-client-certificate"][]?' + + oc get configmap config -n openshift-kube-apiserver -ojson \ + | jq -r '.data["config.yaml"]' \ + | jq -r '.apiServerArguments["kubelet-client-key"][]?' + tests: + bin_op: and + test_items: + - flag: "/etc/kubernetes/static-pod-certs/secrets/kubelet-client/tls.crt" + - flag: "/etc/kubernetes/static-pod-certs/secrets/kubelet-client/tls.key" + remediation: | + No remediation is required. OpenShift manages secure TLS connections to kubelets by default using its internal certificate authority. + These X.509 certificates are rotated and validated automatically by the platform. + Manual modifications to the TLS paths or keys are not supported and can lead to cluster issues. + scored: true + + + - id: 4.2.10 + text: "Ensure that the --rotate-certificates argument is not set to false (manual)" + audit: | + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz \ + | jq '.kubeletconfig' + tests: + test_items: + - path: ".rotateCertificates" + compare: + op: eq + value: true + remediation: | + No remediation required. By default, OpenShift enables certificate rotation via rotateCertificates=true. + If disabled, you must either enable rotation via KubeletConfig or implement external certificate renewal. + + Example remediation using KubeletConfig: + --- + apiVersion: machineconfiguration.openshift.io/v1 + kind: KubeletConfig + metadata: + name: enable-cert-rotation + spec: + kubeletConfig: + rotateCertificates: true + scored: true + + - id: 4.2.11 + text: "Verify that the RotateKubeletServerCertificate argument is set to true (manual)" + audit: | + echo "Checking that RotateKubeletServerCertificate is enabled in kubelet config on current node..." + + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + + echo "Verifying feature gate: RotateKubeletServerCertificate" + oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz \ + | jq '.kubeletconfig.featureGates.RotateKubeletServerCertificate' + + echo "Verifying that certificate rotation is enabled" + oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz \ + | jq '.kubeletconfig.rotateCertificates' + tests: + bin_op: and + test_items: + - flag: "RotateKubeletServerCertificate" + compare: + op: eq + value: true + - flag: "rotateCertificates" + compare: + op: eq + value: true + remediation: | + No remediation is required. OpenShift enables RotateKubeletServerCertificate by default and manages certificate rotation automatically. 
+ If the feature gate or rotation setting is disabled, configure a `KubeletConfig` CR and apply it to the MachineConfigPool:
+
+ ---
+ apiVersion: machineconfiguration.openshift.io/v1
+ kind: KubeletConfig
+ metadata:
+ name: enable-server-cert-rotation
+ spec:
+ kubeletConfig:
+ rotateCertificates: true
+ featureGates:
+ RotateKubeletServerCertificate: true
+ scored: true
+
+ - id: 4.2.13
+ text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)"
+ audit: |
+ # needs verification
+ # verify cipher suites
+ oc describe --namespace=openshift-ingress-operator ingresscontroller/default
+ oc get kubeapiservers.operator.openshift.io cluster -o json |jq .spec.observedConfig.servingInfo
+ oc get openshiftapiservers.operator.openshift.io cluster -o json |jq .spec.observedConfig.servingInfo
+ oc get cm -n openshift-authentication v4-0-config-system-cliconfig -o jsonpath='{.data.v4\-0\-config\-system\-cliconfig}' | jq .servingInfo
+ #check value for tlsSecurityProfile; null is returned if default is used
+ oc get kubeapiservers.operator.openshift.io cluster -o json |jq .spec.tlsSecurityProfile
+ type: manual
+ remediation: |
+ Follow the directions above and in the OpenShift documentation to configure the tlsSecurityProfile.
+ Configuring Ingress
+ scored: false
diff --git a/cfg/rh-1.8/policies.yaml b/cfg/rh-1.8/policies.yaml
new file mode 100644
index 00000000..506a9c00
--- /dev/null
+++ b/cfg/rh-1.8/policies.yaml
@@ -0,0 +1,486 @@
+---
+controls:
+version: rh-1.8
+id: 5
+text: "Policies"
+type: "policies"
+groups:
+ - id: 5.1
+ text: "RBAC and Service Accounts"
+ checks:
+ - id: 5.1.1
+ text: "Ensure that the cluster-admin role is only used where required (Manual)"
+ type: "manual"
+ audit: |
+ #To get a list of users and service accounts with the cluster-admin role
+ oc get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].kind |
+ grep cluster-admin
+ #To verify that kubeadmin is removed, no results should be returned
+ oc get secrets kubeadmin -n kube-system
+ remediation: |
+ Identify all clusterrolebindings to the cluster-admin role. Check if they are used and if they need this role or if they could use a role with fewer privileges.
+ Where possible, first bind users to a lower privileged role and then remove the clusterrolebinding to the cluster-admin role:
+ oc delete clusterrolebinding [name]
+ scored: false
+
+ - id: 5.1.2
+ text: "Minimize access to secrets (Manual)"
+ type: "manual"
+ remediation: |
+ Where possible, remove get, list and watch access to secret objects in the cluster.
+ scored: false
+
+ - id: 5.1.3
+ text: "Minimize wildcard use in Roles and ClusterRoles (Manual)"
+ type: "manual"
+ remediation: |
+ Where possible replace any use of wildcards in clusterroles and roles with specific
+ objects or actions.
+ scored: false
+
+ - id: 5.1.4
+ text: "Minimize access to create pods (Manual)"
+ type: "manual"
+ remediation: |
+ Where possible, remove create access to pod objects in the cluster.
+ scored: false
+
+ - id: 5.1.5
+ text: "Ensure that default service accounts are not actively used. (Manual)"
+ type: "manual"
+ remediation: |
+ None required.
+ scored: false
+
+ - id: 5.1.6
+ text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)"
+ type: "manual"
+ remediation: |
+ Modify the definition of pods and service accounts which do not need to mount service
+ account tokens to disable it.
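+
+ As an illustrative example (standard Kubernetes fields; the name below is a placeholder),
+ automatic token mounting can be disabled on the service account or in the pod spec:
+
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: example-sa
+ automountServiceAccountToken: false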
+ scored: false
+
+ - id: 5.2
+ text: "Security Context Constraints (SCCs)"
+ checks:
+ - id: 5.2.1
+ text: "Minimize the admission of privileged containers (Manual)"
+ audit: |
+ oc get scc -o json \
+ | jq -r '[.items[] | select(.allowPrivilegedContainer==false) | .metadata.name]
+ | length
+ | if . > 0 then "pass" else "fail" end'
+ tests:
+ test_items:
+ - flag: "pass"
+ remediation: |
+ If no SCCs exist that restrict privileged containers, create one by running:
+
+ oc create -f - <<EOF
+ apiVersion: security.openshift.io/v1
+ kind: SecurityContextConstraints
+ metadata:
+ name: restricted-no-privileged
+ allowPrivilegedContainer: false
+ runAsUser:
+ type: MustRunAsRange
+ seLinuxContext:
+ type: MustRunAs
+ users: []
+ groups:
+ - system:authenticated
+ EOF
+ scored: true
+
+ - id: 5.2.2
+ text: "Minimize the admission of containers wishing to share the host process ID namespace (Manual)"
+ audit: |
+ oc get scc -o json \
+ | jq -r '[.items[] | select(.allowHostPID==false) | .metadata.name]
+ | length
+ | if . > 0 then "pass" else "fail" end'
+ tests:
+ test_items:
+ - flag: "pass"
+ remediation: |
+ If SCCs with `allowHostPID: true` exist, ensure they are restricted to trusted service accounts only.
+
+ To create a restrictive SCC that prevents host PID sharing:
+
+ ---
+ apiVersion: security.openshift.io/v1
+ kind: SecurityContextConstraints
+ metadata:
+ name: restricted-no-hostpid
+ allowHostPID: false
+ runAsUser:
+ type: MustRunAsRange
+ seLinuxContext:
+ type: MustRunAs
+ users: []
+ groups:
+ - system:authenticated
+ ---
+
+ Apply the SCC and bind it only to users or groups that do **not** need hostPID access.
+ scored: true
+
+ - id: 5.2.3
+ text: "Minimize the admission of containers wishing to share the host IPC namespace (Manual)"
+ audit: |
+ oc get scc -o json \
+ | jq -r '[.items[] | select(.allowHostIPC==false) | .metadata.name]
+ | length
+ | if . > 0 then "pass" else "fail" end'
+ tests:
+ test_items:
+ - flag: "pass"
+ remediation: |
+ If no SCCs restrict hostIPC usage, create one that explicitly sets allowHostIPC: false:
+
+ ---
+ apiVersion: security.openshift.io/v1
+ kind: SecurityContextConstraints
+ metadata:
+ name: restricted-no-hostipc
+ allowHostIPC: false
+ runAsUser:
+ type: MustRunAsRange
+ seLinuxContext:
+ type: MustRunAs
+ users: []
+ groups:
+ - system:authenticated
+ ---
+
+ Then assign this SCC to general workloads and ensure any SCCs allowing hostIPC are tightly scoped via RBAC.
+ scored: true
+
+ - id: 5.2.4
+ text: "Minimize the admission of containers wishing to share the host network namespace (manual)"
+ audit: |
+ oc get scc -o json \
+ | jq -r '[.items[] | select(.allowHostNetwork==false) | .metadata.name]
+ | length
+ | if . > 0 then "pass" else "fail" end'
+ tests:
+ test_items:
+ - flag: "pass"
+ remediation: |
+ If no SCCs restrict host networking, create one by running:
+
+ ---
+ apiVersion: security.openshift.io/v1
+ kind: SecurityContextConstraints
+ metadata:
+ name: restricted-no-hostnetwork
+ allowHostNetwork: false
+ runAsUser:
+ type: MustRunAsRange
+ seLinuxContext:
+ type: MustRunAs
+ users: []
+ groups:
+ - system:authenticated
+ ---
+
+ Ensure only workloads that require `hostNetwork: true` (e.g., CNI, infra pods) are allowed to use SCCs where it is explicitly enabled. Restrict access to such SCCs using RBAC.
+ scored: true
+
+ - id: 5.2.5
+ text: "Minimize the admission of containers with allowPrivilegeEscalation (manual)"
+ audit: |
+ oc get scc -o json \
+ | jq -r '[.items[] | select(.allowPrivilegeEscalation==false) | .metadata.name]
+ | length
+ | if .
> 0 then "pass" else "fail" end' + tests: + test_items: + - flag: "pass" + remediation: | + If no SCCs exist that restrict the use of privilege escalation, create a custom SCC: + + --- + apiVersion: security.openshift.io/v1 + kind: SecurityContextConstraints + metadata: + name: restricted-no-priv-escalation + allowPrivilegeEscalation: false + runAsUser: + type: MustRunAsRange + seLinuxContext: + type: MustRunAs + users: [] + groups: + - system:authenticated + --- + + Assign this SCC only to workloads and users that **do not require** the ability to escalate privileges. + Use RBAC to restrict access to SCCs where `allowPrivilegeEscalation` is `true` to only trusted service accounts or admin roles. + scored: true + + + - id: 5.2.6 + text: "Minimize the admission of root containers (manual)" + audit: | + sccs=$(oc get scc -o json | jq -r '.items[] | select(.runAsUser.type == "MustRunAsNonRoot") | .metadata.name') + if [[ -n "$sccs" ]]; then + echo "pass" + else + echo "fail" + fi + tests: + test_items: + - flag: "pass" + remediation: | + If no SCC is found with `runAsUser.type: MustRunAsNonRoot`, create one as follows: + + --- + apiVersion: security.openshift.io/v1 + kind: SecurityContextConstraints + metadata: + name: restricted-nonroot + allowPrivilegeEscalation: false + runAsUser: + type: MustRunAsNonRoot + seLinuxContext: + type: MustRunAs + users: [] + groups: + - system:authenticated + --- + + Assign this SCC only to workloads that must not run as root. + If an SCC allows `RunAsAny`, audit and restrict access using RBAC to prevent misuse. + scored: true + + - id: 5.2.7 + text: "Minimize the admission of containers with the NET_RAW capability (manual)" + audit: | + oc get scc -o json \ + | jq -r '[.items[] + | select((.requiredDropCapabilities // []) | index("ALL")) + | .metadata.name] + | length + | if . > 0 then "pass" else "fail" end' + tests: + test_items: + - flag: "pass" + remediation: | + If no SCCs drop ALL capabilities, create a custom SCC that explicitly drops NET_RAW: + + --- + apiVersion: security.openshift.io/v1 + kind: SecurityContextConstraints + metadata: + name: restricted-no-netraw + requiredDropCapabilities: + - NET_RAW + allowPrivilegedContainer: false + runAsUser: + type: MustRunAsRange + seLinuxContext: + type: MustRunAs + users: [] + groups: + - system:authenticated + --- + + Apply this SCC to workloads that do not require NET_RAW. + If NET_RAW is required (e.g., for low-level networking apps), isolate those workloads with a specific SCC and restrict access via RBAC. + scored: true + + + - id: 5.2.8 + text: "Minimize the admission of containers with added capabilities (manual)" + audit: | + oc get scc -o json \ + | jq -r '[.items[] + | select(.allowedCapabilities == null) + | .metadata.name] + | length + | if . > 0 then "pass" else "fail" end' + oc get scc -o json \ + | jq -r '[.items[] + | select(.defaultAddCapabilities == null) + | .metadata.name] + | length + | if . > 0 then "true" else "false" end' + tests: + test_items: + - flag: "pass" + - flag: "true" + remediation: | + If no SCCs restrict added capabilities, create a custom SCC as shown below: + + --- + apiVersion: security.openshift.io/v1 + kind: SecurityContextConstraints + metadata: + name: restricted-no-added-caps + allowPrivilegedContainer: false + allowedCapabilities: [] + defaultAddCapabilities: [] + runAsUser: + type: MustRunAsRange + seLinuxContext: + type: MustRunAs + users: [] + groups: + - system:authenticated + --- + + Assign this SCC to workloads that do **not** require elevated capabilities. 
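+ As an illustrative example (the service account and namespace below are placeholders),
+ use of the SCC can be limited to a single service account with:
+ oc adm policy add-scc-to-user restricted-no-added-caps -z my-app-sa -n my-namespace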
+ Create separate SCCs for workloads that require specific capabilities, and use RBAC to tightly restrict access to them. + scored: true + + - id: 5.2.9 + text: "Minimize the admission of containers with capabilities assigned (manual)" + audit: | + oc get scc -o json \ + | jq -r '[.items[] + | select((.requiredDropCapabilities // []) | index("ALL")) + | .metadata.name] + | length + | if . > 0 then "true" else "false" end' + tests: + test_items: + - flag: "true" + remediation: | + If no SCCs drop all capabilities, create one that sets 'requiredDropCapabilities: [ALL]': + + --- + apiVersion: security.openshift.io/v1 + kind: SecurityContextConstraints + metadata: + name: restricted-drop-all-capabilities + requiredDropCapabilities: + - ALL + allowPrivilegedContainer: false + runAsUser: + type: MustRunAsRange + seLinuxContext: + type: MustRunAs + users: [] + groups: + - system:authenticated + --- + + Apply this SCC to general-purpose workloads that do not require elevated Linux capabilities. + If certain workloads require capabilities, create a separate SCC with minimal permissions and scope it using RBAC. + scored: true + + - id: 5.2.10 + text: "Minimize access to privileged Security Context Constraints (Manual)" + type: "manual" + remediation: | + Remove any users and groups who do not need access to an SCC, following the + principle of least privilege. + You can remove users and groups from an SCC using the oc edit scc $NAME + command. + Additionally, you can create your own SCCs that contain the container functionality you + need for a particular use case and assign that SCC to users and groups if the default + SCCs are not appropriate for your use case. + scored: false + + + - id: 5.3 + text: "Network Policies and CNI" + checks: + - id: 5.3.1 + text: "Ensure that the CNI in use supports Network Policies (Manual)" + type: "manual" + remediation: | + None required. + scored: false + + - id: 5.3.2 + text: "Ensure that all Namespaces have Network Policies defined (Manual)" + type: "manual" + audit: | + #Run the following command and review the NetworkPolicy objects created in the cluster. + oc -n all get networkpolicy + remediation: | + Follow the documentation and create NetworkPolicy objects as you need them. + scored: false + + - id: 5.4 + text: "Secrets Management" + checks: + - id: 5.4.1 + text: "Prefer using secrets as files over secrets as environment variables (Manual)" + type: "manual" + audit: | + #Run the following command to find references to objects which use environment variables defined from secrets. + oc get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} + {.metadata.name} {"\n"}{end}' -A + remediation: | + If possible, rewrite application code to read secrets from mounted secret files, rather than + from environment variables. + scored: false + + - id: 5.4.2 + text: "Consider external secret storage (Manual)" + type: "manual" + remediation: | + Refer to the secrets management options offered by your cloud provider or a third-party + secrets management solution. 
+ scored: false + + - id: 5.5 + text: "Extensible Admission Control" + checks: + - id: 5.5.1 + text: "Configure Image Provenance using image controller configuration parameters (Manual)" + type: "manual" + remediation: | + Follow the OpenShift documentation: [Image configuration resources](https://docs.openshift.com/container-platform/4.5/openshift_images/image-configuration.html + scored: false + + - id: 5.7 + text: "General Policies" + checks: + - id: 5.7.1 + text: "Create administrative boundaries between resources using namespaces (Manual)" + type: "manual" + audit: | + #Run the following command and review the namespaces created in the cluster. + oc get namespaces + #Ensure that these namespaces are the ones you need and are adequately administered as per your requirements. + remediation: | + Follow the documentation and create namespaces for objects in your deployment as you need + them. + scored: false + + - id: 5.7.2 + text: "Ensure that the seccomp profile is set to docker/default in your pod definitions (Manual)" + type: "manual" + remediation: | + To enable the default seccomp profile, use the reserved value /runtime/default that will + make sure that the pod uses the default policy available on the host. + scored: false + + - id: 5.7.3 + text: "Apply Security Context to Your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply security contexts to your pods. For a + suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker + Containers. + scored: false + + - id: 5.7.4 + text: "The default namespace should not be used (Manual)" + type: "manual" + audit: | + #Run this command to list objects in default namespace + oc project default + oc get all + #The only entries there should be system managed resources such as the kubernetes and openshift service + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. + scored: false diff --git a/cmd/util.go b/cmd/util.go index 7ef28a55..13434424 100644 --- a/cmd/util.go +++ b/cmd/util.go @@ -546,8 +546,10 @@ func getPlatformBenchmarkVersion(platform Platform) string { return "rh-0.7" case "4.1": return "rh-1.0" - case "4.11", "4.12", "4.13": + case "4.11": return "rh-1.4" + case "4.13": + return "rh-1.8" } case "vmware": return "tkgi-1.2.53" @@ -623,7 +625,7 @@ func getOpenShiftInfo() Platform { func getOcpValidVersion(ocpVer string) (string, error) { ocpOriginal := ocpVer - valid := []string{"3.10", "4.1", "4.11", "4.12", "4.13"} + valid := []string{"3.10", "4.1", "4.11", "4.13"} for !isEmpty(ocpVer) { glog.V(3).Info(fmt.Sprintf("getOcpBenchmarkVersion check for ocp: %q \n", ocpVer)) if slices.Contains(valid, ocpVer) { diff --git a/cmd/util_test.go b/cmd/util_test.go index 7f4117c3..f5bb23ec 100644 --- a/cmd/util_test.go +++ b/cmd/util_test.go @@ -720,6 +720,13 @@ func Test_getPlatformBenchmarkVersion(t *testing.T) { }, want: "rh-1.4", }, + { + name: "openshift4", + args: args{ + platform: Platform{Name: "ocp", Version: "4.13"}, + }, + want: "rh-1.8", + }, { name: "openshift4", args: args{