Mirror of https://github.com/aquasecurity/kube-bench.git (synced 2026-02-14 18:10:00 +00:00)

Compare commits (60 commits)
| SHA1 |
|---|
| f1807bb192 |
| c1bee59a02 |
| 8e7777928f |
| 43a9034b2f |
| 5848e5df6c |
| 651df2d4ce |
| 1aa87bbebb |
| 06159c6edb |
| 22f5a1c559 |
| 0ec876e1bc |
| 9e8c6b2c7d |
| 581b68d985 |
| 379f11996f |
| 659b0c1cad |
| 8afa78abaf |
| 2938a24924 |
| 462a50341a |
| 60eb8104ad |
| 6eb894633a |
| 428f433fae |
| 8a3701577b |
| e3e9e7d390 |
| e25d283dd1 |
| b48ee8511f |
| 315817617b |
| 227665c9e8 |
| 4fc9c0e7a8 |
| 1f401b1a50 |
| 10e0a78701 |
| 182cbaa71d |
| 7793925b22 |
| 1cf0f8cd92 |
| c9382e4e96 |
| ec1005509f |
| 5678009fae |
| 25f773b279 |
| e044dcaffb |
| 6a39a2e516 |
| 496ec149bc |
| c7d9863e57 |
| 0990df031b |
| c64cf3d19d |
| a983f0c9de |
| 691afc028c |
| 02305b2e7a |
| 6d234c5155 |
| 506198ce97 |
| fd531a75a7 |
| 295b5e6aa9 |
| 6943f0690a |
| f52c5acbe6 |
| 0fd581935e |
| c4dc17c96c |
| 76804bf7fa |
| 014ac455b5 |
| 844a28b3fd |
| 21dd168736 |
| e3becc9f19 |
| 52a646c2a3 |
| 0333e55b63 |
.github/workflows/build.yml (vendored): 45 changed lines
@@ -14,7 +14,6 @@ on:
       - "LICENSE"
       - "NOTICE"
 env:
-  GO_VERSION: "1.23.6"
   KIND_VERSION: "v0.11.1"
   KIND_IMAGE: "kindest/node:v1.21.1@sha256:69860bda5563ac81e3c0057d654b5253219618a22ec3a346306239bba8cfa1a6"

@@ -23,29 +22,29 @@ jobs:
     name: Lint
     runs-on: ubuntu-latest
     steps:
-      - name: Setup Go
-        uses: actions/setup-go@v5
-        with:
-          go-version: ${{ env.GO_VERSION }}
       - name: Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
+      - name: Setup Go
+        uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
+        with:
+          go-version-file: go.mod
       - name: yaml-lint
         uses: ibiqlik/action-yamllint@v3
       - name: Setup golangci-lint
-        uses: golangci/golangci-lint-action@v6
+        uses: golangci/golangci-lint-action@v8
         with:
-          version: v1.64
+          version: v2.5.0
           args: --verbose --timeout 2m
   unit:
     name: Unit tests
     runs-on: ubuntu-latest
     steps:
-      - name: Setup Go
-        uses: actions/setup-go@v5
-        with:
-          go-version: ${{ env.GO_VERSION }}
       - name: Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
+      - name: Setup Go
+        uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
+        with:
+          go-version-file: go.mod
       - name: Run unit tests
         run: make tests
       - name: Upload code coverage

@@ -56,12 +55,12 @@ jobs:
     name: E2e tests
     runs-on: ubuntu-latest
     steps:
-      - name: Setup Go
-        uses: actions/setup-go@v5
-        with:
-          go-version: ${{ env.GO_VERSION }}
       - name: Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
+      - name: Setup Go
+        uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
+        with:
+          go-version-file: go.mod
       - name: Setup Kubernetes cluster (KIND)
         uses: engineerd/setup-kind@v0.6.2
         with:

@@ -86,14 +85,14 @@ jobs:
     runs-on: ubuntu-latest
     needs: [e2e, unit]
     steps:
-      - name: Setup Go
-        uses: actions/setup-go@v5
-        with:
-          go-version: ${{ env.GO_VERSION }}
       - name: Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
         with:
           fetch-depth: 0
+      - name: Setup Go
+        uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
+        with:
+          go-version-file: go.mod
       - name: Dry-run release snapshot
         uses: goreleaser/goreleaser-action@v6
         with:
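The setup-go bump above also changes how the action is referenced: instead of a floating tag it is pinned to a full commit SHA, with the human-readable version recorded in a trailing comment, and the Go version now comes from go.mod rather than a workflow env var. A minimal sketch of the two reference styles, restated from the diff with editorial comments:

# Sketch of the pinning pattern visible in the diff above.
- name: Checkout code
  uses: actions/checkout@v6    # floating tag: follows whatever v6 points to
- name: Setup Go
  # immutable commit SHA: a moved or re-tagged release cannot silently
  # change the action that runs; the comment records the version
  uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
  with:
    go-version-file: go.mod    # Go toolchain version taken from go.mod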
.github/workflows/mkdocs-deploy.yaml (vendored): 4 changed lines
@@ -16,11 +16,11 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout main
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
         with:
           fetch-depth: 0
           persist-credentials: true
-      - uses: actions/setup-python@v5
+      - uses: actions/setup-python@v6
         with:
           python-version: 3.x
       - run: |
.github/workflows/publish.yml (vendored): 4 changed lines
@@ -16,14 +16,14 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Check Out Repo
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
       - name: Set up QEMU
         uses: docker/setup-qemu-action@v3
       - name: Set up Docker Buildx
         id: buildx
         uses: docker/setup-buildx-action@v3
       - name: Cache Docker layers
-        uses: actions/cache@v4
+        uses: actions/cache@v5
         with:
           path: /tmp/.buildx-cache
           key: ${{ runner.os }}-buildxarch-${{ github.sha }}
.github/workflows/release.yml (vendored): 11 changed lines
@@ -5,7 +5,6 @@ on:
     tags:
       - "v*"
 env:
-  GO_VERSION: "1.23.6"
   KIND_VERSION: "v0.11.1"
   KIND_IMAGE: "kindest/node:v1.21.1@sha256:69860bda5563ac81e3c0057d654b5253219618a22ec3a346306239bba8cfa1a6"

@@ -14,14 +13,14 @@ jobs:
     name: Release
     runs-on: ubuntu-latest
     steps:
-      - name: Setup Go
-        uses: actions/setup-go@v5
-        with:
-          go-version: ${{ env.GO_VERSION }}
       - name: Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
         with:
           fetch-depth: 0
+      - name: Setup Go
+        uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
+        with:
+          go-version-file: go.mod
       - name: Run unit tests
         run: make tests
       - name: Setup Kubernetes cluster (KIND)
@@ -1,10 +1,28 @@
 ---
+version: "2"
 linters:
-  disable-all: true
+  default: none
   enable:
     - gocyclo
-    - gofmt
-    - goimports
     - govet
     - misspell
-    - typecheck
+  exclusions:
+    generated: lax
+    presets:
+      - comments
+      - common-false-positives
+      - legacy
+      - std-error-handling
+    paths:
+      - third_party$
+      - builtin$
+      - examples$
+formatters:
+  enable:
+    - gofmt
+    - goimports
+  exclusions:
+    generated: lax
+    paths:
+      - third_party$
+      - builtin$
+      - examples$
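For context, golangci-lint's v2 configuration schema moves formatting tools out of `linters` into a dedicated `formatters` section and replaces `disable-all: true` with `default: none`, which is what the rewrite above reflects. A reduced sketch of the v2 shape, distilled from the config above:

# Sketch of the golangci-lint v2 config layout seen in this hunk.
version: "2"
linters:
  default: none     # start from an empty set (v1 used disable-all: true)
  enable:
    - govet         # only analysis linters live under `linters` in v2
formatters:
  enable:
    - gofmt         # gofmt/goimports are configured separately as formatters
    - goimports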
@@ -1,4 +1,4 @@
-FROM golang:1.25.0 AS build
+FROM golang:1.25.7 AS build
 WORKDIR /go/src/github.com/aquasecurity/kube-bench/
 COPY makefile makefile
 COPY go.mod go.sum ./

@@ -19,7 +19,7 @@ RUN /bin/bash -c 'echo "$(<kubectl.sha256) /usr/local/bin/kubectl" | sha256sum
 RUN chmod +x /usr/local/bin/kubectl

-FROM alpine:3.22.1 AS run
+FROM alpine:3.23.3 AS run
 WORKDIR /opt/kube-bench/
 # add GNU ps for -C, -o cmd, --no-headers support and add findutils to get GNU xargs
 # https://github.com/aquasecurity/kube-bench/issues/109
@@ -1,4 +1,4 @@
-FROM golang:1.25.0 AS build
+FROM golang:1.25.7 AS build
 WORKDIR /go/src/github.com/aquasecurity/kube-bench/
 COPY makefile makefile
 COPY go.mod go.sum ./
@@ -1,4 +1,4 @@
-FROM golang:1.25.0 AS build
+FROM golang:1.25.7 AS build
 WORKDIR /go/src/github.com/aquasecurity/kube-bench/
 COPY makefile makefile
 COPY go.mod go.sum ./
@@ -33,7 +33,7 @@ groups:
           Where possible, first bind users to a lower privileged role and then remove the
           clusterrolebinding to the cluster-admin role : kubectl delete clusterrolebinding [name]
           Condition: is_compliant is false if rolename is not cluster-admin and rolebinding is cluster-admin.
-        scored: true
+        scored: false

       - id: 5.1.2
         text: "Minimize access to secrets (Automated)"
@@ -46,7 +46,7 @@ groups:
               value: no
         remediation: |
           Where possible, remove get, list and watch access to Secret objects in the cluster.
-        scored: true
+        scored: false

       - id: 5.1.3
         text: "Minimize wildcard use in Roles and ClusterRoles (Automated)"
@@ -93,7 +93,7 @@ groups:
           objects or actions.
           Condition: role_is_compliant is false if ["*"] is found in rules.
           Condition: clusterrole_is_compliant is false if ["*"] is found in rules.
-        scored: true
+        scored: false

       - id: 5.1.4
         text: "Minimize access to create pods (Automated)"
@@ -107,7 +107,7 @@ groups:
               value: no
         remediation: |
           Where possible, remove create access to pod objects in the cluster.
-        scored: true
+        scored: false
       - id: 5.1.5
         text: "Ensure that default service accounts are not actively used (Automated)"
         audit: |
@@ -125,7 +125,7 @@ groups:
           to the Kubernetes API server.
           Modify the configuration of each default service account to include this value
           `automountServiceAccountToken: false`.
-        scored: true
+        scored: false

       - id: 5.1.6
         text: "Ensure that Service Account Tokens are only mounted where necessary (Automated)"
@@ -158,7 +158,7 @@ groups:
           Condition: Pod is_compliant to true when
           - ServiceAccount is automountServiceAccountToken: false and Pod is automountServiceAccountToken: false or notset
           - ServiceAccount is automountServiceAccountToken: true notset and Pod is automountServiceAccountToken: false
-        scored: true
+        scored: false

       - id: 5.1.7
         text: "Avoid use of system:masters group (Manual)"
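The only change across these hunks is flipping `scored: true` to `scored: false` on the 5.1.x RBAC checks. In kube-bench's check schema, a failing scored check is, to my understanding, reported as FAIL and counted in the summary totals, while a failing unscored check surfaces as WARN. A sketch of the distinction, with entries taken from this compare:

# Sketch: the `scored` field controls how a failing check is reported.
- id: 5.1.2
  text: "Minimize access to secrets (Automated)"
  scored: false   # failure surfaces as a warning (informational)
- id: 1.2.2
  text: "Ensure that the --token-auth-file parameter is not set (Automated)"
  scored: true    # failure is a hard FAIL that affects the totals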
cfg/cis-1.11/config.yaml (new file): 2 lines
@@ -0,0 +1,2 @@
---
## Version-specific settings that override the values in cfg/config.yaml
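The new config.yaml is an empty override stub: per the comment, a version directory only declares settings that differ from cfg/config.yaml, and everything else falls through to the shared defaults. A hypothetical override, purely illustrative and not part of this commit, though the `$apiserverconf`-style variables it would feed appear throughout the benchmark files below:

# Hypothetical example of what a version-specific override could contain:
# point kube-bench at a non-default API server manifest location,
# leaving every other value to fall through to cfg/config.yaml.
master:
  apiserver:
    confs:
      - /etc/kubernetes/manifests/kube-apiserver.yaml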
cfg/cis-1.11/controlplane.yaml (new file): 62 lines
@@ -0,0 +1,62 @@
---
controls:
version: "cis-1.11"
id: 3
text: "Control Plane Configuration"
type: "controlplane"
groups:
  - id: 3.1
    text: "Authentication and Authorization"
    checks:
      - id: 3.1.1
        text: "Client certificate authentication should not be used for users (Manual)"
        type: "manual"
        remediation: |
          Alternative mechanisms provided by Kubernetes such as the use of OIDC should be
          implemented in place of client certificates.
        scored: false

      - id: 3.1.2
        text: "Service account token authentication should not be used for users (Manual)"
        type: "manual"
        remediation: |
          Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented
          in place of service account tokens.
        scored: false

      - id: 3.1.3
        text: "Bootstrap token authentication should not be used for users (Manual)"
        type: "manual"
        remediation: |
          Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented
          in place of bootstrap tokens.
        scored: false

  - id: 3.2
    text: "Logging"
    checks:
      - id: 3.2.1
        text: "Ensure that a minimal audit policy is created (Manual)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--audit-policy-file"
              set: true
        remediation: |
          Create an audit policy file for your cluster.
        scored: false

      - id: 3.2.2
        text: "Ensure that the audit policy covers key security concerns (Manual)"
        type: "manual"
        remediation: |
          Review the audit policy provided for the cluster and ensure that it covers
          at least the following areas,
          - Access to Secrets managed by the cluster. Care should be taken to only
            log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in
            order to avoid risk of logging sensitive data.
          - Modification of Pod and Deployment objects.
          - Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`.
          For most requests, minimally logging at the Metadata level is recommended
          (the most basic level of logging).
        scored: false
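controlplane.yaml mixes the two check styles used throughout these files: the 3.1.x entries are `type: manual` with no audit command, while 3.2.1 runs an audit and tests the process line for a flag. Reduced to their skeletons:

# Sketch: purely manual check vs. audited flag check, as used above.
- id: 3.1.1
  type: "manual"     # no audit command; always left for human review
  scored: false
- id: 3.2.1
  audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"  # $apiserverbin substituted from config.yaml
  tests:
    test_items:
      - flag: "--audit-policy-file"
        set: true    # passes only if the flag appears in the audit output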
cfg/cis-1.11/etcd.yaml (new file): 135 lines
@@ -0,0 +1,135 @@
---
controls:
version: "cis-1.11"
id: 2
text: "Etcd Node Configuration"
type: "etcd"
groups:
  - id: 2
    text: "Etcd Node Configuration"
    checks:
      - id: 2.1
        text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)"
        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
        tests:
          bin_op: and
          test_items:
            - flag: "--cert-file"
              env: "ETCD_CERT_FILE"
            - flag: "--key-file"
              env: "ETCD_KEY_FILE"
        remediation: |
          Follow the etcd service documentation and configure TLS encryption.
          Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml
          on the master node and set the below parameters.
          --cert-file=</path/to/ca-file>
          --key-file=</path/to/key-file>
        scored: true

      - id: 2.2
        text: "Ensure that the --client-cert-auth argument is set to true (Automated)"
        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
        tests:
          test_items:
            - flag: "--client-cert-auth"
              env: "ETCD_CLIENT_CERT_AUTH"
              compare:
                op: eq
                value: true
        remediation: |
          Edit the etcd pod specification file $etcdconf on the master
          node and set the below parameter.
          --client-cert-auth="true"
        scored: true

      - id: 2.3
        text: "Ensure that the --auto-tls argument is not set to true (Automated)"
        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
        tests:
          bin_op: or
          test_items:
            - flag: "--auto-tls"
              env: "ETCD_AUTO_TLS"
              set: false
            - flag: "--auto-tls"
              env: "ETCD_AUTO_TLS"
              compare:
                op: eq
                value: false
        remediation: |
          Edit the etcd pod specification file $etcdconf on the master
          node and either remove the --auto-tls parameter or set it to false.
          --auto-tls=false
        scored: true

      - id: 2.4
        text: "Ensure that the --peer-cert-file and --peer-key-file arguments are
          set as appropriate (Automated)"
        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
        tests:
          bin_op: and
          test_items:
            - flag: "--peer-cert-file"
              env: "ETCD_PEER_CERT_FILE"
            - flag: "--peer-key-file"
              env: "ETCD_PEER_KEY_FILE"
        remediation: |
          Follow the etcd service documentation and configure peer TLS encryption as appropriate
          for your etcd cluster.
          Then, edit the etcd pod specification file $etcdconf on the
          master node and set the below parameters.
          --peer-client-file=</path/to/peer-cert-file>
          --peer-key-file=</path/to/peer-key-file>
        scored: true

      - id: 2.5
        text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)"
        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
        tests:
          test_items:
            - flag: "--peer-client-cert-auth"
              env: "ETCD_PEER_CLIENT_CERT_AUTH"
              compare:
                op: eq
                value: true
        remediation: |
          Edit the etcd pod specification file $etcdconf on the master
          node and set the below parameter.
          --peer-client-cert-auth=true
        scored: true

      - id: 2.6
        text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)"
        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
        tests:
          bin_op: or
          test_items:
            - flag: "--peer-auto-tls"
              env: "ETCD_PEER_AUTO_TLS"
              set: false
            - flag: "--peer-auto-tls"
              env: "ETCD_PEER_AUTO_TLS"
              compare:
                op: eq
                value: false
        remediation: |
          Edit the etcd pod specification file $etcdconf on the master
          node and either remove the --peer-auto-tls parameter or set it to false.
          --peer-auto-tls=false
        scored: true

      - id: 2.7
        text: "Ensure that a unique Certificate Authority is used for etcd (Manual)"
        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
        tests:
          test_items:
            - flag: "--trusted-ca-file"
              env: "ETCD_TRUSTED_CA_FILE"
        remediation: |
          [Manual test]
          Follow the etcd documentation and create a dedicated certificate authority setup for the
          etcd service.
          Then, edit the etcd pod specification file $etcdconf on the
          master node and set the below parameter.
          --trusted-ca-file=</path/to/ca-file>
        scored: false
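A recurring pattern in this file is the two-item `bin_op: or` used for "must not be set to true" checks such as 2.3 and 2.6: the check passes when the flag (or its `env` fallback variable) is absent, or when it is present and explicitly false. Reduced to its skeleton:

# Sketch of the "not set to true" pattern from checks 2.3 and 2.6.
tests:
  bin_op: or              # either branch passing satisfies the check
  test_items:
    - flag: "--auto-tls"
      env: "ETCD_AUTO_TLS"
      set: false          # branch 1: flag and env var absent entirely
    - flag: "--auto-tls"
      env: "ETCD_AUTO_TLS"
      compare:
        op: eq
        value: false      # branch 2: present, but explicitly false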
cfg/cis-1.11/master.yaml (new file): 932 lines
@@ -0,0 +1,932 @@
---
controls:
version: "cis-1.11"
id: 1
text: "Control Plane Security Configuration"
type: "master"
groups:
  - id: 1.1
    text: "Control Plane Node Configuration Files"
    checks:
      - id: 1.1.1
        text: "Ensure that the API server pod specification file permissions are set to 600 or more restrictive (Automated)"
        audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'"
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "600"
        remediation: |
          Run the below command (based on the file location on your system) on the
          control plane node.
          For example, chmod 600 $apiserverconf
        scored: true

      - id: 1.1.2
        text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)"
        audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'"
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example, chown root:root $apiserverconf
        scored: true

      - id: 1.1.3
        text: "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Automated)"
        audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'"
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "600"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example, chmod 600 $controllermanagerconf
        scored: true

      - id: 1.1.4
        text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)"
        audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'"
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example, chown root:root $controllermanagerconf
        scored: true

      - id: 1.1.5
        text: "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Automated)"
        audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'"
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "600"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example, chmod 600 $schedulerconf
        scored: true

      - id: 1.1.6
        text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)"
        audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'"
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example, chown root:root $schedulerconf
        scored: true

      - id: 1.1.7
        text: "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Automated)"
        audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c permissions=%a; fi'"
        use_multiple_values: true
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "600"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example,
          chmod 600 $etcdconf
        scored: true

      - id: 1.1.8
        text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)"
        audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c %U:%G; fi'"
        use_multiple_values: true
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example,
          chown root:root $etcdconf
        scored: true

      - id: 1.1.9
        text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Manual)"
        audit: |
          ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a
          find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a
        use_multiple_values: true
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "600"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example, chmod 600 <path/to/cni/files>
        scored: false

      - id: 1.1.10
        text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)"
        audit: |
          ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G
          find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G
        use_multiple_values: true
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example,
          chown root:root <path/to/cni/files>
        scored: false

      - id: 1.1.11
        text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)"
        audit: |
          DATA_DIR=''
          for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do
            if test -d "$d"; then DATA_DIR="$d"; fi
          done
          if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi
          stat -c permissions=%a "$DATA_DIR"
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "700"
        remediation: |
          On the etcd server node, get the etcd data directory, passed as an argument --data-dir,
          from the command 'ps -ef | grep etcd'.
          Run the below command (based on the etcd data directory found above). For example,
          chmod 700 /var/lib/etcd
        scored: true

      - id: 1.1.12
        text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)"
        audit: |
          DATA_DIR=''
          for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do
            if test -d "$d"; then DATA_DIR="$d"; fi
          done
          if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi
          stat -c %U:%G "$DATA_DIR"
        tests:
          test_items:
            - flag: "etcd:etcd"
        remediation: |
          On the etcd server node, get the etcd data directory, passed as an argument --data-dir,
          from the command 'ps -ef | grep etcd'.
          Run the below command (based on the etcd data directory found above).
          For example, chown etcd:etcd /var/lib/etcd
        scored: true

      - id: 1.1.13
        text: "Ensure that the default administrative credential file permissions are set to 600 (Automated)"
        audit: |
          for adminconf in /etc/kubernetes/admin.conf /etc/kubernetes/super-admin.conf; do if test -e $adminconf; then stat -c "permissions=%a %n" $adminconf; fi; done
        use_multiple_values: true
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "600"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example, chmod 600 /etc/kubernetes/admin.conf
          On Kubernetes 1.29+ the super-admin.conf file should also be modified, if present.
          For example, chmod 600 /etc/kubernetes/super-admin.conf
        scored: true

      - id: 1.1.14
        text: "Ensure that the default administrative credential file ownership is set to root:root (Automated)"
        audit: |
          for adminconf in /etc/kubernetes/admin.conf /etc/kubernetes/super-admin.conf; do if test -e $adminconf; then stat -c "ownership=%U:%G %n" $adminconf; fi; done
        use_multiple_values: true
        tests:
          test_items:
            - flag: "ownership"
              compare:
                op: eq
                value: "root:root"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example, chown root:root /etc/kubernetes/admin.conf
          On Kubernetes 1.29+ the super-admin.conf file should also be modified, if present.
          For example, chown root:root /etc/kubernetes/super-admin.conf
        scored: true

      - id: 1.1.15
        text: "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)"
        audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'"
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "600"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example,
          chmod 600 $schedulerkubeconfig
        scored: true

      - id: 1.1.16
        text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)"
        audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'"
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example,
          chown root:root $schedulerkubeconfig
        scored: true

      - id: 1.1.17
        text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)"
        audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'"
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "600"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example,
          chmod 600 $controllermanagerkubeconfig
        scored: true

      - id: 1.1.18
        text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)"
        audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c %U:%G $controllermanagerkubeconfig; fi'"
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example,
          chown root:root $controllermanagerkubeconfig
        scored: true

      - id: 1.1.19
        text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)"
        audit: "find /etc/kubernetes/pki/ | xargs stat -c %U:%G"
        use_multiple_values: true
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example,
          chown -R root:root /etc/kubernetes/pki/
        scored: true

      - id: 1.1.20
        text: "Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive (Manual)"
        audit: "find /etc/kubernetes/pki/ -name '*.crt' | xargs stat -c permissions=%a"
        use_multiple_values: true
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "644"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example,
          chmod -R 644 /etc/kubernetes/pki/*.crt
        scored: false

      - id: 1.1.21
        text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)"
        audit: "find /etc/kubernetes/pki/ -name '*.key' | xargs stat -c permissions=%a"
        use_multiple_values: true
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "600"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example,
          chmod -R 600 /etc/kubernetes/pki/*.key
        scored: false

  - id: 1.2
    text: "API Server"
    checks:
      - id: 1.2.1
        text: "Ensure that the --anonymous-auth argument is set to false (Manual)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--anonymous-auth"
              compare:
                op: eq
                value: false
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and set the below parameter.
          --anonymous-auth=false
        scored: false

      - id: 1.2.2
        text: "Ensure that the --token-auth-file parameter is not set (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--token-auth-file"
              set: false
        remediation: |
          Follow the documentation and configure alternate mechanisms for authentication. Then,
          edit the API server pod specification file $apiserverconf
          on the control plane node and remove the --token-auth-file=<filename> parameter.
        scored: true

      - id: 1.2.3
        text: "Ensure that the --DenyServiceExternalIPs is set (Manual)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          bin_op: or
          test_items:
            - flag: "--enable-admission-plugins"
              compare:
                op: has
                value: "DenyServiceExternalIPs"
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and add the `DenyServiceExternalIPs` plugin
          to the enabled admission plugins, as such --enable-admission-plugin=DenyServiceExternalIPs.
        scored: false

      - id: 1.2.4
        text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          bin_op: and
          test_items:
            - flag: "--kubelet-client-certificate"
            - flag: "--kubelet-client-key"
        remediation: |
          Follow the Kubernetes documentation and set up the TLS connection between the
          apiserver and kubelets. Then, edit API server pod specification file
          $apiserverconf on the control plane node and set the
          kubelet client certificate and key parameters as below.
          --kubelet-client-certificate=<path/to/client-certificate-file>
          --kubelet-client-key=<path/to/client-key-file>
        scored: true

      - id: 1.2.5
        text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--kubelet-certificate-authority"
        remediation: |
          Follow the Kubernetes documentation and setup the TLS connection between
          the apiserver and kubelets. Then, edit the API server pod specification file
          $apiserverconf on the control plane node and set the
          --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority.
          --kubelet-certificate-authority=<ca-string>
        scored: true

      - id: 1.2.6
        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--authorization-mode"
              compare:
                op: nothave
                value: "AlwaysAllow"
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow.
          One such example could be as below.
          --authorization-mode=RBAC
        scored: true

      - id: 1.2.7
        text: "Ensure that the --authorization-mode argument includes Node (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--authorization-mode"
              compare:
                op: has
                value: "Node"
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and set the --authorization-mode parameter to a value that includes Node.
          --authorization-mode=Node,RBAC
        scored: true

      - id: 1.2.8
        text: "Ensure that the --authorization-mode argument includes RBAC (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--authorization-mode"
              compare:
                op: has
                value: "RBAC"
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and set the --authorization-mode parameter to a value that includes RBAC,
          for example `--authorization-mode=Node,RBAC`.
        scored: true

      - id: 1.2.9
        text: "Ensure that the admission control plugin EventRateLimit is set (Manual)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--enable-admission-plugins"
              compare:
                op: has
                value: "EventRateLimit"
        remediation: |
          Follow the Kubernetes documentation and set the desired limits in a configuration file.
          Then, edit the API server pod specification file $apiserverconf
          and set the below parameters.
          --enable-admission-plugins=...,EventRateLimit,...
          --admission-control-config-file=<path/to/configuration/file>
        scored: false

      - id: 1.2.10
        text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          bin_op: or
          test_items:
            - flag: "--enable-admission-plugins"
              compare:
                op: nothave
                value: AlwaysAdmit
            - flag: "--enable-admission-plugins"
              set: false
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a
          value that does not include AlwaysAdmit.
        scored: true

      - id: 1.2.11
        text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--enable-admission-plugins"
              compare:
                op: has
                value: "AlwaysPullImages"
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and set the --enable-admission-plugins parameter to include
          AlwaysPullImages.
          --enable-admission-plugins=...,AlwaysPullImages,...
        scored: false

      - id: 1.2.12
        text: "Ensure that the admission control plugin ServiceAccount is set (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          bin_op: or
          test_items:
            - flag: "--disable-admission-plugins"
              compare:
                op: nothave
                value: "ServiceAccount"
            - flag: "--disable-admission-plugins"
              set: false
        remediation: |
          Follow the documentation and create ServiceAccount objects as per your environment.
          Then, edit the API server pod specification file $apiserverconf
          on the control plane node and ensure that the --disable-admission-plugins parameter is set to a
          value that does not include ServiceAccount.
        scored: true

      - id: 1.2.13
        text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          bin_op: or
          test_items:
            - flag: "--disable-admission-plugins"
              compare:
                op: nothave
                value: "NamespaceLifecycle"
            - flag: "--disable-admission-plugins"
              set: false
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and set the --disable-admission-plugins parameter to
          ensure it does not include NamespaceLifecycle.
        scored: true

      - id: 1.2.14
        text: "Ensure that the admission control plugin NodeRestriction is set (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--enable-admission-plugins"
              compare:
                op: has
                value: "NodeRestriction"
        remediation: |
          Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets.
          Then, edit the API server pod specification file $apiserverconf
          on the control plane node and set the --enable-admission-plugins parameter to a
          value that includes NodeRestriction.
          --enable-admission-plugins=...,NodeRestriction,...
        scored: true

      - id: 1.2.15
        text: "Ensure that the --profiling argument is set to false (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--profiling"
              compare:
                op: eq
                value: false
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and set the below parameter.
          --profiling=false
        scored: true

      - id: 1.2.16
        text: "Ensure that the --audit-log-path argument is set (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--audit-log-path"
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and set the --audit-log-path parameter to a suitable path and
          file where you would like audit logs to be written, for example,
          --audit-log-path=/var/log/apiserver/audit.log
        scored: true

      - id: 1.2.17
        text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--audit-log-maxage"
              compare:
                op: gte
                value: 30
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and set the --audit-log-maxage parameter to 30
          or as an appropriate number of days, for example,
          --audit-log-maxage=30
        scored: true

      - id: 1.2.18
        text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--audit-log-maxbackup"
              compare:
                op: gte
                value: 10
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate
          value. For example,
          --audit-log-maxbackup=10
        scored: true

      - id: 1.2.19
        text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--audit-log-maxsize"
              compare:
                op: gte
                value: 100
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB.
          For example, to set it as 100 MB, --audit-log-maxsize=100
        scored: true

      - id: 1.2.20
        text: "Ensure that the --request-timeout argument is set as appropriate (Manual)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        type: manual
        remediation: |
          Edit the API server pod specification file $apiserverconf
          and set the below parameter as appropriate and if needed.
          For example, --request-timeout=300s
        scored: false

      - id: 1.2.21
        text: "Ensure that the --service-account-lookup argument is set to true (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          bin_op: or
          test_items:
            - flag: "--service-account-lookup"
              set: false
            - flag: "--service-account-lookup"
              compare:
                op: eq
                value: true
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and set the below parameter.
          --service-account-lookup=true
          Alternatively, you can delete the --service-account-lookup parameter from this file so
          that the default takes effect.
        scored: true

      - id: 1.2.22
        text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--service-account-key-file"
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and set the --service-account-key-file parameter
          to the public key file for service accounts. For example,
          --service-account-key-file=<filename>
        scored: true

      - id: 1.2.23
        text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          bin_op: and
          test_items:
            - flag: "--etcd-certfile"
            - flag: "--etcd-keyfile"
        remediation: |
          Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd.
          Then, edit the API server pod specification file $apiserverconf
          on the control plane node and set the etcd certificate and key file parameters.
          --etcd-certfile=<path/to/client-certificate-file>
          --etcd-keyfile=<path/to/client-key-file>
        scored: true

      - id: 1.2.24
        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          bin_op: and
          test_items:
            - flag: "--tls-cert-file"
            - flag: "--tls-private-key-file"
        remediation: |
          Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
          Then, edit the API server pod specification file $apiserverconf
          on the control plane node and set the TLS certificate and private key file parameters.
          --tls-cert-file=<path/to/tls-certificate-file>
          --tls-private-key-file=<path/to/tls-key-file>
        scored: true

      - id: 1.2.25
        text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--client-ca-file"
        remediation: |
          Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
          Then, edit the API server pod specification file $apiserverconf
          on the control plane node and set the client certificate authority file.
          --client-ca-file=<path/to/client-ca-file>
        scored: true

      - id: 1.2.26
        text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--etcd-cafile"
        remediation: |
          Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd.
          Then, edit the API server pod specification file $apiserverconf
          on the control plane node and set the etcd certificate authority file parameter.
          --etcd-cafile=<path/to/ca-file>
        scored: true

      - id: 1.2.27
        text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--encryption-provider-config"
        remediation: |
          Follow the Kubernetes documentation and configure a EncryptionConfig file.
          Then, edit the API server pod specification file $apiserverconf
          on the control plane node and set the --encryption-provider-config parameter to the path of that file.
          For example, --encryption-provider-config=</path/to/EncryptionConfig/File>
        scored: false

      - id: 1.2.28
        text: "Ensure that encryption providers are appropriately configured (Manual)"
        audit: |
          ENCRYPTION_PROVIDER_CONFIG=$(ps -ef | grep $apiserverbin | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%')
          if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi
        tests:
          test_items:
            - flag: "provider"
              compare:
                op: valid_elements
                value: "aescbc,kms,secretbox"
        remediation: |
          Follow the Kubernetes documentation and configure a EncryptionConfig file.
          In this file, choose aescbc, kms or secretbox as the encryption provider.
        scored: false

      - id: 1.2.29
        text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--tls-cipher-suites"
              compare:
                op: valid_elements
                value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256"
        remediation: |
          Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml
          on the control plane node and set the below parameter.
          --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,
          TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
          TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
          TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
          TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
          TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256
        scored: false

      - id: 1.2.30
        text: "Ensure that the --service-account-extend-token-expiration parameter is set to false (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--service-account-extend-token-expiration"
              compare:
                op: eq
                value: false
        remediation: |
          Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml on the Control Plane node and set the --service-account-extend-token-expiration parameter to false.
          `--service-account-extend-token-expiration=false`
          By default, this parameter is set to true.
        scored: true

  - id: 1.3
    text: "Controller Manager"
    checks:
      - id: 1.3.1
        text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)"
        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
        tests:
          test_items:
            - flag: "--terminated-pod-gc-threshold"
        remediation: |
          Edit the Controller Manager pod specification file $controllermanagerconf
          on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold,
          for example, --terminated-pod-gc-threshold=10
        scored: false

      - id: 1.3.2
        text: "Ensure that the --profiling argument is set to false (Automated)"
        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
        tests:
          test_items:
            - flag: "--profiling"
              compare:
                op: eq
                value: false
        remediation: |
          Edit the Controller Manager pod specification file $controllermanagerconf
          on the control plane node and set the below parameter.
          --profiling=false
        scored: true

      - id: 1.3.3
        text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)"
        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
        tests:
          test_items:
            - flag: "--use-service-account-credentials"
              compare:
                op: noteq
                value: false
        remediation: |
          Edit the Controller Manager pod specification file $controllermanagerconf
          on the control plane node to set the below parameter.
          --use-service-account-credentials=true
        scored: true

      - id: 1.3.4
        text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)"
        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
        tests:
          test_items:
            - flag: "--service-account-private-key-file"
        remediation: |
          Edit the Controller Manager pod specification file $controllermanagerconf
          on the control plane node and set the --service-account-private-key-file parameter
          to the private key file for service accounts.
          --service-account-private-key-file=<filename>
        scored: true

      - id: 1.3.5
        text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)"
        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
        tests:
          test_items:
            - flag: "--root-ca-file"
        remediation: |
          Edit the Controller Manager pod specification file $controllermanagerconf
          on the control plane node and set the --root-ca-file parameter to the certificate bundle file.
          --root-ca-file=<path/to/file>
        scored: true

      - id: 1.3.6
        text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)"
        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
        tests:
          bin_op: or
          test_items:
            - flag: "--feature-gates"
              compare:
                op: nothave
                value: "RotateKubeletServerCertificate=false"
              set: true
            - flag: "--feature-gates"
              set: false
        remediation: |
          Edit the Controller Manager pod specification file $controllermanagerconf
          on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true.
          --feature-gates=RotateKubeletServerCertificate=true
        scored: true

      - id: 1.3.7
        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
        tests:
          bin_op: or
          test_items:
            - flag: "--bind-address"
              compare:
                op: eq
                value: "127.0.0.1"
            - flag: "--bind-address"
              set: false
        remediation: |
          Edit the Controller Manager pod specification file $controllermanagerconf
          on the control plane node and ensure the correct value for the --bind-address parameter
        scored: true

  - id: 1.4
    text: "Scheduler"
    checks:
      - id: 1.4.1
        text: "Ensure that the --profiling argument is set to false (Automated)"
        audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
        tests:
          test_items:
            - flag: "--profiling"
              compare:
                op: eq
                value: false
        remediation: |
          Edit the Scheduler pod specification file $schedulerconf file
          on the control plane node and set the below parameter.
          --profiling=false
        scored: true

      - id: 1.4.2
        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
        audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
        tests:
          bin_op: or
          test_items:
            - flag: "--bind-address"
              compare:
                op: eq
                value: "127.0.0.1"
            - flag: "--bind-address"
              set: false
        remediation: |
          Edit the Scheduler pod specification file $schedulerconf
          on the control plane node and ensure the correct value for the --bind-address parameter
        scored: true
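Two mechanics carry most of the 1.1.x file checks above. `op: bitmask` passes when the observed mode sets no permission bits beyond the stated value (so a "600" check also accepts 400, but rejects 640), and `use_multiple_values: true` applies the test to every line of audit output rather than to the output as a whole. A skeleton of the combination used in check 1.1.7, with editorial comments:

# Sketch of the file-permission pattern from check 1.1.7.
audit: "... | xargs stat -c permissions=%a"   # may print one line per matched file
use_multiple_values: true                     # evaluate each output line independently
tests:
  test_items:
    - flag: "permissions"
      compare:
        op: bitmask
        value: "600"    # pass if the mode is 600 or more restrictive, e.g. 400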
cfg/cis-1.11/node.yaml (new file): 509 lines
@@ -0,0 +1,509 @@
|
||||
---
|
||||
controls:
|
||||
version: "cis-1.11"
|
||||
id: 4
|
||||
text: "Worker Node Security Configuration"
|
||||
type: "node"
|
||||
groups:
|
||||
- id: 4.1
|
||||
text: "Worker Node Configuration Files"
|
||||
checks:
|
||||
- id: 4.1.1
|
||||
text: "Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)"
|
||||
audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' '
|
||||
tests:
|
||||
test_items:
|
||||
- flag: "permissions"
|
||||
compare:
|
||||
op: bitmask
|
||||
value: "600"
|
||||
remediation: |
|
||||
Run the below command (based on the file location on your system) on the each worker node.
|
||||
For example, chmod 600 $kubeletsvc
|
||||
scored: true

      - id: 4.1.2
        text: "Ensure that the kubelet service file ownership is set to root:root (Automated)"
        audit: '/bin/sh -c "if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; else echo \"File not found\"; fi"'
        tests:
          bin_op: or
          test_items:
            - flag: root:root
            - flag: "File not found"
        remediation: |
          Run the below command (based on the file location on your system) on each worker node.
          For example,
          chown root:root $kubeletsvc
        scored: true

      - id: 4.1.3
        text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Manual)"
        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' '
        tests:
          bin_op: or
          test_items:
            - flag: "permissions"
              set: true
              compare:
                op: bitmask
                value: "600"
        remediation: |
          Run the below command (based on the file location on your system) on each worker node.
          For example,
          chmod 600 $proxykubeconfig
        scored: false

      - id: 4.1.4
        text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)"
        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' '
        tests:
          bin_op: or
          test_items:
            - flag: root:root
        remediation: |
          Run the below command (based on the file location on your system) on each worker node.
          For example, chown root:root $proxykubeconfig
        scored: false

      - id: 4.1.5
        text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)"
        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' '
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "600"
        remediation: |
          Run the below command (based on the file location on your system) on each worker node.
          For example,
          chmod 600 $kubeletkubeconfig
        scored: true

      - id: 4.1.6
        text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)"
        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' '
        tests:
          test_items:
            - flag: root:root
        remediation: |
          Run the below command (based on the file location on your system) on each worker node.
          For example,
          chown root:root $kubeletkubeconfig
        scored: true

      - id: 4.1.7
        text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Manual)"
        audit: |
          CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq)
          if test -z $CAFILE; then CAFILE=$kubeletcafile; fi
          if test -e $CAFILE; then stat -c permissions=%a $CAFILE; fi
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "644"
        remediation: |
          Run the following command to modify the file permissions of the
          --client-ca-file: chmod 644 <filename>
        scored: false

      - id: 4.1.8
        text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)"
        audit: |
          CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq)
          if test -z $CAFILE; then CAFILE=$kubeletcafile; fi
          if test -e $CAFILE; then stat -c %U:%G $CAFILE; fi
        tests:
          test_items:
            - flag: root:root
              compare:
                op: eq
                value: root:root
        remediation: |
          Run the following command to modify the ownership of the --client-ca-file.
          chown root:root <filename>
        scored: false

      - id: 4.1.9
        text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Automated)"
        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' '
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "600"
        remediation: |
          Run the following command (using the config file location identified in the Audit step)
          chmod 600 $kubeletconf
        scored: true

      - id: 4.1.10
        text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Automated)"
        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
        tests:
          test_items:
            - flag: root:root
        remediation: |
          Run the following command (using the config file location identified in the Audit step)
          chown root:root $kubeletconf
        scored: true

  - id: 4.2
    text: "Kubelet"
    checks:
      - id: 4.2.1
        text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: "--anonymous-auth"
              path: '{.authentication.anonymous.enabled}'
              compare:
                op: eq
                value: false
        remediation: |
          If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to
          `false`.
          If using executable arguments, edit the kubelet service file
          $kubeletsvc on each worker node and
          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
          `--anonymous-auth=false`
          Based on your system, restart the kubelet service. For example,
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: true
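      # A minimal sketch of the kubelet config fragment this check looks for.
      # The file is whatever $kubeletconf resolves to (commonly
      # /var/lib/kubelet/config.yaml -- path assumed):
      #   authentication:
      #     anonymous:
      #       enabled: false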

      - id: 4.2.2
        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --authorization-mode
              path: '{.authorization.mode}'
              compare:
                op: nothave
                value: AlwaysAllow
        remediation: |
          If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If
          using executable arguments, edit the kubelet service file
          $kubeletsvc on each worker node and
          set the below parameter in KUBELET_AUTHZ_ARGS variable.
          --authorization-mode=Webhook
          Based on your system, restart the kubelet service. For example,
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: true
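      # The matching kubelet config fragment, sketched for illustration:
      #   authorization:
      #     mode: Webhook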

      - id: 4.2.3
        text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --client-ca-file
              path: '{.authentication.x509.clientCAFile}'
        remediation: |
          If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to
          the location of the client CA file.
          If using command line arguments, edit the kubelet service file
          $kubeletsvc on each worker node and
          set the below parameter in KUBELET_AUTHZ_ARGS variable.
          --client-ca-file=<path/to/client-ca-file>
          Based on your system, restart the kubelet service. For example,
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: true

      - id: 4.2.4
        text: "Verify that if defined, the --read-only-port argument is set to 0 (Manual)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          bin_op: or
          test_items:
            - flag: "--read-only-port"
              path: '{.readOnlyPort}'
              compare:
                op: eq
                value: 0
            - flag: "--read-only-port"
              path: '{.readOnlyPort}'
              set: false
        remediation: |
          If using a Kubelet config file, edit the file to set `readOnlyPort` to 0.
          If using command line arguments, edit the kubelet service file
          $kubeletsvc on each worker node and
          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
          --read-only-port=0
          Based on your system, restart the kubelet service. For example,
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: false

      - id: 4.2.5
        text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --streaming-connection-idle-timeout
              path: '{.streamingConnectionIdleTimeout}'
              compare:
                op: noteq
                value: 0
            - flag: --streaming-connection-idle-timeout
              path: '{.streamingConnectionIdleTimeout}'
              set: false
          bin_op: or
        remediation: |
          If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a
          value other than 0.
          If using command line arguments, edit the kubelet service file
          $kubeletsvc on each worker node and
          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
          --streaming-connection-idle-timeout=5m
          Based on your system, restart the kubelet service. For example,
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: false

      - id: 4.2.6
        text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --make-iptables-util-chains
              path: '{.makeIPTablesUtilChains}'
              compare:
                op: eq
                value: true
            - flag: --make-iptables-util-chains
              path: '{.makeIPTablesUtilChains}'
              set: false
          bin_op: or
        remediation: |
          If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`.
          If using command line arguments, edit the kubelet service file
          $kubeletsvc on each worker node and
          remove the --make-iptables-util-chains argument from the
          KUBELET_SYSTEM_PODS_ARGS variable.
          Based on your system, restart the kubelet service. For example:
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: true

      - id: 4.2.7
        text: "Ensure that the --hostname-override argument is not set (Manual)"
        # This is one of those properties that can only be set as a command line argument.
        # To check if the property is set as expected, we need to parse the kubelet command
        # instead of reading the Kubelet Configuration file.
        audit: "/bin/ps -fC $kubeletbin"
        tests:
          test_items:
            - flag: --hostname-override
              set: false
        remediation: |
          Edit the kubelet service file $kubeletsvc
          on each worker node and remove the --hostname-override argument from the
          KUBELET_SYSTEM_PODS_ARGS variable.
          Based on your system, restart the kubelet service. For example,
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: false

      - id: 4.2.8
        text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --event-qps
              path: '{.eventRecordQPS}'
              compare:
                op: gte
                value: 0
            - flag: --event-qps
              path: '{.eventRecordQPS}'
              set: false
          bin_op: or
        remediation: |
          If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level.
          If using command line arguments, edit the kubelet service file
          $kubeletsvc on each worker node and
          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
          Based on your system, restart the kubelet service. For example,
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: false

      - id: 4.2.9
        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --tls-cert-file
              path: '{.tlsCertFile}'
            - flag: --tls-private-key-file
              path: '{.tlsPrivateKeyFile}'
        remediation: |
          If using a Kubelet config file, edit the file to set `tlsCertFile` to the location
          of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile`
          to the location of the corresponding private key file.
          If using command line arguments, edit the kubelet service file
          $kubeletsvc on each worker node and
          set the below parameters in KUBELET_CERTIFICATE_ARGS variable.
          --tls-cert-file=<path/to/tls-certificate-file>
          --tls-private-key-file=<path/to/tls-key-file>
          Based on your system, restart the kubelet service. For example,
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: false

      - id: 4.2.10
        text: "Ensure that the --rotate-certificates argument is not set to false (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --rotate-certificates
              path: '{.rotateCertificates}'
              compare:
                op: eq
                value: true
            - flag: --rotate-certificates
              path: '{.rotateCertificates}'
              set: false
          bin_op: or
        remediation: |
          If using a Kubelet config file, edit the file to add the line `rotateCertificates: true` or
          remove it altogether to use the default value.
          If using command line arguments, edit the kubelet service file
          $kubeletsvc on each worker node and
          remove the --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS
          variable.
          Based on your system, restart the kubelet service. For example,
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: true

      - id: 4.2.11
        text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          bin_op: or
          test_items:
            - flag: RotateKubeletServerCertificate
              path: '{.featureGates.RotateKubeletServerCertificate}'
              compare:
                op: nothave
                value: false
            - flag: RotateKubeletServerCertificate
              path: '{.featureGates.RotateKubeletServerCertificate}'
              set: false
        remediation: |
          Edit the kubelet service file $kubeletsvc
          on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable.
          --feature-gates=RotateKubeletServerCertificate=true
          Based on your system, restart the kubelet service. For example:
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: false

      - id: 4.2.12
        text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --tls-cipher-suites
              path: '{range .tlsCipherSuites[:]}{}{'',''}{end}'
              compare:
                op: valid_elements
                value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
        remediation: |
          If using a Kubelet config file, edit the file to set `tlsCipherSuites` to
          TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
          or to a subset of these values.
          If using executable arguments, edit the kubelet service file
          $kubeletsvc on each worker node and
          set the --tls-cipher-suites parameter as follows, or to a subset of these values.
          --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
          Based on your system, restart the kubelet service. For example:
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: false
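      # A sketch of the equivalent kubelet config list form (first two suites
      # shown; extend with the full set above as needed):
      #   tlsCipherSuites:
      #     - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
      #     - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256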

      - id: 4.2.13
        text: "Ensure that a limit is set on pod PIDs (Manual)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --pod-max-pids
              path: '{.podPidsLimit}'
        remediation: |
          Decide on an appropriate level for this parameter and set it,
          either via the --pod-max-pids command line parameter or the podPidsLimit configuration file setting.
        scored: false
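      # For illustration, the config file form (limit value assumed, tune for
      # your workloads):
      #   podPidsLimit: 4096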

      - id: 4.2.14
        text: "Ensure that the --seccomp-default parameter is set to true (Manual)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --seccomp-default
              path: '{.seccompDefault}'
        remediation: |
          Set the parameter, either via the --seccomp-default command line parameter or the
          seccompDefault configuration file setting.
          By default the seccomp profile is not enabled.
        scored: false

      - id: 4.2.15
        text: "Ensure that the --IPAddressDeny is set to any (Manual)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --IPAddressDeny
              path: '{.IPAddressDeny}'
        remediation: |
          Configuring the setting IPAddressDeny=any will deny service to any IP address not specified in the complementary setting IPAddressAllow configuration parameter.
          IPAddressDeny=any
          IPAddressAllow={{ kubelet_secure_addresses }}
          *Note
          kubelet_secure_addresses: "localhost link-local {{ kube_pods_subnets | regex_replace(',', ' ') }} {{ kube_node_addresses }} {{ loadbalancer_apiserver.address | default('') }}"
          By default IPAddressDeny is not enabled.
        scored: false

  - id: 4.3
    text: "kube-proxy"
    checks:
      - id: 4.3.1
        text: "Ensure that the kube-proxy metrics service is bound to localhost (Automated)"
        audit: "/bin/ps -fC $proxybin"
        audit_config: "/bin/sh -c 'if test -e $proxykubeconfig; then cat $proxykubeconfig; fi'"
        tests:
          bin_op: or
          test_items:
            - flag: "--metrics-bind-address"
              path: '{.metricsBindAddress}'
              compare:
                op: has
                value: "127.0.0.1"
            - flag: "--metrics-bind-address"
              path: '{.metricsBindAddress}'
              set: false
        remediation: |
          Modify or remove any values which bind the metrics service to a non-localhost address.
          The default value is 127.0.0.1:10249.
        scored: true
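      # A sketch of the kube-proxy configuration fragment this check inspects
      # (value shown is the default stated above):
      #   metricsBindAddress: 127.0.0.1:10249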
560 cfg/cis-1.11/policies.yaml Normal file
@@ -0,0 +1,560 @@
---
controls:
version: "cis-1.11"
id: 5
text: "Kubernetes Policies"
type: "policies"
groups:
  - id: 5.1
    text: "RBAC and Service Accounts"
    checks:
      - id: 5.1.1
        text: "Ensure that the cluster-admin role is only used where required (Manual)"
        audit: |
          kubectl get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].name --no-headers | while read -r role_name role_binding subject
          do
            if [[ "${role_name}" != "cluster-admin" && "${role_binding}" == "cluster-admin" ]]; then
              is_compliant="false"
            else
              is_compliant="true"
            fi;
            echo "**role_name: ${role_name} role_binding: ${role_binding} subject: ${subject} is_compliant: ${is_compliant}"
          done
        use_multiple_values: true
        tests:
          test_items:
            - flag: "is_compliant"
              compare:
                op: eq
                value: true
        remediation: |
          Identify all clusterrolebindings to the cluster-admin role. Check if they are used and
          if they need this role or if they could use a role with fewer privileges.
          Where possible, first bind users to a lower privileged role and then remove the
          clusterrolebinding to the cluster-admin role: kubectl delete clusterrolebinding [name]
          Condition: is_compliant is false if role_name is not cluster-admin and role_binding is cluster-admin.
        scored: false
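      # Explanatory note (not part of the CIS text): with
      # `use_multiple_values: true`, kube-bench evaluates the test against
      # every line of audit output, so each clusterrolebinding echoed above is
      # checked for is_compliant individually.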

      - id: 5.1.2
        text: "Minimize access to secrets (Manual)"
        audit: "echo \"canGetListWatchSecretsAsSystemAuthenticated: $(kubectl auth can-i get,list,watch secrets --all-namespaces --as=system:authenticated)\""
        tests:
          test_items:
            - flag: "canGetListWatchSecretsAsSystemAuthenticated"
              compare:
                op: eq
                value: no
        remediation: |
          Where possible, remove get, list and watch access to Secret objects in the cluster.
        scored: false

      - id: 5.1.3
        text: "Minimize wildcard use in Roles and ClusterRoles (Manual)"
        audit: |
          # Check Roles
          kubectl get roles --all-namespaces -o custom-columns=ROLE_NAMESPACE:.metadata.namespace,ROLE_NAME:.metadata.name --no-headers | while read -r role_namespace role_name
          do
            role_rules=$(kubectl get role -n "${role_namespace}" "${role_name}" -o=json | jq -c '.rules')
            if echo "${role_rules}" | grep -q "\[\"\*\"\]"; then
              role_is_compliant="false"
            else
              role_is_compliant="true"
            fi;
            echo "**role_name: ${role_name} role_namespace: ${role_namespace} role_rules: ${role_rules} role_is_compliant: ${role_is_compliant}"
          done

          # Check ClusterRoles
          kubectl get clusterroles -o custom-columns=CLUSTERROLE_NAME:.metadata.name --no-headers | while read -r clusterrole_name
          do
            clusterrole_rules=$(kubectl get clusterrole "${clusterrole_name}" -o=json | jq -c '.rules')
            if echo "${clusterrole_rules}" | grep -q "\[\"\*\"\]"; then
              clusterrole_is_compliant="false"
            else
              clusterrole_is_compliant="true"
            fi;
            echo "**clusterrole_name: ${clusterrole_name} clusterrole_rules: ${clusterrole_rules} clusterrole_is_compliant: ${clusterrole_is_compliant}"
          done
        use_multiple_values: true
        tests:
          bin_op: or
          test_items:
            - flag: "role_is_compliant"
              compare:
                op: eq
                value: true
              set: true
            - flag: "clusterrole_is_compliant"
              compare:
                op: eq
                value: true
              set: true
        remediation: |
          Where possible, replace any use of wildcards ["*"] in roles and clusterroles with specific
          objects or actions.
          Condition: role_is_compliant is false if ["*"] is found in rules.
          Condition: clusterrole_is_compliant is false if ["*"] is found in rules.
        scored: false

      - id: 5.1.4
        text: "Minimize access to create pods (Manual)"
        audit: |
          echo "canCreatePodsAsSystemAuthenticated: $(kubectl auth can-i create pods --all-namespaces --as=system:authenticated)"
        tests:
          test_items:
            - flag: "canCreatePodsAsSystemAuthenticated"
              compare:
                op: eq
                value: no
        remediation: |
          Where possible, remove create access to pod objects in the cluster.
        scored: false

      - id: 5.1.5
        text: "Ensure that default service accounts are not actively used (Manual)"
        audit: |
          kubectl get serviceaccount --all-namespaces --field-selector metadata.name=default -o=json | jq -r '.items[] | " namespace: \(.metadata.namespace), kind: \(.kind), name: \(.metadata.name), automountServiceAccountToken: \(.automountServiceAccountToken | if . == null then "notset" else . end )"' | xargs -L 1
        use_multiple_values: true
        tests:
          test_items:
            - flag: "automountServiceAccountToken"
              compare:
                op: eq
                value: false
              set: true
        remediation: |
          Create explicit service accounts wherever a Kubernetes workload requires specific access
          to the Kubernetes API server.
          Modify the configuration of each default service account to include this value
          `automountServiceAccountToken: false`.
        scored: false
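      # A hedged example of applying the remediation from the command line
      # (command form assumed, adjust the namespace):
      #   kubectl patch serviceaccount default -n <namespace> \
      #     -p '{"automountServiceAccountToken": false}'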

      - id: 5.1.6
        text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)"
        audit: |
          kubectl get pods --all-namespaces -o custom-columns=POD_NAMESPACE:.metadata.namespace,POD_NAME:.metadata.name,POD_SERVICE_ACCOUNT:.spec.serviceAccount,POD_IS_AUTOMOUNTSERVICEACCOUNTTOKEN:.spec.automountServiceAccountToken --no-headers | while read -r pod_namespace pod_name pod_service_account pod_is_automountserviceaccounttoken
          do
            # Retrieve automountServiceAccountToken's value for ServiceAccount and Pod, set to notset if null or <none>.
            svacc_is_automountserviceaccounttoken=$(kubectl get serviceaccount -n "${pod_namespace}" "${pod_service_account}" -o json | jq -r '.automountServiceAccountToken' | sed -e 's/<none>/notset/g' -e 's/null/notset/g')
            pod_is_automountserviceaccounttoken=$(echo "${pod_is_automountserviceaccounttoken}" | sed -e 's/<none>/notset/g' -e 's/null/notset/g')
            if [ "${svacc_is_automountserviceaccounttoken}" = "false" ] && ( [ "${pod_is_automountserviceaccounttoken}" = "false" ] || [ "${pod_is_automountserviceaccounttoken}" = "notset" ] ); then
              is_compliant="true"
            elif [ "${svacc_is_automountserviceaccounttoken}" = "true" ] && [ "${pod_is_automountserviceaccounttoken}" = "false" ]; then
              is_compliant="true"
            else
              is_compliant="false"
            fi
            echo "**namespace: ${pod_namespace} pod_name: ${pod_name} service_account: ${pod_service_account} pod_is_automountserviceaccounttoken: ${pod_is_automountserviceaccounttoken} svacc_is_automountServiceAccountToken: ${svacc_is_automountserviceaccounttoken} is_compliant: ${is_compliant}"
          done
        use_multiple_values: true
        tests:
          test_items:
            - flag: "is_compliant"
              compare:
                op: eq
                value: true
        remediation: |
          Modify the definition of ServiceAccounts and Pods which do not need to mount service
          account tokens to disable it, with `automountServiceAccountToken: false`.
          If both the ServiceAccount and the Pod's .spec specify a value for automountServiceAccountToken, the Pod spec takes precedence.
          Condition: Pod is_compliant is true when
          - ServiceAccount is automountServiceAccountToken: false and Pod is automountServiceAccountToken: false or notset
          - ServiceAccount is automountServiceAccountToken: true and Pod is automountServiceAccountToken: false
        scored: false

      - id: 5.1.7
        text: "Avoid use of system:masters group (Manual)"
        type: "manual"
        remediation: |
          Remove the system:masters group from all users in the cluster.
        scored: false

      - id: 5.1.8
        text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)"
        type: "manual"
        remediation: |
          Where possible, remove the impersonate, bind and escalate rights from subjects.
        scored: false

      - id: 5.1.9
        text: "Minimize access to create persistent volumes (Manual)"
        type: "manual"
        remediation: |
          Where possible, remove create access to PersistentVolume objects in the cluster.
        scored: false

      - id: 5.1.10
        text: "Minimize access to the proxy sub-resource of nodes (Manual)"
        type: "manual"
        remediation: |
          Where possible, remove access to the proxy sub-resource of node objects.
        scored: false

      - id: 5.1.11
        text: "Minimize access to the approval sub-resource of certificatesigningrequests objects (Manual)"
        type: "manual"
        remediation: |
          Where possible, remove access to the approval sub-resource of certificatesigningrequests objects.
        scored: false

      - id: 5.1.12
        text: "Minimize access to webhook configuration objects (Manual)"
        type: "manual"
        remediation: |
          Where possible, remove access to the validatingwebhookconfigurations or mutatingwebhookconfigurations objects.
        scored: false

      - id: 5.1.13
        text: "Minimize access to the service account token creation (Manual)"
        type: "manual"
        remediation: |
          Where possible, remove access to the token sub-resource of serviceaccount objects.
        scored: false

  - id: 5.2
    text: "Pod Security Standards"
    checks:
      - id: 5.2.1
        text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)"
        type: "manual"
        remediation: |
          Ensure that either Pod Security Admission or an external policy control system is in place
          for every namespace which contains user workloads.
        scored: false

      - id: 5.2.2
        text: "Minimize the admission of privileged containers (Manual)"
        audit: |
          kubectl get pods --all-namespaces -o custom-columns=POD_NAME:.metadata.name,POD_NAMESPACE:.metadata.namespace --no-headers | while read -r pod_name pod_namespace
          do
            # Retrieve container(s) for each Pod.
            kubectl get pod "${pod_name}" --namespace "${pod_namespace}" -o json | jq -c '.spec.containers[]' | while read -r container
            do
              # Retrieve container's name.
              container_name=$(echo ${container} | jq -r '.name')
              # Retrieve container's .securityContext.privileged value.
              container_privileged=$(echo ${container} | jq -r '.securityContext.privileged' | sed -e 's/null/notset/g')
              if [ "${container_privileged}" = "false" ] || [ "${container_privileged}" = "notset" ]; then
                echo "***pod_name: ${pod_name} container_name: ${container_name} pod_namespace: ${pod_namespace} is_container_privileged: ${container_privileged} is_compliant: true"
              else
                echo "***pod_name: ${pod_name} container_name: ${container_name} pod_namespace: ${pod_namespace} is_container_privileged: ${container_privileged} is_compliant: false"
              fi
            done
          done
        use_multiple_values: true
        tests:
          test_items:
            - flag: "is_compliant"
              compare:
                op: eq
                value: true
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of privileged containers.
          Audit: the audit lists all pods' containers to retrieve their .securityContext.privileged value.
          Condition: is_compliant is false if container's `.securityContext.privileged` is set to `true`.
          Default: by default, there are no restrictions on the creation of privileged containers.
        scored: false

      - id: 5.2.3
        text: "Minimize the admission of containers wishing to share the host process ID namespace (Manual)"
        audit: |
          kubectl get pods --all-namespaces -o custom-columns=POD_NAME:.metadata.name,POD_NAMESPACE:.metadata.namespace --no-headers | while read -r pod_name pod_namespace
          do
            # Retrieve spec.hostPID for each pod.
            pod_hostpid=$(kubectl get pod "${pod_name}" --namespace "${pod_namespace}" -o jsonpath='{.spec.hostPID}' 2>/dev/null)
            if [ -z "${pod_hostpid}" ]; then
              pod_hostpid="false"
              echo "***pod_name: ${pod_name} pod_namespace: ${pod_namespace} is_pod_hostpid: ${pod_hostpid} is_compliant: true"
            else
              echo "***pod_name: ${pod_name} pod_namespace: ${pod_namespace} is_pod_hostpid: ${pod_hostpid} is_compliant: false"
            fi
          done
        use_multiple_values: true
        tests:
          test_items:
            - flag: "is_compliant"
              compare:
                op: eq
                value: true
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of `hostPID` containers.
          Audit: the audit retrieves each Pod's spec.hostPID.
          Condition: is_compliant is false if Pod's spec.hostPID is set to `true`.
          Default: by default, there are no restrictions on the creation of hostPID containers.
        scored: false

      - id: 5.2.4
        text: "Minimize the admission of containers wishing to share the host IPC namespace (Manual)"
        audit: |
          kubectl get pods --all-namespaces -o custom-columns=POD_NAME:.metadata.name,POD_NAMESPACE:.metadata.namespace --no-headers | while read -r pod_name pod_namespace
          do
            # Retrieve spec.hostIPC for each pod.
            pod_hostipc=$(kubectl get pod "${pod_name}" --namespace "${pod_namespace}" -o jsonpath='{.spec.hostIPC}' 2>/dev/null)
            if [ -z "${pod_hostipc}" ]; then
              pod_hostipc="false"
              echo "***pod_name: ${pod_name} pod_namespace: ${pod_namespace} is_pod_hostipc: ${pod_hostipc} is_compliant: true"
            else
              echo "***pod_name: ${pod_name} pod_namespace: ${pod_namespace} is_pod_hostipc: ${pod_hostipc} is_compliant: false"
            fi
          done
        use_multiple_values: true
        tests:
          test_items:
            - flag: "is_compliant"
              compare:
                op: eq
                value: true
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of `hostIPC` containers.
          Audit: the audit retrieves each Pod's spec.hostIPC.
          Condition: is_compliant is false if Pod's spec.hostIPC is set to `true`.
          Default: by default, there are no restrictions on the creation of hostIPC containers.
        scored: false

      - id: 5.2.5
        text: "Minimize the admission of containers wishing to share the host network namespace (Manual)"
        audit: |
          kubectl get pods --all-namespaces -o custom-columns=POD_NAME:.metadata.name,POD_NAMESPACE:.metadata.namespace --no-headers | while read -r pod_name pod_namespace
          do
            # Retrieve spec.hostNetwork for each pod.
            pod_hostnetwork=$(kubectl get pod "${pod_name}" --namespace "${pod_namespace}" -o jsonpath='{.spec.hostNetwork}' 2>/dev/null)
            if [ -z "${pod_hostnetwork}" ]; then
              pod_hostnetwork="false"
              echo "***pod_name: ${pod_name} pod_namespace: ${pod_namespace} is_pod_hostnetwork: ${pod_hostnetwork} is_compliant: true"
            else
              echo "***pod_name: ${pod_name} pod_namespace: ${pod_namespace} is_pod_hostnetwork: ${pod_hostnetwork} is_compliant: false"
            fi
          done
        use_multiple_values: true
        tests:
          test_items:
            - flag: "is_compliant"
              compare:
                op: eq
                value: true
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of `hostNetwork` containers.
          Audit: the audit retrieves each Pod's spec.hostNetwork.
          Condition: is_compliant is false if Pod's spec.hostNetwork is set to `true`.
          Default: by default, there are no restrictions on the creation of hostNetwork containers.
        scored: false

      - id: 5.2.6
        text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)"
        audit: |
          kubectl get pods --all-namespaces -o custom-columns=POD_NAME:.metadata.name,POD_NAMESPACE:.metadata.namespace --no-headers | while read -r pod_name pod_namespace
          do
            # Retrieve container(s) for each Pod.
            kubectl get pod "${pod_name}" --namespace "${pod_namespace}" -o json | jq -c '.spec.containers[]' | while read -r container
            do
              # Retrieve container's name.
              container_name=$(echo ${container} | jq -r '.name')
              # Retrieve container's .securityContext.allowPrivilegeEscalation.
              container_allowprivesc=$(echo ${container} | jq -r '.securityContext.allowPrivilegeEscalation' | sed -e 's/null/notset/g')
              if [ "${container_allowprivesc}" = "false" ] || [ "${container_allowprivesc}" = "notset" ]; then
                echo "***pod_name: ${pod_name} container_name: ${container_name} pod_namespace: ${pod_namespace} is_container_allowprivesc: ${container_allowprivesc} is_compliant: true"
              else
                echo "***pod_name: ${pod_name} container_name: ${container_name} pod_namespace: ${pod_namespace} is_container_allowprivesc: ${container_allowprivesc} is_compliant: false"
              fi
            done
          done
        use_multiple_values: true
        tests:
          test_items:
            - flag: "is_compliant"
              compare:
                op: eq
                value: true
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of containers with `.securityContext.allowPrivilegeEscalation` set to `true`.
          Audit: the audit retrieves each Pod's container(s) `.securityContext.allowPrivilegeEscalation`.
          Condition: is_compliant is false if container's `.securityContext.allowPrivilegeEscalation` is set to `true`.
          Default: if notset, privilege escalation is allowed (defaults to true). However, if PSP/PSA is used with a `restricted` profile,
          privilege escalation is explicitly disallowed unless configured otherwise.
        scored: false

      - id: 5.2.7
        text: "Minimize the admission of root containers (Manual)"
        type: "manual"
        remediation: |
          Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot`
          or `MustRunAs` with the range of UIDs not including 0, is set.
        scored: false

      - id: 5.2.8
        text: "Minimize the admission of containers with the NET_RAW capability (Manual)"
        type: "manual"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of containers with the `NET_RAW` capability.
        scored: false

      - id: 5.2.9
        text: "Minimize the admission of containers with added capabilities (Manual)"
        audit: |
          kubectl get pods --all-namespaces -o custom-columns=POD_NAME:.metadata.name,POD_NAMESPACE:.metadata.namespace --no-headers | while read -r pod_name pod_namespace
          do
            # Retrieve container(s) for each Pod.
            kubectl get pod "${pod_name}" --namespace "${pod_namespace}" -o json | jq -c '.spec.containers[]' | while read -r container
            do
              # Retrieve container's name.
              container_name=$(echo ${container} | jq -r '.name')
              # Retrieve container's added capabilities.
              container_caps_add=$(echo ${container} | jq -r '.securityContext.capabilities.add' | sed -e 's/null/notset/g')
              # Set is_compliant to true by default.
              is_compliant=true
              caps_list=""
              if [ "${container_caps_add}" != "notset" ]; then
                # Loop through all caps and append caps_list, then set is_compliant to false.
                for cap in $(echo "${container_caps_add}" | jq -r '.[]'); do
                  caps_list+="${cap},"
                  is_compliant=false
                done
                # Remove trailing comma for the last list member.
                caps_list=${caps_list%,}
              fi
              if [ "${is_compliant}" = true ]; then
                echo "***pod_name: ${pod_name} container_name: ${container_name} pod_namespace: ${pod_namespace} container_caps_add: ${container_caps_add} is_compliant: true"
              else
                echo "***pod_name: ${pod_name} container_name: ${container_name} pod_namespace: ${pod_namespace} container_caps_add: ${caps_list} is_compliant: false"
              fi
            done
          done
        use_multiple_values: true
        tests:
          test_items:
            - flag: "is_compliant"
              compare:
                op: eq
                value: true
        remediation: |
          Ensure that `allowedCapabilities` is not present in policies for the cluster unless
          it is set to an empty array.
          Audit: the audit retrieves each Pod's container(s) added capabilities.
          Condition: is_compliant is false if capabilities are added for a given container.
          Default: containers run with a default set of capabilities as assigned by the Container Runtime.
        scored: false

      - id: 5.2.10
        text: "Minimize the admission of containers with capabilities assigned (Manual)"
        type: "manual"
        remediation: |
          Review the use of capabilities in applications running on your cluster. Where a namespace
          contains applications which do not require any Linux capabilities to operate, consider adding
          a PSP which forbids the admission of containers which do not drop all capabilities.
        scored: false

      - id: 5.2.11
        text: "Minimize the admission of Windows HostProcess containers (Manual)"
        type: "manual"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`.
        scored: false

      - id: 5.2.12
        text: "Minimize the admission of HostPath volumes (Manual)"
        type: "manual"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of containers with `hostPath` volumes.
        scored: false

      - id: 5.2.13
        text: "Minimize the admission of containers which use HostPorts (Manual)"
        type: "manual"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of containers which use `hostPort` sections.
        scored: false

  - id: 5.3
    text: "Network Policies and CNI"
    checks:
      - id: 5.3.1
        text: "Ensure that the CNI in use supports NetworkPolicies (Manual)"
        type: "manual"
        remediation: |
          If the CNI plugin in use does not support network policies, consideration should be given to
          making use of a different plugin, or finding an alternate mechanism for restricting traffic
          in the Kubernetes cluster.
        scored: false

      - id: 5.3.2
        text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)"
        type: "manual"
        remediation: |
          Follow the documentation and create NetworkPolicy objects as you need them.
        scored: false

  - id: 5.4
    text: "Secrets Management"
    checks:
      - id: 5.4.1
        text: "Prefer using Secrets as files over Secrets as environment variables (Manual)"
        type: "manual"
        remediation: |
          If possible, rewrite application code to read Secrets from mounted secret files, rather than
          from environment variables.
        scored: false

      - id: 5.4.2
        text: "Consider external secret storage (Manual)"
        type: "manual"
        remediation: |
          Refer to the Secrets management options offered by your cloud provider or a third-party
          secrets management solution.
        scored: false

  - id: 5.5
    text: "Extensible Admission Control"
    checks:
      - id: 5.5.1
        text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)"
        type: "manual"
        remediation: |
          Follow the Kubernetes documentation and set up image provenance.
        scored: false

  - id: 5.6
    text: "General Policies"
    checks:
      - id: 5.6.1
        text: "Create administrative boundaries between resources using namespaces (Manual)"
        type: "manual"
        remediation: |
          Follow the documentation and create namespaces for objects in your deployment as you need
          them.
        scored: false

      - id: 5.6.2
        text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)"
        type: "manual"
        remediation: |
          Use `securityContext` to enable the docker/default seccomp profile in your pod definitions.
          An example is as below:
          securityContext:
            seccompProfile:
              type: RuntimeDefault
        scored: false

      - id: 5.6.3
        text: "Apply SecurityContext to your Pods and Containers (Manual)"
        type: "manual"
        remediation: |
          Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a
          suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker
          Containers.
        scored: false

      - id: 5.6.4
        text: "The default namespace should not be used (Manual)"
        type: "manual"
        remediation: |
          Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
          resources and that all new resources are created in a specific namespace.
        scored: false
2 cfg/cis-1.12/config.yaml Normal file
@@ -0,0 +1,2 @@
---
## Version-specific settings that override the values in cfg/config.yaml
62 cfg/cis-1.12/controlplane.yaml Normal file
@@ -0,0 +1,62 @@
---
controls:
version: "cis-1.12"
id: 3
text: "Control Plane Configuration"
type: "controlplane"
groups:
  - id: 3.1
    text: "Authentication and Authorization"
    checks:
      - id: 3.1.1
        text: "Client certificate authentication should not be used for users (Manual)"
        type: "manual"
        remediation: |
          Alternative mechanisms provided by Kubernetes such as the use of OIDC should be
          implemented in place of client certificates.
        scored: false

      - id: 3.1.2
        text: "Service account token authentication should not be used for users (Manual)"
        type: "manual"
        remediation: |
          Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented
          in place of service account tokens.
        scored: false

      - id: 3.1.3
        text: "Bootstrap token authentication should not be used for users (Manual)"
        type: "manual"
        remediation: |
          Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented
          in place of bootstrap tokens.
        scored: false

  - id: 3.2
    text: "Logging"
    checks:
      - id: 3.2.1
        text: "Ensure that a minimal audit policy is created (Manual)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--audit-policy-file"
              set: true
        remediation: |
          Create an audit policy file for your cluster.
        scored: false
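      # A minimal sketch of an audit policy file (contents assumed; expand the
      # rules to cover the areas listed in 3.2.2 below):
      #   apiVersion: audit.k8s.io/v1
      #   kind: Policy
      #   rules:
      #     - level: Metadata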

      - id: 3.2.2
        text: "Ensure that the audit policy covers key security concerns (Manual)"
        type: "manual"
        remediation: |
          Review the audit policy provided for the cluster and ensure that it covers
          at least the following areas,
          - Access to Secrets managed by the cluster. Care should be taken to only
            log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in
            order to avoid risk of logging sensitive data.
          - Modification of Pod and Deployment objects.
          - Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`.
          For most requests, minimally logging at the Metadata level is recommended
          (the most basic level of logging).
        scored: false
135 cfg/cis-1.12/etcd.yaml Normal file
@@ -0,0 +1,135 @@
---
controls:
version: "cis-1.12"
id: 2
text: "Etcd Node Configuration"
type: "etcd"
groups:
  - id: 2
    text: "Etcd Node Configuration"
    checks:
      - id: 2.1
        text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)"
        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
        tests:
          bin_op: and
          test_items:
            - flag: "--cert-file"
              env: "ETCD_CERT_FILE"
            - flag: "--key-file"
              env: "ETCD_KEY_FILE"
        remediation: |
          Follow the etcd service documentation and configure TLS encryption.
          Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml
          on the master node and set the below parameters.
          --cert-file=</path/to/ca-file>
          --key-file=</path/to/key-file>
        scored: true
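      # Explanatory note (not part of the CIS text): the `env` fields above
      # mean the check also passes when etcd is configured through environment
      # variables instead of flags, e.g. (paths assumed):
      #   ETCD_CERT_FILE=/etc/etcd/server.crt
      #   ETCD_KEY_FILE=/etc/etcd/server.key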

      - id: 2.2
        text: "Ensure that the --client-cert-auth argument is set to true (Automated)"
        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
        tests:
          test_items:
            - flag: "--client-cert-auth"
              env: "ETCD_CLIENT_CERT_AUTH"
              compare:
                op: eq
                value: true
        remediation: |
          Edit the etcd pod specification file $etcdconf on the master
          node and set the below parameter.
          --client-cert-auth="true"
        scored: true

      - id: 2.3
        text: "Ensure that the --auto-tls argument is not set to true (Automated)"
        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
        tests:
          bin_op: or
          test_items:
            - flag: "--auto-tls"
              env: "ETCD_AUTO_TLS"
              set: false
            - flag: "--auto-tls"
              env: "ETCD_AUTO_TLS"
              compare:
                op: eq
                value: false
        remediation: |
          Edit the etcd pod specification file $etcdconf on the master
          node and either remove the --auto-tls parameter or set it to false.
          --auto-tls=false
        scored: true

      - id: 2.4
        text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated)"
        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
        tests:
          bin_op: and
          test_items:
            - flag: "--peer-cert-file"
              env: "ETCD_PEER_CERT_FILE"
            - flag: "--peer-key-file"
              env: "ETCD_PEER_KEY_FILE"
        remediation: |
          Follow the etcd service documentation and configure peer TLS encryption as appropriate
          for your etcd cluster.
          Then, edit the etcd pod specification file $etcdconf on the
          master node and set the below parameters.
          --peer-cert-file=</path/to/peer-cert-file>
          --peer-key-file=</path/to/peer-key-file>
        scored: true

      - id: 2.5
        text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)"
        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
        tests:
          test_items:
            - flag: "--peer-client-cert-auth"
              env: "ETCD_PEER_CLIENT_CERT_AUTH"
              compare:
                op: eq
                value: true
        remediation: |
          Edit the etcd pod specification file $etcdconf on the master
          node and set the below parameter.
          --peer-client-cert-auth=true
        scored: true

      - id: 2.6
        text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)"
        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
        tests:
          bin_op: or
          test_items:
            - flag: "--peer-auto-tls"
              env: "ETCD_PEER_AUTO_TLS"
              set: false
            - flag: "--peer-auto-tls"
              env: "ETCD_PEER_AUTO_TLS"
              compare:
                op: eq
                value: false
        remediation: |
          Edit the etcd pod specification file $etcdconf on the master
          node and either remove the --peer-auto-tls parameter or set it to false.
          --peer-auto-tls=false
        scored: true

      - id: 2.7
        text: "Ensure that a unique Certificate Authority is used for etcd (Manual)"
        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
        tests:
          test_items:
            - flag: "--trusted-ca-file"
              env: "ETCD_TRUSTED_CA_FILE"
        remediation: |
          [Manual test]
          Follow the etcd documentation and create a dedicated certificate authority setup for the
          etcd service.
          Then, edit the etcd pod specification file $etcdconf on the
          master node and set the below parameter.
          --trusted-ca-file=</path/to/ca-file>
        scored: false
930 cfg/cis-1.12/master.yaml Normal file
@@ -0,0 +1,930 @@
---
|
||||
controls:
|
||||
version: "cis-1.12"
|
||||
id: 1
|
||||
text: "Control Plane Security Configuration"
|
||||
type: "master"
|
||||
groups:
|
||||
- id: 1.1
|
||||
text: "Control Plane Node Configuration Files"
|
||||
checks:
|
||||
- id: 1.1.1
|
||||
text: "Ensure that the API server pod specification file permissions are set to 600 or more restrictive (Automated)"
|
||||
audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'"
|
||||
tests:
|
||||
test_items:
|
||||
- flag: "permissions"
|
||||
compare:
|
||||
op: bitmask
|
||||
value: "600"
|
||||
remediation: |
|
||||
Run the below command (based on the file location on your system) on the
|
||||
control plane node.
|
||||
For example, chmod 600 $apiserverconf
|
||||
scored: true
|
||||
|
||||
      - id: 1.1.2
        text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)"
        audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'"
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example, chown root:root $apiserverconf
        scored: true

      - id: 1.1.3
        text: "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Automated)"
        audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'"
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "600"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example, chmod 600 $controllermanagerconf
        scored: true

      - id: 1.1.4
        text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)"
        audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'"
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example, chown root:root $controllermanagerconf
        scored: true

      - id: 1.1.5
        text: "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Automated)"
        audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'"
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "600"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example, chmod 600 $schedulerconf
        scored: true

      - id: 1.1.6
        text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)"
        audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'"
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example, chown root:root $schedulerconf
        scored: true

      - id: 1.1.7
        text: "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Automated)"
        audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c permissions=%a; fi'"
        use_multiple_values: true
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "600"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example,
          chmod 600 $etcdconf
        scored: true

      - id: 1.1.8
        text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)"
        audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c %U:%G; fi'"
        use_multiple_values: true
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example,
          chown root:root $etcdconf
        scored: true

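      # `use_multiple_values: true` (as in 1.1.7/1.1.8 above) makes kube-bench apply
      # the test to every line of the audit output rather than only the first, so a
      # single non-compliant file among several matched etcd manifests fails the check.
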
      - id: 1.1.9
        text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Manual)"
        audit: |
          ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a
          find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a
        use_multiple_values: true
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "600"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example, chmod 600 <path/to/cni/files>
        scored: false

      - id: 1.1.10
        text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)"
        audit: |
          ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G
          find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G
        use_multiple_values: true
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example,
          chown root:root <path/to/cni/files>
        scored: false

      - id: 1.1.11
        text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)"
        audit: |
          DATA_DIR=''
          for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do
            if test -d "$d"; then DATA_DIR="$d"; fi
          done
          if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi
          stat -c permissions=%a "$DATA_DIR"
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "700"
        remediation: |
          On the etcd server node, get the etcd data directory, passed as an argument --data-dir,
          from the command 'ps -ef | grep etcd'.
          Run the below command (based on the etcd data directory found above). For example,
          chmod 700 /var/lib/etcd
        scored: true

      - id: 1.1.12
        text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)"
        audit: |
          DATA_DIR=''
          for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do
            if test -d "$d"; then DATA_DIR="$d"; fi
          done
          if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi
          stat -c %U:%G "$DATA_DIR"
        tests:
          test_items:
            - flag: "etcd:etcd"
        remediation: |
          On the etcd server node, get the etcd data directory, passed as an argument --data-dir,
          from the command 'ps -ef | grep etcd'.
          Run the below command (based on the etcd data directory found above).
          For example, chown etcd:etcd /var/lib/etcd
        scored: true

      - id: 1.1.13
        text: "Ensure that the default administrative credential file permissions are set to 600 (Automated)"
        audit: |
          for adminconf in /etc/kubernetes/admin.conf /etc/kubernetes/super-admin.conf; do if test -e $adminconf; then stat -c "permissions=%a %n" $adminconf; fi; done
        use_multiple_values: true
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "600"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example, chmod 600 /etc/kubernetes/admin.conf
          On Kubernetes 1.29+ the super-admin.conf file should also be modified, if present.
          For example, chmod 600 /etc/kubernetes/super-admin.conf
        scored: true

      - id: 1.1.14
        text: "Ensure that the default administrative credential file ownership is set to root:root (Automated)"
        audit: |
          for adminconf in /etc/kubernetes/admin.conf /etc/kubernetes/super-admin.conf; do if test -e $adminconf; then stat -c "ownership=%U:%G %n" $adminconf; fi; done
        use_multiple_values: true
        tests:
          test_items:
            - flag: "ownership"
              compare:
                op: eq
                value: "root:root"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example, chown root:root /etc/kubernetes/admin.conf
          On Kubernetes 1.29+ the super-admin.conf file should also be modified, if present.
          For example, chown root:root /etc/kubernetes/super-admin.conf
        scored: true

      - id: 1.1.15
        text: "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)"
        audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'"
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "600"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example,
          chmod 600 $schedulerkubeconfig
        scored: true

      - id: 1.1.16
        text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)"
        audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'"
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example,
          chown root:root $schedulerkubeconfig
        scored: true

      - id: 1.1.17
        text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)"
        audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'"
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "600"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example,
          chmod 600 $controllermanagerkubeconfig
        scored: true

      - id: 1.1.18
        text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)"
        audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c %U:%G $controllermanagerkubeconfig; fi'"
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example,
          chown root:root $controllermanagerkubeconfig
        scored: true

      - id: 1.1.19
        text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)"
        audit: "find /etc/kubernetes/pki/ | xargs stat -c %U:%G"
        use_multiple_values: true
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example,
          chown -R root:root /etc/kubernetes/pki/
        scored: true

      - id: 1.1.20
        text: "Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive (Manual)"
        audit: "find /etc/kubernetes/pki/ -name '*.crt' | xargs stat -c permissions=%a"
        use_multiple_values: true
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "644"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example,
          chmod -R 644 /etc/kubernetes/pki/*.crt
        scored: false

      - id: 1.1.21
        text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)"
        audit: "find /etc/kubernetes/pki/ -name '*.key' | xargs stat -c permissions=%a"
        use_multiple_values: true
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "600"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example,
          chmod -R 600 /etc/kubernetes/pki/*.key
        scored: false

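  # Tokens like $apiserverconf, $etcdconf and $apiserverbin are not shell variables:
  # kube-bench substitutes them from its per-platform configuration before running
  # each audit command, so the same control file works across distributions that
  # place the manifests and binaries in different locations.
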
  - id: 1.2
    text: "API Server"
    checks:
      - id: 1.2.1
        text: "Ensure that the --anonymous-auth argument is set to false (Manual)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--anonymous-auth"
              compare:
                op: eq
                value: false
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and set the below parameter.
          --anonymous-auth=false
        scored: false

      - id: 1.2.2
        text: "Ensure that the --token-auth-file parameter is not set (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--token-auth-file"
              set: false
        remediation: |
          Follow the documentation and configure alternate mechanisms for authentication. Then,
          edit the API server pod specification file $apiserverconf
          on the control plane node and remove the --token-auth-file=<filename> parameter.
        scored: true

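      # In these tests `set: false` asserts that the flag does not appear in the
      # audit output at all; 1.2.2 above therefore passes only once --token-auth-file
      # has been removed from the kube-apiserver command line.
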
      - id: 1.2.3
        text: "Ensure that the admission control plugin DenyServiceExternalIPs is set (Manual)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          bin_op: or
          test_items:
            - flag: "--enable-admission-plugins"
              compare:
                op: has
                value: "DenyServiceExternalIPs"
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and add the `DenyServiceExternalIPs` plugin
          to the enabled admission plugins, as such --enable-admission-plugins=DenyServiceExternalIPs.
        scored: false

      - id: 1.2.4
        text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          bin_op: and
          test_items:
            - flag: "--kubelet-client-certificate"
            - flag: "--kubelet-client-key"
        remediation: |
          Follow the Kubernetes documentation and set up the TLS connection between the
          apiserver and kubelets. Then, edit API server pod specification file
          $apiserverconf on the control plane node and set the
          kubelet client certificate and key parameters as below.
          --kubelet-client-certificate=<path/to/client-certificate-file>
          --kubelet-client-key=<path/to/client-key-file>
        scored: true

      - id: 1.2.5
        text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--kubelet-certificate-authority"
        remediation: |
          Follow the Kubernetes documentation and set up the TLS connection between
          the apiserver and kubelets. Then, edit the API server pod specification file
          $apiserverconf on the control plane node and set the
          --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority.
          --kubelet-certificate-authority=<ca-string>
        scored: true

      - id: 1.2.6
        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--authorization-mode"
              compare:
                op: nothave
                value: "AlwaysAllow"
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow.
          One such example could be as below.
          --authorization-mode=RBAC
        scored: true

      - id: 1.2.7
        text: "Ensure that the --authorization-mode argument includes Node (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--authorization-mode"
              compare:
                op: has
                value: "Node"
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and set the --authorization-mode parameter to a value that includes Node.
          --authorization-mode=Node,RBAC
        scored: true

      - id: 1.2.8
        text: "Ensure that the --authorization-mode argument includes RBAC (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--authorization-mode"
              compare:
                op: has
                value: "RBAC"
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and set the --authorization-mode parameter to a value that includes RBAC,
          for example `--authorization-mode=Node,RBAC`.
        scored: true

      - id: 1.2.9
        text: "Ensure that the admission control plugin EventRateLimit is set (Manual)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--enable-admission-plugins"
              compare:
                op: has
                value: "EventRateLimit"
        remediation: |
          Follow the Kubernetes documentation and set the desired limits in a configuration file.
          Then, edit the API server pod specification file $apiserverconf
          and set the below parameters.
          --enable-admission-plugins=...,EventRateLimit,...
          --admission-control-config-file=<path/to/configuration/file>
        scored: false

      - id: 1.2.10
        text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          bin_op: or
          test_items:
            - flag: "--enable-admission-plugins"
              compare:
                op: nothave
                value: AlwaysAdmit
            - flag: "--enable-admission-plugins"
              set: false
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a
          value that does not include AlwaysAdmit.
        scored: true

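      # `bin_op: or` makes a check pass when any one of its test_items matches;
      # without it kube-bench requires all test_items to pass. 1.2.10 above uses it
      # so that either "AlwaysAdmit absent from the plugin list" or "the flag not
      # set at all" is accepted.
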
      - id: 1.2.11
        text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--enable-admission-plugins"
              compare:
                op: has
                value: "AlwaysPullImages"
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and set the --enable-admission-plugins parameter to include
          AlwaysPullImages.
          --enable-admission-plugins=...,AlwaysPullImages,...
        scored: false

      - id: 1.2.12
        text: "Ensure that the admission control plugin ServiceAccount is set (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          bin_op: or
          test_items:
            - flag: "--disable-admission-plugins"
              compare:
                op: nothave
                value: "ServiceAccount"
            - flag: "--disable-admission-plugins"
              set: false
        remediation: |
          Follow the documentation and create ServiceAccount objects as per your environment.
          Then, edit the API server pod specification file $apiserverconf
          on the control plane node and ensure that the --disable-admission-plugins parameter is set to a
          value that does not include ServiceAccount.
        scored: true

      - id: 1.2.13
        text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          bin_op: or
          test_items:
            - flag: "--disable-admission-plugins"
              compare:
                op: nothave
                value: "NamespaceLifecycle"
            - flag: "--disable-admission-plugins"
              set: false
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and set the --disable-admission-plugins parameter to
          ensure it does not include NamespaceLifecycle.
        scored: true

      - id: 1.2.14
        text: "Ensure that the admission control plugin NodeRestriction is set (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--enable-admission-plugins"
              compare:
                op: has
                value: "NodeRestriction"
        remediation: |
          Follow the Kubernetes documentation and configure the NodeRestriction plug-in on kubelets.
          Then, edit the API server pod specification file $apiserverconf
          on the control plane node and set the --enable-admission-plugins parameter to a
          value that includes NodeRestriction.
          --enable-admission-plugins=...,NodeRestriction,...
        scored: true

      - id: 1.2.15
        text: "Ensure that the --profiling argument is set to false (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--profiling"
              compare:
                op: eq
                value: false
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and set the below parameter.
          --profiling=false
        scored: true

      - id: 1.2.16
        text: "Ensure that the --audit-log-path argument is set (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--audit-log-path"
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and set the --audit-log-path parameter to a suitable path and
          file where you would like audit logs to be written, for example,
          --audit-log-path=/var/log/apiserver/audit.log
        scored: true

      - id: 1.2.17
        text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--audit-log-maxage"
              compare:
                op: gte
                value: 30
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and set the --audit-log-maxage parameter to 30
          or as an appropriate number of days, for example,
          --audit-log-maxage=30
        scored: true

      - id: 1.2.18
        text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--audit-log-maxbackup"
              compare:
                op: gte
                value: 10
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate
          value. For example,
          --audit-log-maxbackup=10
        scored: true

      - id: 1.2.19
        text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--audit-log-maxsize"
              compare:
                op: gte
                value: 100
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB.
          For example, to set it as 100 MB, --audit-log-maxsize=100
        scored: true

      - id: 1.2.20
        text: "Ensure that the --request-timeout argument is set as appropriate (Manual)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        type: manual
        remediation: |
          Edit the API server pod specification file $apiserverconf
          and set the below parameter as appropriate and if needed.
          For example, --request-timeout=300s
        scored: false

      - id: 1.2.21
        text: "Ensure that the --service-account-lookup argument is set to true (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          bin_op: or
          test_items:
            - flag: "--service-account-lookup"
              set: false
            - flag: "--service-account-lookup"
              compare:
                op: eq
                value: true
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and set the below parameter.
          --service-account-lookup=true
          Alternatively, you can delete the --service-account-lookup parameter from this file so
          that the default takes effect.
        scored: true

      - id: 1.2.22
        text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--service-account-key-file"
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and set the --service-account-key-file parameter
          to the public key file for service accounts. For example,
          --service-account-key-file=<filename>
        scored: true

      - id: 1.2.23
        text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          bin_op: and
          test_items:
            - flag: "--etcd-certfile"
            - flag: "--etcd-keyfile"
        remediation: |
          Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd.
          Then, edit the API server pod specification file $apiserverconf
          on the control plane node and set the etcd certificate and key file parameters.
          --etcd-certfile=<path/to/client-certificate-file>
          --etcd-keyfile=<path/to/client-key-file>
        scored: true

      - id: 1.2.24
        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          bin_op: and
          test_items:
            - flag: "--tls-cert-file"
            - flag: "--tls-private-key-file"
        remediation: |
          Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
          Then, edit the API server pod specification file $apiserverconf
          on the control plane node and set the TLS certificate and private key file parameters.
          --tls-cert-file=<path/to/tls-certificate-file>
          --tls-private-key-file=<path/to/tls-key-file>
        scored: true

      - id: 1.2.25
        text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--client-ca-file"
        remediation: |
          Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
          Then, edit the API server pod specification file $apiserverconf
          on the control plane node and set the client certificate authority file.
          --client-ca-file=<path/to/client-ca-file>
        scored: true

      - id: 1.2.26
        text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--etcd-cafile"
        remediation: |
          Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd.
          Then, edit the API server pod specification file $apiserverconf
          on the control plane node and set the etcd certificate authority file parameter.
          --etcd-cafile=<path/to/ca-file>
        scored: true

      - id: 1.2.27
        text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--encryption-provider-config"
        remediation: |
          Follow the Kubernetes documentation and configure an EncryptionConfig file.
          Then, edit the API server pod specification file $apiserverconf
          on the control plane node and set the --encryption-provider-config parameter to the path of that file.
          For example, --encryption-provider-config=</path/to/EncryptionConfig/File>
        scored: false

      - id: 1.2.28
        text: "Ensure that encryption providers are appropriately configured (Manual)"
        audit: |
          ENCRYPTION_PROVIDER_CONFIG=$(ps -ef | grep $apiserverbin | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%')
          if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi
        tests:
          test_items:
            - flag: "provider"
              compare:
                op: valid_elements
                value: "aescbc,kms,secretbox"
        remediation: |
          Follow the Kubernetes documentation and configure an EncryptionConfig file.
          In this file, choose aescbc, kms or secretbox as the encryption provider.
        scored: false

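      # A minimal sketch of the EncryptionConfiguration that 1.2.27/1.2.28 point at
      # (illustrative only; the key name and secret value are placeholders you choose):
      #   apiVersion: apiserver.config.k8s.io/v1
      #   kind: EncryptionConfiguration
      #   resources:
      #     - resources: ["secrets"]
      #       providers:
      #         - aescbc:
      #             keys:
      #               - name: key1
      #                 secret: <base64-encoded 32-byte key>
      #         - identity: {}
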
      - id: 1.2.29
        text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--tls-cipher-suites"
              compare:
                op: valid_elements
                value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256"
        remediation: |
          Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml
          on the control plane node and set the below parameter.
          --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,
          TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
          TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
          TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256
        scored: false

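      # `op: valid_elements` splits the observed flag value on commas and passes only
      # if every element appears in the allowed list, so any cipher outside the set
      # above (or any provider other than aescbc/kms/secretbox in 1.2.28) fails.
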
      - id: 1.2.30
        text: "Ensure that the --service-account-extend-token-expiration parameter is set to false (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--service-account-extend-token-expiration"
              compare:
                op: eq
                value: false
        remediation: |
          Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml
          on the control plane node and set the --service-account-extend-token-expiration parameter to false.
          `--service-account-extend-token-expiration=false`
          By default, this parameter is set to true.
        scored: true

  - id: 1.3
    text: "Controller Manager"
    checks:
      - id: 1.3.1
        text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)"
        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
        tests:
          test_items:
            - flag: "--terminated-pod-gc-threshold"
        remediation: |
          Edit the Controller Manager pod specification file $controllermanagerconf
          on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold,
          for example, --terminated-pod-gc-threshold=10
        scored: false

      - id: 1.3.2
        text: "Ensure that the --profiling argument is set to false (Automated)"
        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
        tests:
          test_items:
            - flag: "--profiling"
              compare:
                op: eq
                value: false
        remediation: |
          Edit the Controller Manager pod specification file $controllermanagerconf
          on the control plane node and set the below parameter.
          --profiling=false
        scored: true

      - id: 1.3.3
        text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)"
        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
        tests:
          test_items:
            - flag: "--use-service-account-credentials"
              compare:
                op: noteq
                value: false
        remediation: |
          Edit the Controller Manager pod specification file $controllermanagerconf
          on the control plane node to set the below parameter.
          --use-service-account-credentials=true
        scored: true

      - id: 1.3.4
        text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)"
        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
        tests:
          test_items:
            - flag: "--service-account-private-key-file"
        remediation: |
          Edit the Controller Manager pod specification file $controllermanagerconf
          on the control plane node and set the --service-account-private-key-file parameter
          to the private key file for service accounts.
          --service-account-private-key-file=<filename>
        scored: true

      - id: 1.3.5
        text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)"
        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
        tests:
          test_items:
            - flag: "--root-ca-file"
        remediation: |
          Edit the Controller Manager pod specification file $controllermanagerconf
          on the control plane node and set the --root-ca-file parameter to the certificate bundle file.
          --root-ca-file=<path/to/file>
        scored: true

      - id: 1.3.6
        text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)"
        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
        tests:
          bin_op: or
          test_items:
            - flag: "--feature-gates"
              compare:
                op: nothave
                value: "RotateKubeletServerCertificate=false"
              set: true
            - flag: "--feature-gates"
              set: false
        remediation: |
          Edit the Controller Manager pod specification file $controllermanagerconf
          on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true.
          --feature-gates=RotateKubeletServerCertificate=true
        scored: true

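      # RotateKubeletServerCertificate is a feature gate that has defaulted to true
      # for many releases, so the `or` in 1.3.6 also accepts a command line with no
      # --feature-gates flag at all; only an explicit
      # RotateKubeletServerCertificate=false fails the check.
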
      - id: 1.3.7
        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
        tests:
          bin_op: or
          test_items:
            - flag: "--bind-address"
              compare:
                op: eq
                value: "127.0.0.1"
            - flag: "--bind-address"
              set: false
        remediation: |
          Edit the Controller Manager pod specification file $controllermanagerconf
          on the control plane node and ensure the correct value for the --bind-address parameter
        scored: true

  - id: 1.4
    text: "Scheduler"
    checks:
      - id: 1.4.1
        text: "Ensure that the --profiling argument is set to false (Automated)"
        audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
        tests:
          test_items:
            - flag: "--profiling"
              compare:
                op: eq
                value: false
        remediation: |
          Edit the Scheduler pod specification file $schedulerconf
          on the control plane node and set the below parameter.
          --profiling=false
        scored: true

      - id: 1.4.2
        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
        audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
        tests:
          bin_op: or
          test_items:
            - flag: "--bind-address"
              compare:
                op: eq
                value: "127.0.0.1"
            - flag: "--bind-address"
              set: false
        remediation: |
          Edit the Scheduler pod specification file $schedulerconf
          on the control plane node and ensure the correct value for the --bind-address parameter
        scored: true
492 cfg/cis-1.12/node.yaml Normal file
@@ -0,0 +1,492 @@
---
controls:
version: "cis-1.12"
id: 4
text: "Worker Node Security Configuration"
type: "node"
groups:
  - id: 4.1
    text: "Worker Node Configuration Files"
    checks:
      - id: 4.1.1
        text: "Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)"
        audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' '
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "600"
        remediation: |
          Run the below command (based on the file location on your system) on each worker node.
          For example, chmod 600 $kubeletsvc
        scored: true

      - id: 4.1.2
        text: "Ensure that the kubelet service file ownership is set to root:root (Automated)"
        audit: '/bin/sh -c "if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; else echo \"File not found\"; fi"'
        tests:
          bin_op: or
          test_items:
            - flag: root:root
            - flag: "File not found"
        remediation: |
          Run the below command (based on the file location on your system) on each worker node.
          For example,
          chown root:root $kubeletsvc
        scored: true

      - id: 4.1.3
        text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Manual)"
        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' '
        tests:
          bin_op: or
          test_items:
            - flag: "permissions"
              set: true
              compare:
                op: bitmask
                value: "600"
        remediation: |
          Run the below command (based on the file location on your system) on each worker node.
          For example,
          chmod 600 $proxykubeconfig
        scored: false

      - id: 4.1.4
        text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)"
        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' '
        tests:
          bin_op: or
          test_items:
            - flag: root:root
        remediation: |
          Run the below command (based on the file location on your system) on each worker node.
          For example, chown root:root $proxykubeconfig
        scored: false

      - id: 4.1.5
        text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)"
        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' '
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "600"
        remediation: |
          Run the below command (based on the file location on your system) on each worker node.
          For example,
          chmod 600 $kubeletkubeconfig
        scored: true

      - id: 4.1.6
        text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)"
        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' '
        tests:
          test_items:
            - flag: root:root
        remediation: |
          Run the below command (based on the file location on your system) on each worker node.
          For example,
          chown root:root $kubeletkubeconfig
        scored: true

      - id: 4.1.7
        text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Manual)"
        audit: |
          CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq)
          if test -z $CAFILE; then CAFILE=$kubeletcafile; fi
          if test -e $CAFILE; then stat -c permissions=%a $CAFILE; fi
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "644"
        remediation: |
          Run the following command to modify the file permissions of the
          --client-ca-file chmod 644 <filename>
        scored: false

      - id: 4.1.8
        text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)"
        audit: |
          CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq)
          if test -z $CAFILE; then CAFILE=$kubeletcafile; fi
          if test -e $CAFILE; then stat -c %U:%G $CAFILE; fi
        tests:
          test_items:
            - flag: root:root
              compare:
                op: eq
                value: root:root
        remediation: |
          Run the following command to modify the ownership of the --client-ca-file.
          chown root:root <filename>
        scored: false

      - id: 4.1.9
        text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Automated)"
        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' '
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "600"
        remediation: |
          Run the following command (using the config file location identified in the Audit step)
          chmod 600 $kubeletconf
        scored: true

      - id: 4.1.10
        text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Automated)"
        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
        tests:
          test_items:
            - flag: root:root
        remediation: |
          Run the following command (using the config file location identified in the Audit step)
          chown root:root $kubeletconf
        scored: true

  - id: 4.2
    text: "Kubelet"
    checks:
      - id: 4.2.1
        text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: "--anonymous-auth"
              path: '{.authentication.anonymous.enabled}'
              compare:
                op: eq
                value: false
        remediation: |
          If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to
          `false`.
          If using executable arguments, edit the kubelet service file
          $kubeletsvc on each worker node and
          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
          `--anonymous-auth=false`
          Based on your system, restart the kubelet service. For example,
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: true

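      # Checks in this group give both an `audit` (the kubelet process line) and an
      # `audit_config` (the kubelet config file contents). When the flag is absent
      # from the command line, kube-bench falls back to the config output and
      # evaluates the JSONPath in `path` - e.g. {.authentication.anonymous.enabled}
      # above - so either configuration style is assessed.
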
      - id: 4.2.2
        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --authorization-mode
              path: '{.authorization.mode}'
              compare:
                op: nothave
                value: AlwaysAllow
        remediation: |
          If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If
          using executable arguments, edit the kubelet service file
          $kubeletsvc on each worker node and
          set the below parameter in KUBELET_AUTHZ_ARGS variable.
          --authorization-mode=Webhook
          Based on your system, restart the kubelet service. For example,
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: true

      - id: 4.2.3
        text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --client-ca-file
              path: '{.authentication.x509.clientCAFile}'
        remediation: |
          If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to
          the location of the client CA file.
          If using command line arguments, edit the kubelet service file
          $kubeletsvc on each worker node and
          set the below parameter in KUBELET_AUTHZ_ARGS variable.
          --client-ca-file=<path/to/client-ca-file>
          Based on your system, restart the kubelet service. For example,
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: true

      - id: 4.2.4
        text: "Verify that if defined, the --read-only-port argument is set to 0 (Manual)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          bin_op: or
          test_items:
            - flag: "--read-only-port"
              path: '{.readOnlyPort}'
              compare:
                op: eq
                value: 0
            - flag: "--read-only-port"
              path: '{.readOnlyPort}'
              set: false
        remediation: |
          If using a Kubelet config file, edit the file to set `readOnlyPort` to 0.
          If using command line arguments, edit the kubelet service file
          $kubeletsvc on each worker node and
          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
          --read-only-port=0
          Based on your system, restart the kubelet service. For example,
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: false

      - id: 4.2.5
        text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --streaming-connection-idle-timeout
              path: '{.streamingConnectionIdleTimeout}'
              compare:
                op: noteq
                value: 0
            - flag: --streaming-connection-idle-timeout
              path: '{.streamingConnectionIdleTimeout}'
              set: false
          bin_op: or
        remediation: |
          If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a
          value other than 0.
          If using command line arguments, edit the kubelet service file
          $kubeletsvc on each worker node and
          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
          --streaming-connection-idle-timeout=5m
          Based on your system, restart the kubelet service. For example,
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: false

      - id: 4.2.6
        text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --make-iptables-util-chains
              path: '{.makeIPTablesUtilChains}'
              compare:
                op: eq
                value: true
            - flag: --make-iptables-util-chains
              path: '{.makeIPTablesUtilChains}'
              set: false
          bin_op: or
        remediation: |
          If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`.
          If using command line arguments, edit the kubelet service file
          $kubeletsvc on each worker node and
          remove the --make-iptables-util-chains argument from the
          KUBELET_SYSTEM_PODS_ARGS variable.
          Based on your system, restart the kubelet service. For example:
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: true

      - id: 4.2.7
        text: "Ensure that the --hostname-override argument is not set (Manual)"
        # This is one of those properties that can only be set as a command line argument.
        # To check if the property is set as expected, we need to parse the kubelet command
        # instead of reading the Kubelet Configuration file.
        audit: "/bin/ps -fC $kubeletbin"
        tests:
          test_items:
            - flag: --hostname-override
              set: false
        remediation: |
          Edit the kubelet service file $kubeletsvc
          on each worker node and remove the --hostname-override argument from the
          KUBELET_SYSTEM_PODS_ARGS variable.
          Based on your system, restart the kubelet service. For example,
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: false

      - id: 4.2.8
        text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --event-qps
              path: '{.eventRecordQPS}'
              compare:
                op: gte
                value: 0
            - flag: --event-qps
              path: '{.eventRecordQPS}'
              set: false
          bin_op: or
        remediation: |
          If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level.
          If using command line arguments, edit the kubelet service file
          $kubeletsvc on each worker node and
          set the --event-qps parameter in the KUBELET_SYSTEM_PODS_ARGS variable to an appropriate level.
          Based on your system, restart the kubelet service. For example,
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: false

      - id: 4.2.9
        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --tls-cert-file
              path: '{.tlsCertFile}'
            - flag: --tls-private-key-file
              path: '{.tlsPrivateKeyFile}'
        remediation: |
          If using a Kubelet config file, edit the file to set `tlsCertFile` to the location
          of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile`
          to the location of the corresponding private key file.
          If using command line arguments, edit the kubelet service file
          $kubeletsvc on each worker node and
          set the below parameters in KUBELET_CERTIFICATE_ARGS variable.
          --tls-cert-file=<path/to/tls-certificate-file>
          --tls-private-key-file=<path/to/tls-key-file>
          Based on your system, restart the kubelet service. For example,
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: false

      - id: 4.2.10
        text: "Ensure that the --rotate-certificates argument is not set to false (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --rotate-certificates
              path: '{.rotateCertificates}'
              compare:
                op: eq
                value: true
            - flag: --rotate-certificates
              path: '{.rotateCertificates}'
              set: false
          bin_op: or
        remediation: |
          If using a Kubelet config file, edit the file to set `rotateCertificates` to `true` or
          remove it altogether to use the default value.
          If using command line arguments, edit the kubelet service file
          $kubeletsvc on each worker node and
          remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS
          variable.
          Based on your system, restart the kubelet service. For example,
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: true

      - id: 4.2.11
        text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          bin_op: or
          test_items:
            - flag: RotateKubeletServerCertificate
              path: '{.featureGates.RotateKubeletServerCertificate}'
              compare:
                op: nothave
                value: false
            - flag: RotateKubeletServerCertificate
              path: '{.featureGates.RotateKubeletServerCertificate}'
              set: false
        remediation: |
          Edit the kubelet service file $kubeletsvc
          on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable.
          --feature-gates=RotateKubeletServerCertificate=true
          Based on your system, restart the kubelet service. For example:
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: false

      - id: 4.2.12
        text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --tls-cipher-suites
              path: '{range .tlsCipherSuites[:]}{}{'',''}{end}'
              compare:
                op: valid_elements
                value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
        remediation: |
          If using a Kubelet config file, edit the file to set `tlsCipherSuites` to
          TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
          or to a subset of these values.
          If using executable arguments, edit the kubelet service file
          $kubeletsvc on each worker node and
          set the --tls-cipher-suites parameter as follows, or to a subset of these values.
          --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
          Based on your system, restart the kubelet service. For example:
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: false

      - id: 4.2.13
        text: "Ensure that a limit is set on pod PIDs (Manual)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --pod-max-pids
              path: '{.podPidsLimit}'
        remediation: |
          Decide on an appropriate level for this parameter and set it,
          either via the --pod-max-pids command line parameter or the podPidsLimit configuration file setting.
        scored: false

      - id: 4.2.14
        text: "Ensure that the --seccomp-default parameter is set to true (Manual)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --seccomp-default
              path: '{.seccompDefault}'
        remediation: |
          Set the parameter, either via the --seccomp-default command line parameter or the
          seccompDefault configuration file setting.
          By default the seccomp profile is not enabled.
        scored: false

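  # A minimal KubeletConfiguration fragment (illustrative values) that would satisfy
  # both 4.2.13 and 4.2.14; podPidsLimit and seccompDefault are real
  # kubelet.config.k8s.io/v1beta1 fields:
  #   apiVersion: kubelet.config.k8s.io/v1beta1
  #   kind: KubeletConfiguration
  #   podPidsLimit: 4096
  #   seccompDefault: true
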
  - id: 4.3
    text: "kube-proxy"
    checks:
      - id: 4.3.1
        text: "Ensure that the kube-proxy metrics service is bound to localhost (Automated)"
        audit: "/bin/ps -fC $proxybin"
        audit_config: "/bin/sh -c 'if test -e $proxykubeconfig; then cat $proxykubeconfig; fi'"
        tests:
          bin_op: or
          test_items:
            - flag: "--metrics-bind-address"
              path: '{.metricsBindAddress}'
              compare:
                op: has
                value: "127.0.0.1"
            - flag: "--metrics-bind-address"
              path: '{.metricsBindAddress}'
              set: false
        remediation: |
          Modify or remove any values which bind the metrics service to a non-localhost address.
          The default value is 127.0.0.1:10249.
        scored: true
515 cfg/cis-1.12/policies.yaml Normal file
@@ -0,0 +1,515 @@
---
controls:
version: "cis-1.12"
id: 5
text: "Kubernetes Policies"
type: "policies"
groups:
  - id: 5.1
    text: "RBAC and Service Accounts"
    checks:
      - id: 5.1.1
        text: "Ensure that the cluster-admin role is only used where required (Manual)"
        audit: |
          kubectl get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].name --no-headers | while read -r role_name role_binding subject
          do
            if [[ "${role_name}" != "cluster-admin" && "${role_binding}" == "cluster-admin" ]]; then
              is_compliant="false"
            else
              is_compliant="true"
            fi;
            echo "**role_name: ${role_name} role_binding: ${role_binding} subject: ${subject} is_compliant: ${is_compliant}"
          done
        use_multiple_values: true
        tests:
          test_items:
            - flag: "is_compliant"
              compare:
                op: eq
                value: true
        remediation: |
          Identify all clusterrolebindings to the cluster-admin role. Check if they are used and
          if they need this role or if they could use a role with fewer privileges.
          Where possible, first bind users to a lower privileged role and then remove the
          clusterrolebinding to the cluster-admin role : kubectl delete clusterrolebinding [name]
          Condition: is_compliant is false if rolename is not cluster-admin and rolebinding is cluster-admin.
        scored: false

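      # Unlike the master/node files, the audits in this group shell out to kubectl,
      # so kube-bench needs a kubeconfig with read access to the cluster when the
      # policies section runs; without it these manual checks produce no usable output.
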
      - id: 5.1.2
        text: "Minimize access to secrets (Manual)"
        audit: "echo \"canGetListWatchSecretsAsSystemAuthenticated: $(kubectl auth can-i get,list,watch secrets --all-namespaces --as=system:authenticated)\""
        tests:
          test_items:
            - flag: "canGetListWatchSecretsAsSystemAuthenticated"
              compare:
                op: eq
                value: no
        remediation: |
          Where possible, remove get, list and watch access to Secret objects in the cluster.
        scored: false

      - id: 5.1.3
        text: "Minimize wildcard use in Roles and ClusterRoles (Manual)"
        audit: |
          # Check Roles
          kubectl get roles --all-namespaces -o custom-columns=ROLE_NAMESPACE:.metadata.namespace,ROLE_NAME:.metadata.name --no-headers | while read -r role_namespace role_name
          do
            role_rules=$(kubectl get role -n "${role_namespace}" "${role_name}" -o=json | jq -c '.rules')
            if echo "${role_rules}" | grep -q "\[\"\*\"\]"; then
              role_is_compliant="false"
            else
              role_is_compliant="true"
            fi;
            echo "**role_name: ${role_name} role_namespace: ${role_namespace} role_rules: ${role_rules} role_is_compliant: ${role_is_compliant}"
          done

          # Check ClusterRoles
          kubectl get clusterroles -o custom-columns=CLUSTERROLE_NAME:.metadata.name --no-headers | while read -r clusterrole_name
          do
            clusterrole_rules=$(kubectl get clusterrole "${clusterrole_name}" -o=json | jq -c '.rules')
            if echo "${clusterrole_rules}" | grep -q "\[\"\*\"\]"; then
              clusterrole_is_compliant="false"
            else
              clusterrole_is_compliant="true"
            fi;
            echo "**clusterrole_name: ${clusterrole_name} clusterrole_rules: ${clusterrole_rules} clusterrole_is_compliant: ${clusterrole_is_compliant}"
          done
        use_multiple_values: true
        tests:
          bin_op: or
          test_items:
            - flag: "role_is_compliant"
              compare:
                op: eq
                value: true
              set: true
            - flag: "clusterrole_is_compliant"
              compare:
                op: eq
                value: true
              set: true
        remediation: |
          Where possible replace any use of wildcards ["*"] in roles and clusterroles with specific
          objects or actions.
          Condition: role_is_compliant is false if ["*"] is found in rules.
          Condition: clusterrole_is_compliant is false if ["*"] is found in rules.
        scored: false

- id: 5.1.4
|
||||
text: "Minimize access to create pods (Manual)"
|
||||
audit: |
|
||||
echo "canCreatePodsAsSystemAuthenticated: $(kubectl auth can-i create pods --all-namespaces --as=system:authenticated)"
|
||||
tests:
|
||||
test_items:
|
||||
- flag: "canCreatePodsAsSystemAuthenticated"
|
||||
compare:
|
||||
op: eq
|
||||
value: no
|
||||
remediation: |
|
||||
Where possible, remove create access to pod objects in the cluster.
|
||||
scored: false
|
||||
|
||||
- id: 5.1.5
|
||||
text: "Ensure that default service accounts are not actively used (Manual)"
|
||||
audit: |
|
||||
kubectl get serviceaccount --all-namespaces --field-selector metadata.name=default -o=json | jq -r '.items[] | " namespace: \(.metadata.namespace), kind: \(.kind), name: \(.metadata.name), automountServiceAccountToken: \(.automountServiceAccountToken | if . == null then "notset" else . end )"' | xargs -L 1
|
||||
use_multiple_values: true
|
||||
tests:
|
||||
test_items:
|
||||
- flag: "automountServiceAccountToken"
|
||||
compare:
|
||||
op: eq
|
||||
value: false
|
||||
set: true
|
||||
remediation: |
|
||||
Create explicit service accounts wherever a Kubernetes workload requires specific access
|
||||
to the Kubernetes API server.
|
||||
Modify the configuration of each default service account to include this value
|
||||
`automountServiceAccountToken: false`.
|
||||
scored: false
|
||||
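        # One way to apply this remediation across all namespaces (a sketch; assumes
        # kubectl access and that no workload relies on the default service account):
        #   for ns in $(kubectl get ns -o jsonpath='{.items[*].metadata.name}'); do
        #     kubectl patch serviceaccount default -n "$ns" -p '{"automountServiceAccountToken": false}'
        #   done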

      - id: 5.1.6
        text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)"
        audit: |
          kubectl get pods --all-namespaces -o custom-columns=POD_NAMESPACE:.metadata.namespace,POD_NAME:.metadata.name,POD_SERVICE_ACCOUNT:.spec.serviceAccount,POD_IS_AUTOMOUNTSERVICEACCOUNTTOKEN:.spec.automountServiceAccountToken --no-headers | while read -r pod_namespace pod_name pod_service_account pod_is_automountserviceaccounttoken
          do
            # Retrieve automountServiceAccountToken's value for ServiceAccount and Pod, set to notset if null or <none>.
            svacc_is_automountserviceaccounttoken=$(kubectl get serviceaccount -n "${pod_namespace}" "${pod_service_account}" -o json | jq -r '.automountServiceAccountToken' | sed -e 's/<none>/notset/g' -e 's/null/notset/g')
            pod_is_automountserviceaccounttoken=$(echo "${pod_is_automountserviceaccounttoken}" | sed -e 's/<none>/notset/g' -e 's/null/notset/g')
            if [ "${svacc_is_automountserviceaccounttoken}" = "false" ] && ( [ "${pod_is_automountserviceaccounttoken}" = "false" ] || [ "${pod_is_automountserviceaccounttoken}" = "notset" ] ); then
              is_compliant="true"
            elif [ "${svacc_is_automountserviceaccounttoken}" = "true" ] && [ "${pod_is_automountserviceaccounttoken}" = "false" ]; then
              is_compliant="true"
            else
              is_compliant="false"
            fi
            echo "**namespace: ${pod_namespace} pod_name: ${pod_name} service_account: ${pod_service_account} pod_is_automountserviceaccounttoken: ${pod_is_automountserviceaccounttoken} svacc_is_automountServiceAccountToken: ${svacc_is_automountserviceaccounttoken} is_compliant: ${is_compliant}"
          done
        use_multiple_values: true
        tests:
          test_items:
            - flag: "is_compliant"
              compare:
                op: eq
                value: true
        remediation: |
          Modify the definition of ServiceAccounts and Pods which do not need to mount service
          account tokens to disable it, with `automountServiceAccountToken: false`.
          If both the ServiceAccount and the Pod's .spec specify a value for automountServiceAccountToken, the Pod spec takes precedence.
          Condition: Pod is_compliant is true when
          - ServiceAccount is automountServiceAccountToken: false and Pod is automountServiceAccountToken: false or notset
          - ServiceAccount is automountServiceAccountToken: true notset and Pod is automountServiceAccountToken: false
        scored: false
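        # Illustrative Pod spec fragment for the compliant case described above
        # (hypothetical names; the pod-level field overrides the ServiceAccount):
        #   apiVersion: v1
        #   kind: Pod
        #   metadata:
        #     name: example
        #   spec:
        #     serviceAccountName: app-sa
        #     automountServiceAccountToken: false
        #     containers:
        #       - name: app
        #         image: example/app:latest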

      - id: 5.1.7
        text: "Avoid use of system:masters group (Manual)"
        type: "manual"
        remediation: |
          Remove the system:masters group from all users in the cluster.
        scored: false

      - id: 5.1.8
        text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)"
        type: "manual"
        remediation: |
          Where possible, remove the impersonate, bind and escalate rights from subjects.
        scored: false

      - id: 5.1.9
        text: "Minimize access to create persistent volumes (Manual)"
        type: "manual"
        remediation: |
          Where possible, remove create access to PersistentVolume objects in the cluster.
        scored: false

      - id: 5.1.10
        text: "Minimize access to the proxy sub-resource of nodes (Manual)"
        type: "manual"
        remediation: |
          Where possible, remove access to the proxy sub-resource of node objects.
        scored: false

      - id: 5.1.11
        text: "Minimize access to the approval sub-resource of certificatesigningrequests objects (Manual)"
        type: "manual"
        remediation: |
          Where possible, remove access to the approval sub-resource of certificatesigningrequests objects.
        scored: false

      - id: 5.1.12
        text: "Minimize access to webhook configuration objects (Manual)"
        type: "manual"
        remediation: |
          Where possible, remove access to the validatingwebhookconfigurations or mutatingwebhookconfigurations objects.
        scored: false

      - id: 5.1.13
        text: "Minimize access to the service account token creation (Manual)"
        type: "manual"
        remediation: |
          Where possible, remove access to the token sub-resource of serviceaccount objects.
        scored: false

  - id: 5.2
    text: "Pod Security Standards"
    checks:
      - id: 5.2.1
        text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)"
        type: "manual"
        remediation: |
          Ensure that either Pod Security Admission or an external policy control system is in place
          for every namespace which contains user workloads.
        scored: false

      - id: 5.2.2
        text: "Minimize the admission of privileged containers (Manual)"
        audit: |
          kubectl get pods --all-namespaces -o custom-columns=POD_NAME:.metadata.name,POD_NAMESPACE:.metadata.namespace --no-headers | while read -r pod_name pod_namespace
          do
            # Retrieve container(s) for each Pod.
            kubectl get pod "${pod_name}" --namespace "${pod_namespace}" -o json | jq -c '.spec.containers[]' | while read -r container
            do
              # Retrieve the container's name.
              container_name=$(echo ${container} | jq -r '.name')
              # Retrieve the container's .securityContext.privileged value.
              container_privileged=$(echo ${container} | jq -r '.securityContext.privileged' | sed -e 's/null/notset/g')
              if [ "${container_privileged}" = "false" ] || [ "${container_privileged}" = "notset" ] ; then
                echo "***pod_name: ${pod_name} container_name: ${container_name} pod_namespace: ${pod_namespace} is_container_privileged: ${container_privileged} is_compliant: true"
              else
                echo "***pod_name: ${pod_name} container_name: ${container_name} pod_namespace: ${pod_namespace} is_container_privileged: ${container_privileged} is_compliant: false"
              fi
            done
          done
        use_multiple_values: true
        tests:
          test_items:
            - flag: "is_compliant"
              compare:
                op: eq
                value: true
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of privileged containers.
          Audit: the audit lists all pods' containers to retrieve their .securityContext.privileged value.
          Condition: is_compliant is false if the container's `.securityContext.privileged` is set to `true`.
          Default: by default, there are no restrictions on the creation of privileged containers.
        scored: false

      - id: 5.2.3
        text: "Minimize the admission of containers wishing to share the host process ID namespace (Manual)"
        audit: |
          kubectl get pods --all-namespaces -o custom-columns=POD_NAME:.metadata.name,POD_NAMESPACE:.metadata.namespace --no-headers | while read -r pod_name pod_namespace
          do
            # Retrieve spec.hostPID for each pod.
            pod_hostpid=$(kubectl get pod "${pod_name}" --namespace "${pod_namespace}" -o jsonpath='{.spec.hostPID}' 2>/dev/null)
            if [ -z "${pod_hostpid}" ]; then
              pod_hostpid="false"
              echo "***pod_name: ${pod_name} pod_namespace: ${pod_namespace} is_pod_hostpid: ${pod_hostpid} is_compliant: true"
            else
              echo "***pod_name: ${pod_name} pod_namespace: ${pod_namespace} is_pod_hostpid: ${pod_hostpid} is_compliant: false"
            fi
          done
        use_multiple_values: true
        tests:
          test_items:
            - flag: "is_compliant"
              compare:
                op: eq
                value: true
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of `hostPID` containers.
          Audit: the audit retrieves each Pod's spec.hostPID.
          Condition: is_compliant is false if the Pod's spec.hostPID is set to `true`.
          Default: by default, there are no restrictions on the creation of hostPID containers.
        scored: false

      - id: 5.2.4
        text: "Minimize the admission of containers wishing to share the host IPC namespace (Manual)"
        audit: |
          kubectl get pods --all-namespaces -o custom-columns=POD_NAME:.metadata.name,POD_NAMESPACE:.metadata.namespace --no-headers | while read -r pod_name pod_namespace
          do
            # Retrieve spec.hostIPC for each pod.
            pod_hostipc=$(kubectl get pod "${pod_name}" --namespace "${pod_namespace}" -o jsonpath='{.spec.hostIPC}' 2>/dev/null)
            if [ -z "${pod_hostipc}" ]; then
              pod_hostipc="false"
              echo "***pod_name: ${pod_name} pod_namespace: ${pod_namespace} is_pod_hostipc: ${pod_hostipc} is_compliant: true"
            else
              echo "***pod_name: ${pod_name} pod_namespace: ${pod_namespace} is_pod_hostipc: ${pod_hostipc} is_compliant: false"
            fi
          done
        use_multiple_values: true
        tests:
          test_items:
            - flag: "is_compliant"
              compare:
                op: eq
                value: true
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of `hostIPC` containers.
          Audit: the audit retrieves each Pod's spec.hostIPC.
          Condition: is_compliant is false if the Pod's spec.hostIPC is set to `true`.
          Default: by default, there are no restrictions on the creation of hostIPC containers.
        scored: false

      - id: 5.2.5
        text: "Minimize the admission of containers wishing to share the host network namespace (Manual)"
        audit: |
          kubectl get pods --all-namespaces -o custom-columns=POD_NAME:.metadata.name,POD_NAMESPACE:.metadata.namespace --no-headers | while read -r pod_name pod_namespace
          do
            # Retrieve spec.hostNetwork for each pod.
            pod_hostnetwork=$(kubectl get pod "${pod_name}" --namespace "${pod_namespace}" -o jsonpath='{.spec.hostNetwork}' 2>/dev/null)
            if [ -z "${pod_hostnetwork}" ]; then
              pod_hostnetwork="false"
              echo "***pod_name: ${pod_name} pod_namespace: ${pod_namespace} is_pod_hostnetwork: ${pod_hostnetwork} is_compliant: true"
            else
              echo "***pod_name: ${pod_name} pod_namespace: ${pod_namespace} is_pod_hostnetwork: ${pod_hostnetwork} is_compliant: false"
            fi
          done
        use_multiple_values: true
        tests:
          test_items:
            - flag: "is_compliant"
              compare:
                op: eq
                value: true
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of `hostNetwork` containers.
          Audit: the audit retrieves each Pod's spec.hostNetwork.
          Condition: is_compliant is false if the Pod's spec.hostNetwork is set to `true`.
          Default: by default, there are no restrictions on the creation of hostNetwork containers.
        scored: false

      - id: 5.2.6
        text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)"
        audit: |
          kubectl get pods --all-namespaces -o custom-columns=POD_NAME:.metadata.name,POD_NAMESPACE:.metadata.namespace --no-headers | while read -r pod_name pod_namespace
          do
            # Retrieve container(s) for each Pod.
            kubectl get pod "${pod_name}" --namespace "${pod_namespace}" -o json | jq -c '.spec.containers[]' | while read -r container
            do
              # Retrieve the container's name.
              container_name=$(echo ${container} | jq -r '.name')
              # Retrieve the container's .securityContext.allowPrivilegeEscalation.
              container_allowprivesc=$(echo ${container} | jq -r '.securityContext.allowPrivilegeEscalation' | sed -e 's/null/notset/g')
              if [ "${container_allowprivesc}" = "false" ] || [ "${container_allowprivesc}" = "notset" ]; then
                echo "***pod_name: ${pod_name} container_name: ${container_name} pod_namespace: ${pod_namespace} is_container_allowprivesc: ${container_allowprivesc} is_compliant: true"
              else
                echo "***pod_name: ${pod_name} container_name: ${container_name} pod_namespace: ${pod_namespace} is_container_allowprivesc: ${container_allowprivesc} is_compliant: false"
              fi
            done
          done
        use_multiple_values: true
        tests:
          test_items:
            - flag: "is_compliant"
              compare:
                op: eq
                value: true
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of containers with `.securityContext.allowPrivilegeEscalation` set to `true`.
          Audit: the audit retrieves each Pod's container(s) `.securityContext.allowPrivilegeEscalation`.
          Condition: is_compliant is false if the container's `.securityContext.allowPrivilegeEscalation` is set to `true`.
          Default: If notset, privilege escalation is allowed (defaults to true). However if PSP/PSA is used with a `restricted` profile,
          privilege escalation is explicitly disallowed unless configured otherwise.
        scored: false

      - id: 5.2.7
        text: "Minimize the admission of root containers (Manual)"
        type: "manual"
        remediation: |
          Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot`
          or `MustRunAs` with the range of UIDs not including 0, is set.
        scored: false

      - id: 5.2.8
        text: "Minimize the admission of containers with the NET_RAW capability (Manual)"
        type: "manual"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of containers with the `NET_RAW` capability.
        scored: false

      - id: 5.2.9
        text: "Minimize the admission of containers with capabilities assigned (Manual)"
        type: "manual"
        remediation: |
          Review the use of capabilities in applications running on your cluster. Where a
          namespace contains applications which do not require any Linux capabilities to operate,
          consider adding a policy which forbids the admission of containers which do not drop all
          capabilities.
        scored: false

      - id: 5.2.10
        text: "Minimize the admission of Windows HostProcess containers (Manual)"
        type: "manual"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`.
        scored: false

      - id: 5.2.11
        text: "Minimize the admission of HostPath volumes (Manual)"
        type: "manual"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of containers with `hostPath` volumes.
        scored: false

      - id: 5.2.12
        text: "Minimize the admission of containers which use HostPorts (Manual)"
        type: "manual"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of containers which use `hostPort` sections.
        scored: false

  - id: 5.3
    text: "Network Policies and CNI"
    checks:
      - id: 5.3.1
        text: "Ensure that the CNI in use supports NetworkPolicies (Manual)"
        type: "manual"
        remediation: |
          If the CNI plugin in use does not support network policies, consideration should be given to
          making use of a different plugin, or finding an alternate mechanism for restricting traffic
          in the Kubernetes cluster.
        scored: false

      - id: 5.3.2
        text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)"
        type: "manual"
        remediation: |
          Follow the documentation and create NetworkPolicy objects as you need them.
        scored: false

  - id: 5.4
    text: "Secrets Management"
    checks:
      - id: 5.4.1
        text: "Prefer using Secrets as files over Secrets as environment variables (Manual)"
        type: "manual"
        remediation: |
          If possible, rewrite application code to read Secrets from mounted secret files, rather than
          from environment variables.
        scored: false

      - id: 5.4.2
        text: "Consider external secret storage (Manual)"
        type: "manual"
        remediation: |
          Refer to the Secrets management options offered by your cloud provider or a third-party
          secrets management solution.
        scored: false

  - id: 5.5
    text: "Extensible Admission Control"
    checks:
      - id: 5.5.1
        text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)"
        type: "manual"
        remediation: |
          Follow the Kubernetes documentation and setup image provenance.
        scored: false

  - id: 5.6
    text: "General Policies"
    checks:
      - id: 5.6.1
        text: "Create administrative boundaries between resources using namespaces (Manual)"
        type: "manual"
        remediation: |
          Follow the documentation and create namespaces for objects in your deployment as you need
          them.
        scored: false

      - id: 5.6.2
        text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)"
        type: "manual"
        remediation: |
          Use `securityContext` to enable the docker/default seccomp profile in your pod definitions.
          An example is as below:
            securityContext:
              seccompProfile:
                type: RuntimeDefault
        scored: false
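        # In context, the snippet above sits under the pod (or container) spec,
        # e.g. (hypothetical names):
        #   apiVersion: v1
        #   kind: Pod
        #   metadata:
        #     name: example
        #   spec:
        #     securityContext:
        #       seccompProfile:
        #         type: RuntimeDefault
        #     containers:
        #       - name: app
        #         image: example/app:latest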

      - id: 5.6.3
        text: "Apply SecurityContext to your Pods and Containers (Manual)"
        type: "manual"
        remediation: |
          Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a
          suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker
          Containers.
        scored: false

      - id: 5.6.4
        text: "The default namespace should not be used (Manual)"
        type: "manual"
        remediation: |
          Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
          resources and that all new resources are created in a specific namespace.
        scored: false
@@ -33,7 +33,7 @@ groups:
          Where possible, first bind users to a lower privileged role and then remove the
          clusterrolebinding to the cluster-admin role: kubectl delete clusterrolebinding [name]
          Condition: is_compliant is false if rolename is not cluster-admin and rolebinding is cluster-admin.
        scored: true
        scored: false

      - id: 5.1.2
        text: "Minimize access to secrets (Automated)"
@@ -46,7 +46,7 @@ groups:
                value: no
        remediation: |
          Where possible, remove get, list and watch access to Secret objects in the cluster.
        scored: true
        scored: false

      - id: 5.1.3
        text: "Minimize wildcard use in Roles and ClusterRoles (Automated)"
@@ -93,7 +93,7 @@ groups:
          objects or actions.
          Condition: role_is_compliant is false if ["*"] is found in rules.
          Condition: clusterrole_is_compliant is false if ["*"] is found in rules.
        scored: true
        scored: false

      - id: 5.1.4
        text: "Minimize access to create pods (Automated)"
@@ -107,7 +107,7 @@ groups:
                value: no
        remediation: |
          Where possible, remove create access to pod objects in the cluster.
        scored: true
        scored: false
      - id: 5.1.5
        text: "Ensure that default service accounts are not actively used (Automated)"
        audit: |
@@ -125,7 +125,7 @@ groups:
          to the Kubernetes API server.
          Modify the configuration of each default service account to include this value
          `automountServiceAccountToken: false`.
        scored: true
        scored: false

      - id: 5.1.6
        text: "Ensure that Service Account Tokens are only mounted where necessary (Automated)"
@@ -158,7 +158,7 @@ groups:
          Condition: Pod is_compliant to true when
          - ServiceAccount is automountServiceAccountToken: false and Pod is automountServiceAccountToken: false or notset
          - ServiceAccount is automountServiceAccountToken: true notset and Pod is automountServiceAccountToken: false
        scored: true
        scored: false

      - id: 5.1.7
        text: "Avoid use of system:masters group (Manual)"

@@ -283,20 +283,28 @@ version_mapping:
  "1.25": "cis-1.7"
  "1.26": "cis-1.8"
  "1.27": "cis-1.9"
  "1.28": "cis-1.9"
  "1.29": "cis-1.9"
  "1.30": "cis-1.10"
  "1.31": "cis-1.10"
  "1.28": "cis-1.10"
  "1.29": "cis-1.11"
  "1.30": "cis-1.11"
  "1.31": "cis-1.11"
  "1.32": "cis-1.12"
  "1.33": "cis-1.12"
  "1.34": "cis-1.12"
  "eks-1.0.1": "eks-1.0.1"
  "eks-1.1.0": "eks-1.1.0"
  "eks-1.2.0": "eks-1.2.0"
  "eks-1.5.0": "eks-1.5.0"
  "eks-1.7.0": "eks-1.7.0"
  "eks-1.8.0": "eks-1.8.0"
  "gke-1.0": "gke-1.0"
  "gke-1.2.0": "gke-1.2.0"
  "gke-1.6.0": "gke-1.6.0"
  "gke-1.8.0": "gke-1.8.0"
  "ocp-3.10": "rh-0.7"
  "ocp-3.11": "rh-0.7"
  "ocp-4.0": "rh-1.0"
  "ocp-4.11": "rh-1.4"
  "ocp-4.13": "rh-1.8"
  "aks-1.0": "aks-1.0"
  "aks-1.7": "aks-1.7"
  "ack-1.0": "ack-1.0"
@@ -310,6 +318,7 @@ version_mapping:
  "rke-cis-1.23": "rke-cis-1.23"
  "rke-cis-1.24": "rke-cis-1.24"
  "rke2-cis-1.7": "rke2-cis-1.7"
  "rke2-cis-1.8": "rke2-cis-1.8"
  "rke2-cis-1.23": "rke2-cis-1.23"
  "rke2-cis-1.24": "rke2-cis-1.24"

@@ -380,6 +389,18 @@ target_mapping:
    - "controlplane"
    - "etcd"
    - "policies"
  "cis-1.11":
    - "master"
    - "node"
    - "controlplane"
    - "etcd"
    - "policies"
  "cis-1.12":
    - "master"
    - "node"
    - "controlplane"
    - "etcd"
    - "policies"
  "gke-1.0":
    - "master"
    - "node"
@@ -399,6 +420,12 @@ target_mapping:
    - "controlplane"
    - "policies"
    - "managedservices"
  "gke-1.8.0":
    - "master"
    - "node"
    - "controlplane"
    - "policies"
    - "managedservices"
  "eks-1.0.1":
    - "master"
    - "node"
@@ -429,6 +456,12 @@ target_mapping:
    - "controlplane"
    - "policies"
    - "managedservices"
  "eks-1.8.0":
    - "master"
    - "node"
    - "controlplane"
    - "policies"
    - "managedservices"
  "rh-0.7":
    - "master"
    - "node"
@@ -457,6 +490,18 @@ target_mapping:
    - "controlplane"
    - "policies"
    - "etcd"
  "rh-1.4":
    - "master"
    - "node"
    - "controlplane"
    - "policies"
    - "etcd"
  "rh-1.8":
    - "master"
    - "node"
    - "controlplane"
    - "policies"
    - "etcd"
  "eks-stig-kubernetes-v1r6":
    - "node"
    - "controlplane"
@@ -516,6 +561,12 @@ target_mapping:
    - "controlplane"
    - "node"
    - "policies"
  "rke2-cis-1.8":
    - "master"
    - "etcd"
    - "controlplane"
    - "node"
    - "policies"
  "rke2-cis-1.23":
    - "master"
    - "etcd"
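# With the mappings above, version auto-detection on a v1.32, v1.33 or v1.34 cluster
# should resolve to cis-1.12; the explicit equivalent (a sketch; check the flags your
# kube-bench version supports) is:
#   kube-bench run --benchmark cis-1.12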
@@ -352,6 +352,10 @@ groups:
              compare:
                op: gte
                value: 0
            - flag: --event-qps
              path: '{.eventRecordQPS}'
              set: false
          bin_op: or
        remediation: |
          If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate
          level.

9
cfg/eks-1.8.0/config.yaml
Normal file
@@ -0,0 +1,9 @@
---
## Version-specific settings that override the values in cfg/config.yaml
## These settings are required if you are using the --asff option to report findings to AWS Security Hub
## AWS account number is required.
AWS_ACCOUNT: "<AWS_ACCT_NUMBER>"
## AWS region is required.
AWS_REGION: "<AWS_REGION>"
## EKS Cluster ARN is required.
CLUSTER_ARN: "<AWS_CLUSTER_ARN>"
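## Example with the placeholders filled in (hypothetical values only) and a matching
## invocation; see the kube-bench ASFF documentation for the required Security Hub setup:
##   AWS_ACCOUNT: "123456789012"
##   AWS_REGION: "us-east-1"
##   CLUSTER_ARN: "arn:aws:eks:us-east-1:123456789012:cluster/my-cluster"
## Run with: kube-bench run --benchmark eks-1.8.0 --asff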
69
cfg/eks-1.8.0/controlplane.yaml
Normal file
@@ -0,0 +1,69 @@
---
controls:
version: "eks-1.8.0"
id: 2
text: "Control Plane Configuration"
type: "controlplane"
groups:
  - id: 2.1
    text: "Logging"
    checks:
      - id: 2.1.1
        text: "Enable audit Logs (Manual)"
        type: manual
        remediation: |
          From Console:
          1. For each EKS Cluster in each region;
          2. Go to 'Amazon EKS' > 'Clusters' > '<cluster-name>' > 'Configuration' > 'Logging'.
          3. Click 'Manage logging'.
          4. Ensure that all options are toggled to 'Enabled'.
             API server: Enabled
             Audit: Enabled
             Authenticator: Enabled
             Controller manager: Enabled
             Scheduler: Enabled
          5. Click 'Save Changes'.

          From CLI:
          # For each EKS Cluster in each region;
          aws eks update-cluster-config \
            --region '${REGION_CODE}' \
            --name '${CLUSTER_NAME}' \
            --logging '{"clusterLogging":[{"types":["api","audit","authenticator","controllerManager","scheduler"],"enabled":true}]}'
        scored: false

      - id: 2.1.2
        text: "Ensure audit logs are collected and managed (Manual)"
        type: manual
        remediation: |
          Create or update the audit-policy.yaml to specify the audit logging configuration:
            apiVersion: audit.k8s.io/v1
            kind: Policy
            rules:
              - level: Metadata
                resources:
                  - group: ""
                    resources: ["pods"]
          Apply the audit policy configuration to the cluster:
            kubectl apply -f <path-to-audit-policy>.yaml
          Ensure audit logs are forwarded to a centralized logging system like CloudWatch, Elasticsearch, or another log management solution:
            kubectl create configmap cluster-audit-policy --from-file=audit-policy.yaml -n kube-system
            kubectl apply -f - <<EOF
            apiVersion: v1
            kind: Pod
            metadata:
              name: audit-logging
              namespace: kube-system
            spec:
              containers:
                - name: audit-log-forwarder
                  image: my-log-forwarder-image
                  volumeMounts:
                    - mountPath: /etc/kubernetes/audit
                      name: audit-config
              volumes:
                - name: audit-config
                  configMap:
                    name: cluster-audit-policy
            EOF
        scored: false
227
cfg/eks-1.8.0/managedservices.yaml
Normal file
@@ -0,0 +1,227 @@
---
controls:
version: "eks-1.8.0"
id: 5
text: "Managed Services"
type: "managedservices"
groups:
  - id: 5.1
    text: "Image Registry and Image Scanning"
    checks:
      - id: 5.1.1
        text: "Ensure Image Vulnerability Scanning using Amazon ECR image scanning or a third party provider (Manual)"
        type: "manual"
        remediation: |
          To utilize AWS ECR for image scanning, please follow the steps below:

          To create a repository configured for scan on push (AWS CLI):
          aws ecr create-repository --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE

          To edit the settings of an existing repository (AWS CLI):
          aws ecr put-image-scanning-configuration --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE

          Use the following steps to start a manual image scan using the AWS Management Console.

          1. Open the Amazon ECR console at https://console.aws.amazon.com/ecr/repositories.
          2. From the navigation bar, choose the Region to create your repository in.
          3. In the navigation pane, choose Repositories.
          4. On the Repositories page, choose the repository that contains the image to scan.
          5. On the Images page, select the image to scan and then choose Scan.
        scored: false

      - id: 5.1.2
        text: "Minimize user access to Amazon ECR (Manual)"
        type: "manual"
        remediation: |
          Before you use IAM to manage access to Amazon ECR, you should understand what IAM features
          are available to use with Amazon ECR. To get a high-level view of how Amazon ECR and other
          AWS services work with IAM, see AWS Services That Work with IAM in the IAM User Guide.
        scored: false

      - id: 5.1.3
        text: "Minimize cluster access to read-only for Amazon ECR (Manual)"
        type: "manual"
        remediation: |
          You can use your Amazon ECR images with Amazon EKS, but you need to satisfy the following prerequisites.

          The Amazon EKS worker node IAM role (NodeInstanceRole) that you use with your worker nodes must possess
          the following IAM policy permissions for Amazon ECR.

          {
            "Version": "2012-10-17",
            "Statement": [
              {
                "Effect": "Allow",
                "Action": [
                  "ecr:BatchCheckLayerAvailability",
                  "ecr:BatchGetImage",
                  "ecr:GetDownloadUrlForLayer",
                  "ecr:GetAuthorizationToken"
                ],
                "Resource": "*"
              }
            ]
          }
        scored: false

      - id: 5.1.4
        text: "Minimize Container Registries to only those approved (Manual)"
        type: "manual"
        remediation: |
          To minimize AWS ECR container registries to only those approved, you can follow these steps:

          1. Define your approval criteria: Determine the criteria that containers must meet to
             be considered approved. This can include factors such as security, compliance,
             compatibility, and other requirements.
          2. Identify all existing ECR registries: Identify all ECR registries that are currently
             being used in your organization.
          3. Evaluate ECR registries against approval criteria: Evaluate each ECR registry
             against your approval criteria to determine whether it should be approved or not.
             This can be done by reviewing the registry settings and configuration, as well as
             conducting security assessments and vulnerability scans.
          4. Establish policies and procedures: Establish policies and procedures that outline
             how ECR registries will be approved, maintained, and monitored. This should
             include guidelines for developers to follow when selecting a registry for their
             container images.
          5. Implement access controls: Implement access controls to ensure that only
             approved ECR registries are used to store and distribute container images. This
             can be done by setting up IAM policies and roles that restrict access to
             unapproved registries or create a whitelist of approved registries.
          6. Monitor and review: Continuously monitor and review the use of ECR registries
             to ensure that they continue to meet your approval criteria. This can include
        scored: false

  - id: 5.2
    text: "Identity and Access Management (IAM)"
    checks:
      - id: 5.2.1
        text: "Prefer using dedicated Amazon EKS Service Accounts (Manual)"
        type: "manual"
        remediation: |
          With IAM roles for service accounts on Amazon EKS clusters, you can associate an
          IAM role with a Kubernetes service account. This service account can then provide
          AWS permissions to the containers in any pod that uses that service account. With this
          feature, you no longer need to provide extended permissions to the worker node IAM
          role so that pods on that node can call AWS APIs.
          Applications must sign their AWS API requests with AWS credentials. This feature
          provides a strategy for managing credentials for your applications, similar to the way
          that Amazon EC2 instance profiles provide credentials to Amazon EC2 instances.
          Instead of creating and distributing your AWS credentials to the containers or using the
          Amazon EC2 instance's role, you can associate an IAM role with a Kubernetes service
          account. The applications in the pod's containers can then use an AWS SDK or the
          AWS CLI to make API requests to authorized AWS services.

          The IAM roles for service accounts feature provides the following benefits:

          - Least privilege - By using the IAM roles for service accounts feature, you no
            longer need to provide extended permissions to the worker node IAM role so that
            pods on that node can call AWS APIs. You can scope IAM permissions to a
            service account, and only pods that use that service account have access to
            those permissions. This feature also eliminates the need for third-party solutions
            such as kiam or kube2iam.
          - Credential isolation - A container can only retrieve credentials for the IAM role
            that is associated with the service account to which it belongs. A container never
            has access to credentials that are intended for another container that belongs to
            another pod.
          - Audit-ability - Access and event logging is available through CloudTrail to help
            ensure retrospective auditing.
        scored: false

  - id: 5.3
    text: "AWS EKS Key Management Service"
    checks:
      - id: 5.3.1
        text: "Ensure Kubernetes Secrets are encrypted using Customer Master Keys (CMKs) managed in AWS KMS (Manual)"
        type: "manual"
        remediation: |
          This process can only be performed during Cluster Creation.

          Enable 'Secrets Encryption' during Amazon EKS cluster creation as described
          in the links within the 'References' section.
        scored: false
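        # A sketch of enabling secrets encryption at creation time with the AWS CLI
        # (hypothetical names and ARNs; eksctl and the console offer equivalents):
        #   aws eks create-cluster --name my-cluster \
        #     --role-arn arn:aws:iam::123456789012:role/eks-cluster-role \
        #     --resources-vpc-config subnetIds=subnet-0aaa1111,subnet-0bbb2222 \
        #     --encryption-config '[{"resources":["secrets"],"provider":{"keyArn":"arn:aws:kms:us-east-1:123456789012:key/1234abcd-12ab-34cd-56ef-1234567890ab"}}]'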

  - id: 5.4
    text: "Cluster Networking"
    checks:
      - id: 5.4.1
        text: "Restrict Access to the Control Plane Endpoint (Manual)"
        type: "manual"
        remediation: |
          By enabling private endpoint access to the Kubernetes API server, all communication
          between your nodes and the API server stays within your VPC. You can also limit the IP
          addresses that can access your API server from the internet, or completely disable
          internet access to the API server.
          With this in mind, you can update your cluster accordingly using the AWS CLI to ensure
          that Private Endpoint Access is enabled.
          If you choose to also enable Public Endpoint Access then you should also configure a
          list of allowable CIDR blocks, resulting in restricted access from the internet. If you
          specify no CIDR blocks, then the public API server endpoint is able to receive and
          process requests from all IP addresses by defaulting to ['0.0.0.0/0'].
          For example, the following command would enable private access to the Kubernetes
          API as well as limited public access over the internet from a single IP address (noting
          the /32 CIDR suffix):
          aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,publicAccessCidrs="203.0.113.5/32"

          Note: The CIDR blocks specified cannot include reserved addresses.
          There is a maximum number of CIDR blocks that you can specify. For more information,
          see the EKS Service Quotas link in the references section.
          For more detailed information, see the EKS Cluster Endpoint documentation link in the
          references section.
        scored: false

      - id: 5.4.2
        text: "Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled (Manual)"
        type: "manual"
        remediation: |
          By enabling private endpoint access to the Kubernetes API server, all communication
          between your nodes and the API server stays within your VPC.
          With this in mind, you can update your cluster accordingly using the AWS CLI to ensure
          that Private Endpoint Access is enabled.
          For example, the following command would enable private access to the Kubernetes
          API and ensure that no public access is permitted:
          aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=false

          Note: For more detailed information, see the EKS Cluster Endpoint documentation link
          in the references section.
        scored: false

      - id: 5.4.3
        text: "Ensure clusters are created with Private Nodes (Manual)"
        type: "manual"
        remediation: |
          aws eks update-cluster-config \
            --region region-code \
            --name my-cluster \
            --resources-vpc-config endpointPublicAccess=true,publicAccessCidrs="203.0.113.5/32",endpointPrivateAccess=true
        scored: false

      - id: 5.4.4
        text: "Ensure Network Policy is Enabled and set as appropriate (Manual)"
        type: "manual"
        remediation: |
          Utilize Calico or another network policy engine to segment and isolate your traffic.
        scored: false

      - id: 5.4.5
        text: "Encrypt traffic to HTTPS load balancers with TLS certificates (Manual)"
        type: "manual"
        remediation: |
          Your load balancer vendor can provide details on configuring HTTPS with TLS.
        scored: false

  - id: 5.5
    text: "Authentication and Authorization"
    checks:
      - id: 5.5.1
        text: "Manage Kubernetes RBAC users with AWS IAM Authenticator for Kubernetes or Upgrade to AWS CLI v1.16.156 or greater (Manual)"
        type: "manual"
        remediation: |
          Refer to the 'Managing users or IAM roles for your cluster' section in the Amazon EKS documentation.

          Note: If using AWS CLI version 1.16.156 or later there is no need to install the AWS
          IAM Authenticator anymore.
          The relevant AWS CLI commands, depending on the use case, are:
          aws eks update-kubeconfig
          aws eks get-token
        scored: false
6
cfg/eks-1.8.0/master.yaml
Normal file
@@ -0,0 +1,6 @@
---
controls:
version: "eks-1.8.0"
id: 1
text: "Control Plane Components"
type: "master"
456
cfg/eks-1.8.0/node.yaml
Normal file
@@ -0,0 +1,456 @@
---
controls:
version: "eks-1.8.0"
id: 3
text: "Worker Nodes"
type: "node"
groups:
  - id: 3.1
    text: "Worker Node Configuration Files"
    checks:
      - id: 3.1.1
        text: "Ensure that the kubeconfig file permissions are set to 644 or more restrictive (Automated)"
        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' '
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "644"
        remediation: |
          Run the below command (based on the file location on your system) on each worker node.
          For example,
          chmod 644 $kubeletkubeconfig
        scored: true

      - id: 3.1.2
        text: "Ensure that the kubelet kubeconfig file ownership is set to root:root (Automated)"
        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' '
        tests:
          test_items:
            - flag: root:root
        remediation: |
          Run the below command (based on the file location on your system) on each worker node.
          For example,
          chown root:root $kubeletkubeconfig
        scored: true

      - id: 3.1.3
        text: "Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Automated)"
        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' '
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "644"
        remediation: |
          Run the following command (using the config file location identified in the Audit step)
          chmod 644 $kubeletconf
        scored: true

      - id: 3.1.4
        text: "Ensure that the kubelet configuration file ownership is set to root:root (Automated)"
        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
        tests:
          test_items:
            - flag: root:root
        remediation: |
          Run the following command (using the config file location identified in the Audit step)
          chown root:root $kubeletconf
        scored: true
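        # To spot-check all four 3.1.x items in one pass (a sketch; run in a node
        # shell, substituting the concrete paths kube-bench resolves for
        # $kubeletkubeconfig and $kubeletconf - the paths below are typical EKS
        # defaults, not guaranteed for every AMI):
        #   for f in /var/lib/kubelet/kubeconfig /etc/kubernetes/kubelet/kubelet-config.json; do
        #     stat -c '%n %a %U:%G' "$f"
        #   done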

  - id: 3.2
    text: "Kubelet"
    checks:
      - id: 3.2.1
        text: "Ensure that the Anonymous Auth is Not Enabled (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: "--anonymous-auth"
              path: '{.authentication.anonymous.enabled}'
              set: true
              compare:
                op: eq
                value: false
        remediation: |
          Remediation Method 1:
          If configuring via the Kubelet config file, you first need to locate the file.
          To do this, SSH to each node and execute the following command to find the kubelet
          process:
          ps -ef | grep kubelet
          The output of the above command provides details of the active kubelet process, from
          which we can see the location of the configuration file provided to the kubelet service
          with the --config argument. The file can be viewed with a command such as more or
          less, like so:
          sudo less /path/to/kubelet-config.json
          Disable Anonymous Authentication by setting the following parameter:
          "authentication": { "anonymous": { "enabled": false } }

          Remediation Method 2:
          If using executable arguments, edit the kubelet service file on each worker node and
          ensure the below parameters are part of the KUBELET_ARGS variable string.
          For systems using systemd, such as the Amazon EKS Optimised Amazon Linux or
          Bottlerocket AMIs, this file can be found at
          /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf. Otherwise,
          you may need to look up documentation for your chosen operating system to determine
          which service manager is configured:
          --anonymous-auth=false

          For Both Remediation Steps:
          Based on your system, restart the kubelet service and check the service status.
          The following example is for operating systems using systemd, such as the Amazon
          EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the systemctl
          command. If systemctl is not available then you will need to look up documentation for
          your chosen operating system to determine which service manager is configured:
          systemctl daemon-reload
          systemctl restart kubelet.service
          systemctl status kubelet -l
        scored: true

      - id: 3.2.2
        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --authorization-mode
              path: '{.authorization.mode}'
              set: true
              compare:
                op: nothave
                value: AlwaysAllow
        remediation: |
          Remediation Method 1:
          If configuring via the Kubelet config file, you first need to locate the file.
          To do this, SSH to each node and execute the following command to find the kubelet
          process:
          ps -ef | grep kubelet
          The output of the above command provides details of the active kubelet process, from
          which we can see the location of the configuration file provided to the kubelet service
          with the --config argument. The file can be viewed with a command such as more or
          less, like so:
          sudo less /path/to/kubelet-config.json
          Enable Webhook Authentication by setting the following parameter:
          "authentication": { "webhook": { "enabled": true } }
          Next, set the Authorization Mode to Webhook by setting the following parameter:
          "authorization": { "mode": "Webhook" }
          Finer detail of the authentication and authorization fields can be found in the
          Kubelet Configuration documentation.

          Remediation Method 2:
          If using executable arguments, edit the kubelet service file on each worker node and
          ensure the below parameters are part of the KUBELET_ARGS variable string.
          For systems using systemd, such as the Amazon EKS Optimised Amazon Linux or
          Bottlerocket AMIs, this file can be found at
          /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf. Otherwise,
          you may need to look up documentation for your chosen operating system to determine
          which service manager is configured:
          --authentication-token-webhook
          --authorization-mode=Webhook

          For Both Remediation Steps:
          Based on your system, restart the kubelet service and check the service status.
          The following example is for operating systems using systemd, such as the Amazon
          EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the systemctl
          command. If systemctl is not available then you will need to look up documentation for
          your chosen operating system to determine which service manager is configured:
          systemctl daemon-reload
          systemctl restart kubelet.service
          systemctl status kubelet -l
        scored: true

      - id: 3.2.3
        text: "Ensure that a Client CA File is Configured (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --client-ca-file
              path: '{.authentication.x509.clientCAFile}'
              set: true
        remediation: |
          Remediation Method 1:
          If configuring via the Kubelet config file, you first need to locate the file.
          To do this, SSH to each node and execute the following command to find the kubelet
          process:
          ps -ef | grep kubelet
          The output of the above command provides details of the active kubelet process, from
          which we can see the location of the configuration file provided to the kubelet service
          with the --config argument. The file can be viewed with a command such as more or
          less, like so:
          sudo less /path/to/kubelet-config.json
          Configure the client certificate authority file by setting the following parameter
          appropriately:
          "authentication": { "x509": { "clientCAFile": "<path/to/client-ca-file>" } }

          Remediation Method 2:
          If using executable arguments, edit the kubelet service file on each worker node and
          ensure the below parameters are part of the KUBELET_ARGS variable string.
          For systems using systemd, such as the Amazon EKS Optimised Amazon Linux or
          Bottlerocket AMIs, this file can be found at
          /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf. Otherwise,
          you may need to look up documentation for your chosen operating system to determine
          which service manager is configured:
          --client-ca-file=<path/to/client-ca-file>

          For Both Remediation Steps:
          Based on your system, restart the kubelet service and check the service status.
          The following example is for operating systems using systemd, such as the Amazon
          EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the systemctl
          command. If systemctl is not available then you will need to look up documentation for
          your chosen operating system to determine which service manager is configured:
          systemctl daemon-reload
          systemctl restart kubelet.service
          systemctl status kubelet -l
        scored: true

      - id: 3.2.4
        text: "Ensure that the --read-only-port is disabled (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: "--read-only-port"
              path: '{.readOnlyPort}'
              set: true
              compare:
                op: eq
                value: 0
        remediation: |
          If modifying the Kubelet config file, edit the kubelet-config.json file
          /etc/kubernetes/kubelet/kubelet-config.json and set the below parameter to 0
          "readOnlyPort": 0
          If using executable arguments, edit the kubelet service file
          /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each
          worker node and add the below parameter at the end of the KUBELET_ARGS variable
          string.
          --read-only-port=0

          Based on your system, restart the kubelet service and check status
          systemctl daemon-reload
          systemctl restart kubelet.service
          systemctl status kubelet -l
        scored: true
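        # A direct way to confirm the read-only port is closed (a sketch; 10255 is
        # the kubelet's default read-only port when one is enabled):
        #   curl -s --max-time 3 http://localhost:10255/pods || echo "read-only port closed"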
|
||||
- id: 3.2.5
|
||||
text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)"
|
||||
audit: "/bin/ps -fC $kubeletbin"
|
||||
audit_config: "/bin/cat $kubeletconf"
|
||||
tests:
|
||||
test_items:
|
||||
- flag: --streaming-connection-idle-timeout
|
||||
path: '{.streamingConnectionIdleTimeout}'
|
||||
set: true
|
||||
compare:
|
||||
op: noteq
|
||||
value: 0
|
||||
- flag: --streaming-connection-idle-timeout
|
||||
path: '{.streamingConnectionIdleTimeout}'
|
||||
set: false
|
||||
bin_op: or
|
||||
remediation: |
|
||||
Remediation Method 1:
|
||||
If modifying the Kubelet config file, edit the kubelet-config.json file
|
||||
/etc/kubernetes/kubelet/kubelet-config.json and set the below parameter to a
|
||||
non-zero value in the format of #h#m#s
|
||||
"streamingConnectionIdleTimeout": "4h0m0s"
|
||||
You should ensure that the kubelet service file
|
||||
/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not
|
||||
specify a --streaming-connection-idle-timeout argument because it would
|
||||
override the Kubelet config file.
|
||||
|
||||
Remediation Method 2:
|
||||
If using executable arguments, edit the kubelet service file
|
||||
/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each
|
||||
worker node and add the below parameter at the end of the KUBELET_ARGS variable
|
||||
string.
|
||||
--streaming-connection-idle-timeout=4h0m0s
|
||||
|
||||
Remediation Method 3:
|
||||
If using the api configz endpoint consider searching for the status of
|
||||
"streamingConnectionIdleTimeout": by extracting the live configuration from the
|
||||
nodes running kubelet.
|
||||
**See detailed step-by-step configmap procedures in Reconfigure a Node's Kubelet in a
|
||||
Live Cluster, and then rerun the curl statement from audit process to check for kubelet
|
||||
configuration changes
|
||||
kubectl proxy --port=8001 &
|
||||
export HOSTNAME_PORT=localhost:8001 (example host and port number)
|
||||
export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from "kubectl get nodes")
|
||||
curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz"
|
||||
|
||||
For all three remediations:
|
||||
Based on your system, restart the kubelet service and check status
|
||||
systemctl daemon-reload
|
||||
systemctl restart kubelet.service
|
||||
systemctl status kubelet -l
|
||||
scored: true
|
||||
|
||||
- id: 3.2.6
|
||||
text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)"
|
||||
audit: "/bin/ps -fC $kubeletbin"
|
||||
audit_config: "/bin/cat $kubeletconf"
|
||||
tests:
|
||||
test_items:
|
||||
- flag: --make-iptables-util-chains
|
||||
path: '{.makeIPTablesUtilChains}'
|
||||
set: true
|
||||
compare:
|
||||
op: eq
|
||||
value: true
|
||||
- flag: --make-iptables-util-chains
|
||||
path: '{.makeIPTablesUtilChains}'
|
||||
set: false
|
||||
bin_op: or
|
||||
remediation: |
|
||||
Remediation Method 1:
|
||||
If modifying the Kubelet config file, edit the kubelet-config.json file
|
||||
/etc/kubernetes/kubelet/kubelet-config.json and set the below parameter to
|
||||
true
|
||||
"makeIPTablesUtilChains": true
|
||||
Ensure that /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf
|
||||
does not set the --make-iptables-util-chains argument because that would
|
||||
override your Kubelet config file.
|
||||
|
||||
Remediation Method 2:
|
||||
If using executable arguments, edit the kubelet service file
|
||||
/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each
|
||||
worker node and add the below parameter at the end of the KUBELET_ARGS variable
|
||||
string.
|
||||
--make-iptables-util-chains:true
|
||||
|
||||
Remediation Method 3:
|
||||
If using the api configz endpoint consider searching for the status of
|
||||
"makeIPTablesUtilChains.: true by extracting the live configuration from the nodes
|
||||
running kubelet.
|
||||
**See detailed step-by-step configmap procedures in Reconfigure a Node's Kubelet in a
|
||||
Live Cluster, and then rerun the curl statement from audit process to check for kubelet
|
||||
configuration changes
|
||||
kubectl proxy --port=8001 &
|
||||
export HOSTNAME_PORT=localhost:8001 (example host and port number)
|
||||
export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from "kubectl get nodes")
|
||||
curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz"
|
||||
|
||||
For all three remediations:
|
||||
Based on your system, restart the kubelet service and check status
|
||||
systemctl daemon-reload
|
||||
systemctl restart kubelet.service
|
||||
systemctl status kubelet -l
|
||||
scored: true
|
||||
|
||||
      - id: 3.2.7
        text: "Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --event-qps
              path: '{.eventRecordQPS}'
              set: true
              compare:
                op: gte
                value: 0
            - flag: --event-qps
              path: '{.eventRecordQPS}'
              set: false
          bin_op: or
        remediation: |
          If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate
          level.
          If using command line arguments, edit the kubelet service file
          /etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node
          and set the below parameter in the KUBELET_SYSTEM_PODS_ARGS variable.
          --event-qps=0 (or a level appropriate for your environment)
          Based on your system, restart the kubelet service. For example:
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: true

      - id: 3.2.8
        text: "Ensure that the --rotate-certificates argument is not present or is set to true (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --rotate-certificates
              path: '{.rotateCertificates}'
              compare:
                op: eq
                value: true
            - flag: --rotate-certificates
              path: '{.rotateCertificates}'
              set: false
          bin_op: or
        remediation: |
          Remediation Method 1:
          If modifying the Kubelet config file, edit the kubelet-config.json file
          /etc/kubernetes/kubelet/kubelet-config.json and set the below parameter to
          true
          "rotateCertificates": true
          Additionally, ensure that the kubelet service file
          /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not set the --rotate-certificates
          executable argument to false because this would override the Kubelet
          config file.

          Remediation Method 2:
          If using executable arguments, edit the kubelet service file
          /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each
          worker node and add the below parameter at the end of the KUBELET_ARGS variable
          string.
          --rotate-certificates=true
        scored: true

      - id: 3.2.9
        text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: RotateKubeletServerCertificate
              path: '{.featureGates.RotateKubeletServerCertificate}'
              set: true
              compare:
                op: eq
                value: true
        remediation: |
          Remediation Method 1:
          If modifying the Kubelet config file, edit the kubelet-config.json file
          /etc/kubernetes/kubelet/kubelet-config.json and set the below parameter to
          true

          "featureGates": {
            "RotateKubeletServerCertificate": true
          },

          Additionally, ensure that the kubelet service file
          /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not set
          the --rotate-kubelet-server-certificate executable argument to false because
          this would override the Kubelet config file.

          Remediation Method 2:
          If using executable arguments, edit the kubelet service file
          /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each
          worker node and add the below parameter at the end of the KUBELET_ARGS variable
          string.
          --rotate-kubelet-server-certificate=true

          Remediation Method 3:
          If using the api configz endpoint, consider searching for the status of
          "RotateKubeletServerCertificate": by extracting the live configuration from the
          nodes running kubelet.
          **See detailed step-by-step configmap procedures in Reconfigure a Node's Kubelet in a
          Live Cluster, and then rerun the curl statement from the audit process to check for
          kubelet configuration changes
          kubectl proxy --port=8001 &
          export HOSTNAME_PORT=localhost:8001 (example host and port number)
          export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from "kubectl get nodes")
          curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz"

          For all three remediation methods:
          Restart the kubelet service and check status. The example below is for when using
          systemctl to manage services:
          systemctl daemon-reload
          systemctl restart kubelet.service
          systemctl status kubelet -l
        scored: true
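      # A quick way to inspect a single field from the live configuration retrieved in the
      # configz steps above (a sketch; it assumes the kubectl proxy started there is still
      # running and that jq is installed):
      #   curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz" \
      #     | jq '.kubeletconfig.featureGates.RotateKubeletServerCertificate'
      # The kubelet's /configz endpoint wraps the live config in a top-level "kubeletconfig" key.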
396
cfg/eks-1.8.0/policies.yaml
Normal file
@@ -0,0 +1,396 @@
---
controls:
version: "eks-1.8.0"
id: 4
text: "Policies"
type: "policies"
groups:
  - id: 4.1
    text: "RBAC and Service Accounts"
    checks:
      - id: 4.1.1
        text: "Ensure that the cluster-admin role is only used where required (Automated)"
        audit: |
          kubectl get clusterrolebindings -o json | jq -r '
            [
              .items[]
              | select(.roleRef.name == "cluster-admin")
              | .subjects[]?
              | select(.kind != "Group" or (.name != "system:masters" and .name != "system:nodes"))
            ]
            | if length == 0
              then "NO_CLUSTER_ADMIN_BINDINGS"
              else "FOUND_CLUSTER_ADMIN_BINDING"
              end
          '
        tests:
          test_items:
            - flag: "NO_CLUSTER_ADMIN_BINDINGS"
              set: true
              compare:
                op: eq
                value: "NO_CLUSTER_ADMIN_BINDINGS"
        remediation: |
          Identify all clusterrolebindings to the cluster-admin role. Check if they are used and if
          they need this role or if they could use a role with fewer privileges.
          Where possible, first bind users to a lower-privileged role and then remove the
          clusterrolebinding to the cluster-admin role:
          kubectl delete clusterrolebinding [name]
        scored: true

      - id: 4.1.2
        text: "Minimize access to secrets (Automated)"
        audit: |
          count=$(kubectl get roles --all-namespaces -o json | jq '
            .items[]
            | select(.rules[]?
                | (.resources[]? == "secrets")
                and ((.verbs[]? == "get") or (.verbs[]? == "list") or (.verbs[]? == "watch"))
              )' | wc -l)

          if [ "$count" -gt 0 ]; then
            echo "SECRETS_ACCESS_FOUND"
          fi
        tests:
          test_items:
            - flag: "SECRETS_ACCESS_FOUND"
              set: false
        remediation: |
          Where possible, remove get, list and watch access to secret objects in the cluster.
        scored: true
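      # A complementary spot check (a sketch; the namespace and service-account names below
      # are placeholders): verify whether a specific subject can actually read secrets with
      #   kubectl auth can-i get secrets --as=system:serviceaccount:<namespace>:<sa-name> -n <namespace>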
      - id: 4.1.3
        text: "Minimize wildcard use in Roles and ClusterRoles (Automated)"
        audit: |
          wildcards=$(kubectl get roles --all-namespaces -o json | jq '
            .items[] | select(
              .rules[]? | (.verbs[]? == "*" or .resources[]? == "*" or .apiGroups[]? == "*")
            )' | wc -l)

          wildcards_clusterroles=$(kubectl get clusterroles -o json | jq '
            .items[] | select(
              .rules[]? | (.verbs[]? == "*" or .resources[]? == "*" or .apiGroups[]? == "*")
            )' | wc -l)

          total=$((wildcards + wildcards_clusterroles))

          if [ "$total" -gt 0 ]; then
            echo "wildcards_present"
          fi
        tests:
          test_items:
            - flag: wildcards_present
              set: false
        remediation: |
          Where possible, replace any use of wildcards in clusterroles and roles with specific
          objects or actions.
        scored: true

      - id: 4.1.4
        text: "Minimize access to create pods (Automated)"
        audit: |
          access=$(kubectl get roles,clusterroles -A -o json | jq '
            [.items[] |
              select(
                .rules[]? |
                (.resources[]? == "pods" and .verbs[]? == "create")
              )
            ] | length')

          if [ "$access" -gt 0 ]; then
            echo "pods_create_access"
          fi
        tests:
          test_items:
            - flag: pods_create_access
              set: false
        remediation: |
          Where possible, remove create access to pod objects in the cluster.
        scored: true

      - id: 4.1.5
        text: "Ensure that default service accounts are not actively used (Automated)"
        audit: |
          default_sa_count=$(kubectl get serviceaccounts --all-namespaces -o json | jq '
            [.items[] | select(.metadata.name == "default" and (.automountServiceAccountToken != false))] | length')
          if [ "$default_sa_count" -gt 0 ]; then
            echo "default_sa_not_auto_mounted"
          fi
          pods_using_default_sa=$(kubectl get pods --all-namespaces -o json | jq '
            [.items[] | select(.spec.serviceAccountName == "default")] | length')
          if [ "$pods_using_default_sa" -gt 0 ]; then
            echo "default_sa_used_in_pods"
          fi
        tests:
          test_items:
            - flag: default_sa_not_auto_mounted
              set: false
            - flag: default_sa_used_in_pods
              set: false
        remediation: |
          Create explicit service accounts wherever a Kubernetes workload requires specific
          access to the Kubernetes API server.
          Modify the configuration of each default service account to include this value
          automountServiceAccountToken: false
          Automatic remediation for the default account:
          kubectl patch serviceaccount default -p '{"automountServiceAccountToken": false}'
        scored: true
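      # To confirm the patch above took effect for a given namespace's default account
      # (a sketch): an empty result means the field is unset, "false" means it is disabled.
      #   kubectl get serviceaccount default -n <namespace> \
      #     -o jsonpath='{.automountServiceAccountToken}'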
      - id: 4.1.6
        text: "Ensure that Service Account Tokens are only mounted where necessary (Automated)"
        audit: |
          pods_with_token_mount=$(kubectl get pods --all-namespaces -o json | jq '
            [.items[] | select(.spec.automountServiceAccountToken != false)] | length')

          if [ "$pods_with_token_mount" -gt 0 ]; then
            echo "automountServiceAccountToken"
          fi
        tests:
          test_items:
            - flag: automountServiceAccountToken
              set: false
        remediation: |
          Regularly review pod and service account objects in the cluster to ensure that the
          automountServiceAccountToken setting is false for pods and accounts that do not
          explicitly require API server access.
        scored: true

      - id: 4.1.7
        text: "Cluster Access Manager API to streamline and enhance the management of access controls within EKS clusters (Manual)"
        type: "manual"
        remediation: |
          Log in to the AWS Management Console.
          Navigate to Amazon EKS and select your EKS cluster.

          Go to the Access tab and click "Manage Access" in the Access Configuration section.
          Under Cluster Authentication Mode in the Cluster Access settings:
          Select EKS API so that the cluster sources authenticated IAM principals only from EKS access entry APIs.
          Select ConfigMap so that the cluster sources authenticated IAM principals only from the aws-auth ConfigMap.
          Note: EKS API and ConfigMap must be selected during cluster creation and cannot be changed once the cluster is provisioned.
        scored: false

      - id: 4.1.8
        text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)"
        type: "manual"
        remediation: |
          Where possible, remove the impersonate, bind and escalate rights from subjects.
        scored: false

      - id: 4.1.9
        text: "Minimize access to create PersistentVolume objects (Manual)"
        type: "manual"
        remediation: |
          Review the RBAC rules in the cluster and identify users, groups, or service accounts
          with create permissions on PersistentVolume resources.
          Where possible, remove or restrict create access to PersistentVolume objects to
          trusted administrators only.
        scored: false

      - id: 4.1.10
        text: "Minimize access to the proxy sub-resource of Node objects (Manual)"
        type: "manual"
        remediation: |
          Review RBAC roles and bindings in the cluster to identify users, groups,
          or service accounts with access to the proxy sub-resource of Node objects.
          Where possible, remove or restrict access to the node proxy sub-resource
          to trusted administrators only.
        scored: false

      - id: 4.1.11
        text: "Minimize access to webhook configuration objects (Manual)"
        type: "manual"
        remediation: |
          Review RBAC roles and bindings in the cluster to identify users, groups,
          or service accounts with access to validatingwebhookconfigurations or
          mutatingwebhookconfigurations objects. Where possible, remove or restrict
          access to these webhook configuration objects to trusted administrators only.
        scored: false

      - id: 4.1.12
        text: "Minimize access to the service account token creation (Manual)"
        type: "manual"
        remediation: |
          Review RBAC roles and bindings in the cluster to identify users, groups,
          or service accounts with access to create the token sub-resource of
          serviceaccount objects. Where possible, remove or restrict access to
          token creation to trusted administrators only.
        scored: false

  - id: 4.2
    text: "Pod Security Standards"
    checks:
      - id: 4.2.1
        text: "Minimize the admission of privileged containers (Automated)"
        audit: |
          kubectl get pods --all-namespaces -o json | \
            jq -r 'if any(.items[]?.spec.containers[]?; .securityContext?.privileged == true) then "PRIVILEGED_FOUND" else "NO_PRIVILEGED" end'
        tests:
          test_items:
            - flag: "NO_PRIVILEGED"
              set: true
              compare:
                op: eq
                value: "NO_PRIVILEGED"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of privileged containers.
          To enable PSA for a namespace in your cluster, set the pod-security.kubernetes.io/enforce
          label with the policy value you want to enforce.
          kubectl label --overwrite ns NAMESPACE pod-security.kubernetes.io/enforce=restricted
          The above command enforces the restricted policy for the NAMESPACE namespace.
          You can also enable Pod Security Admission for all your namespaces. For example:
          kubectl label --overwrite ns --all pod-security.kubernetes.io/warn=baseline
        scored: true

      - id: 4.2.2
        text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)"
        audit: |
          kubectl get pods --all-namespaces -o json | \
            jq -r 'if any(.items[]?; .spec.hostPID == true) then "HOSTPID_FOUND" else "NO_HOSTPID" end'
        tests:
          test_items:
            - flag: "NO_HOSTPID"
              set: true
              compare:
                op: eq
                value: "NO_HOSTPID"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of hostPID containers.
        scored: true

      - id: 4.2.3
        text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)"
        audit: |
          kubectl get pods --all-namespaces -o json | jq -r 'if any(.items[]?; .spec.hostIPC == true) then "HOSTIPC_FOUND" else "NO_HOSTIPC" end'
        tests:
          test_items:
            - flag: "NO_HOSTIPC"
              set: true
              compare:
                op: eq
                value: "NO_HOSTIPC"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of hostIPC containers.
        scored: true

      - id: 4.2.4
        text: "Minimize the admission of containers wishing to share the host network namespace (Automated)"
        audit: |
          kubectl get pods --all-namespaces -o json | jq -r 'if any(.items[]?; .spec.hostNetwork == true) then "HOSTNETWORK_FOUND" else "NO_HOSTNETWORK" end'
        tests:
          test_items:
            - flag: "NO_HOSTNETWORK"
              set: true
              compare:
                op: eq
                value: "NO_HOSTNETWORK"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of hostNetwork containers.
        scored: true

      - id: 4.2.5
        text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)"
        audit: |
          kubectl get pods --all-namespaces -o json | \
            jq -r 'if any(.items[]?.spec.containers[]?; .securityContext?.allowPrivilegeEscalation == true) then "ALLOWPRIVILEGEESCALATION_FOUND" else "NO_ALLOWPRIVILEGEESCALATION" end'
        tests:
          test_items:
            - flag: "NO_ALLOWPRIVILEGEESCALATION"
              set: true
              compare:
                op: eq
                value: "NO_ALLOWPRIVILEGEESCALATION"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of containers with .spec.allowPrivilegeEscalation set to true.
        scored: true

  - id: 4.3
    text: "CNI Plugin"
    checks:
      - id: 4.3.1
        text: "Ensure CNI plugin supports network policies (Manual)"
        type: "manual"
        remediation: |
          As with RBAC policies, network policies should adhere to the policy of least-privileged
          access. Start by creating a deny-all policy that restricts all inbound and outbound traffic
          from a namespace, or create a global policy using Calico. A sketch of such a deny-all
          policy follows this group.
        scored: false

      - id: 4.3.2
        text: "Ensure that all Namespaces have Network Policies defined (Automated)"
        audit: |
          ns_without_np=$(kubectl get namespaces -o json | jq -r '.items[].metadata.name' | while read ns; do
            count=$(kubectl get networkpolicy -n $ns --no-headers 2>/dev/null | wc -l)
            if [ "$count" -eq 0 ]; then echo $ns; fi
          done)
          if [ -z "$ns_without_np" ]; then
            echo "ALL_NAMESPACES_HAVE_NETWORK_POLICIES"
          else
            echo "NAMESPACES_WITHOUT_NETWORK_POLICIES: $ns_without_np"
          fi
        tests:
          test_items:
            - flag: "ALL_NAMESPACES_HAVE_NETWORK_POLICIES"
              set: true
              compare:
                op: eq
                value: "ALL_NAMESPACES_HAVE_NETWORK_POLICIES"
        remediation: |
          Create at least one NetworkPolicy in each namespace to control and restrict traffic
          between pods as needed.
        scored: true
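      # A minimal default-deny NetworkPolicy of the kind the remediations above describe
      # (a sketch; the name and namespace are placeholders):
      #   apiVersion: networking.k8s.io/v1
      #   kind: NetworkPolicy
      #   metadata:
      #     name: default-deny-all
      #     namespace: <namespace>
      #   spec:
      #     podSelector: {}
      #     policyTypes:
      #       - Ingress
      #       - Egress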
  - id: 4.4
    text: "Secrets Management"
    checks:
      - id: 4.4.1
        text: "Prefer using secrets as files over secrets as environment variables (Automated)"
        audit: |
          result=$(kubectl get all --all-namespaces -o jsonpath='{range .items[?(@..secretKeyRef)]}{.metadata.namespace} {.kind} {.metadata.name}{"\n"}{end}')
          if [ -z "$result" ]; then
            echo "NO_SECRETS_AS_ENV_VARS"
          else
            echo "SECRETS_AS_ENV_VARS_FOUND: $result"
          fi
        tests:
          test_items:
            - flag: "NO_SECRETS_AS_ENV_VARS"
              set: true
              compare:
                op: eq
                value: "NO_SECRETS_AS_ENV_VARS"
        remediation: |
          If possible, rewrite application code to read secrets from mounted secret files, rather than
          from environment variables.
        scored: true

      - id: 4.4.2
        text: "Consider external secret storage (Manual)"
        type: "manual"
        remediation: |
          Refer to the secrets management options offered by your cloud provider or a third-party
          secrets management solution.
        scored: false

  - id: 4.5
    text: "General Policies"
    checks:
      - id: 4.5.1
        text: "Create administrative boundaries between resources using namespaces (Manual)"
        type: "manual"
        remediation: |
          Follow the documentation and create namespaces for objects in your deployment as you need
          them.
        scored: false

      - id: 4.5.2
        text: "The default namespace should not be used (Automated)"
        audit: |
          output=$(kubectl get $(kubectl api-resources --verbs=list --namespaced=true -o name | paste -sd, -) --ignore-not-found -n default 2>/dev/null | grep -v "^kubernetes ")
          if [ -z "$output" ]; then
            echo "NO_USER_RESOURCES_IN_DEFAULT"
          else
            echo "USER_RESOURCES_IN_DEFAULT_FOUND: $output"
          fi
        tests:
          test_items:
            - flag: "NO_USER_RESOURCES_IN_DEFAULT"
              set: true
        remediation: |
          Create and use dedicated namespaces for resources instead of the default namespace.
          Move any user-defined objects out of the default namespace to improve resource
          segregation and RBAC control.
        scored: true
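# To exercise just this section against an EKS cluster with kube-bench (a sketch; the
# benchmark and target names follow this config tree's layout):
#   kube-bench run --benchmark eks-1.8.0 --targets policies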
9
cfg/gke-1.8.0/config.yaml
Normal file
@@ -0,0 +1,9 @@
---
## Version-specific settings that override the values in cfg/config.yaml

node:
  proxy:
    defaultkubeconfig: "/var/lib/kubelet/kubeconfig"

  kubelet:
    defaultconf: "/etc/kubernetes/kubelet-config.yaml"
6
cfg/gke-1.8.0/controlplane.yaml
Normal file
@@ -0,0 +1,6 @@
---
controls:
version: "gke-1.8.0"
id: 2
text: "Control Plane Configuration"
type: "controlplane"
719
cfg/gke-1.8.0/managedservices.yaml
Normal file
@@ -0,0 +1,719 @@
---
controls:
version: "gke-1.8.0"
id: 5
text: "Managed Services"
type: "managedservices"
groups:
  - id: 5.1
    text: "Image Registry and Image Scanning"
    checks:
      - id: 5.1.1
        text: "Ensure Image Vulnerability Scanning is enabled (Automated)"
        audit: "gcloud services list --enabled"
        type: "manual"
        remediation: |
          For Images Hosted in GCR:
          Using Google Cloud Console
          Go to GCR by visiting: https://console.cloud.google.com/gcr
          Select Settings and, under the Vulnerability Scanning heading, click the TURN ON button.
          Using Command Line
          gcloud services enable containeranalysis.googleapis.com
          For Images Hosted in AR:
          Using Google Cloud Console
          Go to AR by visiting: https://console.cloud.google.com/artifacts
          Select Settings and, under the Vulnerability Scanning heading, click the ENABLE button.
          Using Command Line
          gcloud services enable containerscanning.googleapis.com
        scored: false

      - id: 5.1.2
        text: "Minimize user access to Container Image repositories (Manual)"
        audit: |
          gcloud projects get-iam-policy <project_id> \
            --flatten="bindings[].members" \
            --format='table(bindings.members,bindings.role)' \
            --filter="bindings.role:roles/storage.admin OR bindings.role:roles/storage.objectAdmin OR bindings.role:roles/storage.objectCreator OR bindings.role:roles/storage.legacyBucketOwner OR bindings.role:roles/storage.legacyBucketWriter OR bindings.role:roles/storage.legacyObjectOwner"
        type: "manual"
        remediation: |
          For Images Hosted in AR:
          Using Command Line:

          gcloud artifacts repositories set-iam-policy <repository-name> <path-to-policy-file> \
            --location <repository-location>

          To learn how to configure policy files see: https://cloud.google.com/artifact-registry/docs/access-control#grant

          For Images Hosted in GCR:
          Using Command Line:
          To change roles at the GCR bucket level:
          Firstly, run the following if read permissions are required:

          gsutil iam ch <type>:<email_address>:objectViewer gs://artifacts.<project_id>.appspot.com

          Then remove the excessively privileged role (Storage Admin / Storage Object
          Admin / Storage Object Creator) using:

          gsutil iam ch -d <type>:<email_address>:<role> gs://artifacts.<project_id>.appspot.com

          where:
          <type> can be one of the following:
            user, if the <email_address> is a Google account.
            serviceAccount, if <email_address> specifies a Service account.
          <email_address> can be one of the following:
            a Google account (for example, someone@example.com).
            a Cloud IAM service account.

          To modify roles defined at the project level and subsequently inherited within the GCR
          bucket, or the Service Account User role, extract the IAM policy file, modify it
          accordingly and apply it using:

          gcloud projects set-iam-policy <project_id> <policy_file>
        scored: false

      - id: 5.1.3
        text: "Minimize cluster access to read-only for Container Image repositories (Manual)"
        audit: |
          gcloud projects get-iam-policy <project_id> \
            --flatten="bindings[].members" \
            --format='table(bindings.members,bindings.role)' \
            --filter="bindings.role:roles/storage.admin OR bindings.role:roles/storage.objectAdmin OR bindings.role:roles/storage.objectCreator OR bindings.role:roles/storage.legacyBucketOwner OR bindings.role:roles/storage.legacyBucketWriter OR bindings.role:roles/storage.legacyObjectOwner"
        type: "manual"
        remediation: |
          For Images Hosted in AR:
          Using Command Line:
          Add the artifactregistry.reader role:

          gcloud artifacts repositories add-iam-policy-binding <repository> \
            --location=<repository-location> \
            --member='serviceAccount:<email-address>' \
            --role='roles/artifactregistry.reader'

          Remove any roles other than artifactregistry.reader:

          gcloud artifacts repositories remove-iam-policy-binding <repository> \
            --location <repository-location> \
            --member='serviceAccount:<email-address>' \
            --role='<role-name>'

          For Images Hosted in GCR:
          For an account explicitly granted to the bucket:
          Firstly, add read access to the Kubernetes Service Account:

          gsutil iam ch <type>:<email_address>:objectViewer gs://artifacts.<project_id>.appspot.com

          where:
          <type> can be one of the following:
            user, if the <email_address> is a Google account.
            serviceAccount, if <email_address> specifies a Service account.
          <email_address> can be one of the following:
            a Google account (for example, someone@example.com).
            a Cloud IAM service account.

          Then remove the excessively privileged role (Storage Admin / Storage Object
          Admin / Storage Object Creator) using:

          gsutil iam ch -d <type>:<email_address>:<role> gs://artifacts.<project_id>.appspot.com

          For an account that inherits access to the GCR Bucket through Project level
          permissions, modify the Projects IAM policy file accordingly, then upload it using:

          gcloud projects set-iam-policy <project_id> <policy_file>
        scored: false

      - id: 5.1.4
        text: "Ensure only trusted container images are used (Manual)"
        audit: |
          gcloud container clusters describe $CLUSTER_NAME --zone $COMPUTE_ZONE --format json | jq .binaryAuthorization
        type: "manual"
        remediation: |
          Using Command Line:
          Update the cluster to enable Binary Authorization:

          gcloud container clusters update <cluster_name> --enable-binauthz

          Create a Binary Authorization Policy using the Binary Authorization Policy Reference:
          https://cloud.google.com/binary-authorization/docs/policy-yaml-reference for guidance.

          Import the policy file into Binary Authorization:

          gcloud container binauthz policy import <yaml_policy>
        scored: false

  - id: 5.2
    text: "Identity and Access Management (IAM)"
    checks:
      - id: 5.2.1
        text: "Ensure GKE clusters are not running using the Compute Engine default service account (Automated)"
        audit: |
          gcloud container node-pools describe $NODE_POOL --cluster $CLUSTER_NAME --zone $COMPUTE_ZONE --format json | jq '.config.serviceAccount'
        type: "manual"
        remediation: |
          Using Command Line:
          To create a minimally privileged service account:

          gcloud iam service-accounts create <node_sa_name> \
            --display-name "GKE Node Service Account"
          export NODE_SA_EMAIL=$(gcloud iam service-accounts list \
            --format='value(email)' --filter='displayName:GKE Node Service Account')

          Grant the following roles to the service account:

          export PROJECT_ID=$(gcloud config get-value project)
          gcloud projects add-iam-policy-binding <project_id> --member \
            serviceAccount:<node_sa_email> --role roles/monitoring.metricWriter
          gcloud projects add-iam-policy-binding <project_id> --member \
            serviceAccount:<node_sa_email> --role roles/monitoring.viewer
          gcloud projects add-iam-policy-binding <project_id> --member \
            serviceAccount:<node_sa_email> --role roles/logging.logWriter

          To create a new Node pool using the Service account, run the following command:

          gcloud container node-pools create <node_pool> \
            --service-account=<sa_name>@<project_id>.iam.gserviceaccount.com \
            --cluster=<cluster_name> --zone <compute_zone>

          Note: The workloads will need to be migrated to the new Node pool, and the old node
          pools that use the default service account should be deleted to complete the
          remediation.
        scored: false

      - id: 5.2.2
        text: "Prefer using dedicated GCP Service Accounts and Workload Identity (Manual)"
        audit: |
          gcloud container clusters describe $CLUSTER_NAME --zone $COMPUTE_ZONE --format json | jq .workloadIdentityConfig
        type: "manual"
        remediation: |
          Using Command Line:

          gcloud container clusters update <cluster_name> --zone <cluster_zone> \
            --workload-pool <project_id>.svc.id.goog

          Note that existing Node pools are unaffected. New Node pools default to
          --workload-metadata-from-node=GKE_METADATA_SERVER.

          Then, modify existing Node pools to enable GKE_METADATA_SERVER:

          gcloud container node-pools update <node_pool_name> --cluster <cluster_name> \
            --zone <cluster_zone> --workload-metadata=GKE_METADATA

          Workloads may need to be modified in order for them to use Workload Identity as
          described within: https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity.
          Also consider the effects on the availability of hosted workloads as Node pools
          are updated. It may be more appropriate to create new Node Pools.
        scored: false
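      # To verify Workload Identity after the update above (a sketch, reusing the same
      # describe call as the audit): a non-null workloadPool indicates it is enabled.
      #   gcloud container clusters describe <cluster_name> --zone <cluster_zone> \
      #     --format json | jq '.workloadIdentityConfig.workloadPool'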
  - id: 5.3
    text: "Cloud Key Management Service (Cloud KMS)"
    checks:
      - id: 5.3.1
        text: "Ensure Kubernetes Secrets are encrypted using keys managed in Cloud KMS (Automated)"
        audit: |
          gcloud container clusters describe $CLUSTER_NAME --zone $COMPUTE_ZONE --format json | jq '.databaseEncryption'
        type: "manual"
        remediation: |
          To create a key:
          Create a key ring:

          gcloud kms keyrings create <ring_name> --location <location> --project \
            <key_project_id>

          Create a key:

          gcloud kms keys create <key_name> --location <location> --keyring <ring_name> \
            --purpose encryption --project <key_project_id>

          Grant the Kubernetes Engine Service Agent service account the Cloud KMS
          CryptoKey Encrypter/Decrypter role:

          gcloud kms keys add-iam-policy-binding <key_name> --location <location> \
            --keyring <ring_name> --member serviceAccount:<service_account_name> \
            --role roles/cloudkms.cryptoKeyEncrypterDecrypter --project <key_project_id>

          To create a new cluster with Application-layer Secrets Encryption:

          gcloud container clusters create <cluster_name> --cluster-version=latest \
            --zone <zone> \
            --database-encryption-key projects/<key_project_id>/locations/<location>/keyRings/<ring_name>/cryptoKeys/<key_name> \
            --project <cluster_project_id>

          To enable on an existing cluster:

          gcloud container clusters update <cluster_name> --zone <zone> \
            --database-encryption-key projects/<key_project_id>/locations/<location>/keyRings/<ring_name>/cryptoKeys/<key_name> \
            --project <cluster_project_id>
        scored: false

  - id: 5.4
    text: "Node Metadata"
    checks:
      - id: 5.4.1
        text: "Ensure the GKE Metadata Server is Enabled (Automated)"
        audit: |
          gcloud container clusters describe $CLUSTER_NAME --zone $COMPUTE_ZONE --format json | jq '.nodePools[].config.workloadMetadataConfig'
        type: "manual"
        remediation: |
          Using Command Line:

          gcloud container clusters update <cluster_name> --identity-namespace=<project_id>.svc.id.goog

          Note that existing Node pools are unaffected. New Node pools default to
          --workload-metadata-from-node=GKE_METADATA_SERVER.

          To modify an existing Node pool to enable GKE Metadata Server:

          gcloud container node-pools update <node_pool_name> --cluster=<cluster_name> \
            --workload-metadata-from-node=GKE_METADATA_SERVER

          Workloads may need modification in order for them to use Workload Identity as
          described within: https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity.
        scored: false
  - id: 5.5
    text: "Node Configuration and Maintenance"
    checks:
      - id: 5.5.1
        text: "Ensure Container-Optimized OS (cos_containerd) is used for GKE node images (Automated)"
        audit: |
          gcloud container node-pools describe $NODE_POOL --cluster $CLUSTER_NAME --zone $COMPUTE_ZONE --format json | jq '.config.imageType'
        type: "manual"
        remediation: |
          Using Command Line:
          To set the node image to cos_containerd for an existing cluster's Node pool:

          gcloud container clusters upgrade <cluster_name> --image-type cos_containerd \
            --zone <compute_zone> --node-pool <node_pool_name>
        scored: false

      - id: 5.5.2
        text: "Ensure Node Auto-Repair is enabled for GKE nodes (Automated)"
        audit: |
          gcloud container node-pools describe $POOL_NAME --cluster $CLUSTER_NAME --zone $COMPUTE_ZONE --format json | jq '.management'
        type: "manual"
        remediation: |
          Using Command Line:
          To enable node auto-repair for an existing cluster's Node pool:

          gcloud container node-pools update <node_pool_name> --cluster <cluster_name> \
            --zone <compute_zone> --enable-autorepair
        scored: false

      - id: 5.5.3
        text: "Ensure Node Auto-Upgrade is enabled for GKE nodes (Automated)"
        audit: |
          gcloud container node-pools describe $POOL_NAME --cluster $CLUSTER_NAME --zone $COMPUTE_ZONE --format json | jq '.management'
        type: "manual"
        remediation: |
          Using Command Line:
          To enable node auto-upgrade for an existing cluster's Node pool, run the following
          command:

          gcloud container node-pools update <node_pool_name> --cluster <cluster_name> \
            --zone <cluster_zone> --enable-autoupgrade
        scored: false

      - id: 5.5.4
        text: "When creating New Clusters - Automate GKE version management using Release Channels (Automated)"
        audit: |
          gcloud container clusters describe $CLUSTER_NAME --zone $COMPUTE_ZONE --format json | jq .releaseChannel.channel
        type: "manual"
        remediation: |
          Using Command Line:
          Create a new cluster by running the following command:

          gcloud container clusters create <cluster_name> --zone <cluster_zone> \
            --release-channel <release_channel>

          where <release_channel> is stable or regular, according to requirements.
        scored: false

      - id: 5.5.5
        text: "Ensure Shielded GKE Nodes are Enabled (Automated)"
        audit: |
          gcloud container clusters describe $CLUSTER_NAME --zone $COMPUTE_ZONE --format json | jq '.shieldedNodes'
        type: "manual"
        remediation: |
          Using Command Line:
          To migrate an existing cluster, the flag --enable-shielded-nodes needs to be
          specified in the cluster update command:

          gcloud container clusters update <cluster_name> --zone <cluster_zone> \
            --enable-shielded-nodes
        scored: false

      - id: 5.5.6
        text: "Ensure Integrity Monitoring for Shielded GKE Nodes is Enabled (Automated)"
        audit: |
          gcloud container node-pools describe $POOL_NAME --cluster $CLUSTER_NAME --zone $COMPUTE_ZONE --format json | jq .config.shieldedInstanceConfig
        type: "manual"
        remediation: |
          Using Command Line:
          To create a Node pool within the cluster with Integrity Monitoring enabled, run the
          following command:

          gcloud container node-pools create <node_pool_name> --cluster <cluster_name> \
            --zone <compute_zone> --shielded-integrity-monitoring

          Workloads from existing non-conforming Node pools will need to be migrated to the
          newly created Node pool, then delete the non-conforming Node pools to complete the
          remediation.
        scored: false

      - id: 5.5.7
        text: "Ensure Secure Boot for Shielded GKE Nodes is Enabled (Automated)"
        audit: |
          gcloud container node-pools describe $POOL_NAME --cluster $CLUSTER_NAME --zone $COMPUTE_ZONE --format json | jq .config.shieldedInstanceConfig
        type: "manual"
        remediation: |
          Using Command Line:
          To create a Node pool within the cluster with Secure Boot enabled, run the following
          command:

          gcloud container node-pools create <node_pool_name> --cluster <cluster_name> \
            --zone <compute_zone> --shielded-secure-boot

          Workloads will need to be migrated from existing non-conforming Node pools to the
          newly created Node pool, then delete the non-conforming pools.
        scored: false
  - id: 5.6
    text: "Cluster Networking"
    checks:
      - id: 5.6.1
        text: "Enable VPC Flow Logs and Intranode Visibility (Automated)"
        audit: |
          gcloud container clusters describe $CLUSTER_NAME --zone $COMPUTE_ZONE --format json | jq '.networkConfig.enableIntraNodeVisibility'
        type: "manual"
        remediation: |
          Using Command Line:
          1. Find the subnetwork name associated with the cluster.

          gcloud container clusters describe <cluster_name> \
            --region <cluster_region> --format json | jq '.subnetwork'

          2. Update the subnetwork to enable VPC Flow Logs.
          gcloud compute networks subnets update <subnet_name> --enable-flow-logs
        scored: false

      - id: 5.6.2
        text: "Ensure use of VPC-native clusters (Automated)"
        audit: |
          gcloud container clusters describe $CLUSTER_NAME --zone $COMPUTE_ZONE --format json | jq '.ipAllocationPolicy.useIpAliases'
        type: "manual"
        remediation: |
          Using Command Line:
          To enable Alias IP on a new cluster, run the following command:

          gcloud container clusters create <cluster_name> --zone <compute_zone> \
            --enable-ip-alias
        scored: false

      - id: 5.6.3
        text: "Ensure Control Plane Authorized Networks is Enabled (Automated)"
        audit: |
          gcloud container clusters describe $CLUSTER_NAME --zone $COMPUTE_ZONE --format json | jq '.masterAuthorizedNetworksConfig'
        type: "manual"
        remediation: |
          Using Command Line:
          To enable Control Plane Authorized Networks for an existing cluster, run the following
          command:

          gcloud container clusters update <cluster_name> --zone <compute_zone> \
            --enable-master-authorized-networks

          Along with this, you can list authorized networks using the --master-authorized-networks
          flag, which contains a list of up to 20 external networks that are allowed to
          connect to your cluster's control plane through HTTPS. You provide these networks as
          a comma-separated list of addresses in CIDR notation (such as 90.90.100.0/24).
        scored: false

      - id: 5.6.4
        text: "Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled (Automated)"
        audit: |
          gcloud container clusters describe $CLUSTER_NAME --zone $COMPUTE_ZONE --format json | jq '.privateClusterConfig.enablePrivateEndpoint'
        type: "manual"
        remediation: |
          Using Command Line:
          Create a cluster with a Private Endpoint enabled and Public Access disabled by including
          the --enable-private-endpoint flag within the cluster create command:

          gcloud container clusters create <cluster_name> --enable-private-endpoint

          Setting this flag also requires the setting of --enable-private-nodes, --enable-ip-alias
          and --master-ipv4-cidr=<master_cidr_range>.
        scored: false

      - id: 5.6.5
        text: "Ensure clusters are created with Private Nodes (Automated)"
        audit: |
          gcloud container clusters describe $CLUSTER_NAME --zone $COMPUTE_ZONE --format json | jq '.privateClusterConfig.enablePrivateNodes'
        type: "manual"
        remediation: |
          Using Command Line:
          To create a cluster with Private Nodes enabled, include the --enable-private-nodes
          flag within the cluster create command:

          gcloud container clusters create <cluster_name> --enable-private-nodes

          Setting this flag also requires the setting of --enable-ip-alias and
          --master-ipv4-cidr=<master_cidr_range>.
        scored: false

      - id: 5.6.6
        text: "Consider firewalling GKE worker nodes (Manual)"
        audit: |
          gcloud compute instances describe $INSTANCE_NAME --zone $COMPUTE_ZONE --format json | jq '{tags: .tags.items[], serviceaccount: .serviceAccounts[].email, network: .networkInterfaces[].network}'
        type: "manual"
        remediation: |
          Using Command Line:
          Use the following command to generate firewall rules, setting the variables as
          appropriate:

          gcloud compute firewall-rules create <firewall_rule_name> \
            --network <network> --priority <priority> --direction <direction> \
            --action <action> --target-tags <tag> \
            --target-service-accounts <service_account> \
            --source-ranges <source_cidr_range> --source-tags <source_tags> \
            --source-service-accounts <source_service_account> \
            --destination-ranges <destination_cidr_range> --rules <rules>
        scored: false
      - id: 5.6.7
        text: "Ensure use of Google-managed SSL Certificates (Automated)"
        audit: |
          svc_json="$(kubectl get svc -A -o json 2>/dev/null || echo '{"items":[],"__err":"SVC_FORBIDDEN"}')"
          ing_json="$(kubectl get ingress -A -o json 2>/dev/null || echo '{"items":[],"__err":"INGRESS_FORBIDDEN"}')"
          mc_json="$(kubectl get managedcertificates -A -o json 2>/dev/null || echo '{"items":[],"__err":"MC_FORBIDDEN"}')"

          printf '%s\n%s\n%s\n' "$svc_json" "$ing_json" "$mc_json" \
            | jq -rs '
              (.[0] // {}) as $svcsRaw |
              (.[1] // {}) as $ingsRaw |
              (.[2] // {}) as $mcsRaw |

              # If any list failed, surface an error and DO NOT print the success string
              if ($svcsRaw.__err or $ingsRaw.__err or $mcsRaw.__err) then
                "ERROR_KUBECTL_LIST:" +
                ([
                  ($svcsRaw.__err // empty),
                  ($ingsRaw.__err // empty),
                  ($mcsRaw.__err // empty)
                ] | join(","))
              else
                ($svcsRaw.items // []) as $svcs |
                ($ingsRaw.items // []) as $ings |
                ($mcsRaw.items // []) as $mcs |

                def trim: gsub("^\\s+|\\s+$";"");
                def hasmc($ns;$name): any($mcs[]?; .metadata.namespace==$ns and .metadata.name==$name);

                ([
                  # Public Services (not eligible for managed certs)
                  $svcs[]? | select(.spec.type=="LoadBalancer")
                  | "FOUND_PUBLIC_LB_SERVICE:\(.metadata.namespace // "default"):\(.metadata.name)"
                ] + [
                  # Ingresses missing managed-certs annotation
                  $ings[]? as $i
                  | ($i.metadata.annotations."networking.gke.io/managed-certificates" // "") as $ann
                  | select($ann=="")
                  | "FOUND_INGRESS_WITHOUT_MANAGED_CERT:\($i.metadata.namespace // "default"):\($i.metadata.name)"
                ] + [
                  # Ingresses referencing non-existent ManagedCertificate(s)
                  $ings[]? as $i
                  | ($i.metadata.annotations."networking.gke.io/managed-certificates" // "") as $ann
                  | select($ann!="")
                  | ($i.metadata.namespace // "default") as $ns
                  | ($ann | split(",") | map(trim) | map(select(length>0)) | .[]) as $mc
                  | select(hasmc($ns;$mc) | not)
                  | "FOUND_MISSING_MANAGED_CERT_RESOURCE:\($ns):\($i.metadata.name):cert=\($mc)"
                ]) as $f
                | if ($f|length)>0
                  then $f[]
                  else "ALL_INGRESSES_USE_MANAGED_CERTS_AND_NO_PUBLIC_LB_SERVICES"
                  end
              end
            '
        tests:
          test_items:
            - flag: "ALL_INGRESSES_USE_MANAGED_CERTS_AND_NO_PUBLIC_LB_SERVICES"
              set: true
              compare:
                op: eq
                value: "ALL_INGRESSES_USE_MANAGED_CERTS_AND_NO_PUBLIC_LB_SERVICES"
        remediation: |
          If services of type:LoadBalancer are discovered, consider replacing the Service with
          an Ingress.

          To configure the Ingress and use Google-managed SSL certificates, follow the
          instructions as listed at: https://cloud.google.com/kubernetes-engine/docs/how-to/managed-certs.
        scored: true
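      # A minimal ManagedCertificate of the kind the annotation check above looks for
      # (a sketch; the name and domain are placeholders):
      #   apiVersion: networking.gke.io/v1
      #   kind: ManagedCertificate
      #   metadata:
      #     name: example-cert
      #   spec:
      #     domains:
      #       - example.yourdomain.com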
  - id: 5.7
    text: "Logging"
    checks:
      - id: 5.7.1
        text: "Ensure Logging and Cloud Monitoring is Enabled (Automated)"
        audit: |
          gcloud container clusters describe $CLUSTER_NAME --zone $COMPUTE_ZONE --format json | jq '.loggingService'
        type: "manual"
        remediation: |
          To enable Logging for an existing cluster, run the following command:
          gcloud container clusters update <cluster_name> --zone <compute_zone> \
            --logging=<components_to_be_logged>

          See https://cloud.google.com/sdk/gcloud/reference/container/clusters/update#--logging
          for a list of available components for logging.

          To enable Cloud Monitoring for an existing cluster, run the following command:
          gcloud container clusters update <cluster_name> --zone <compute_zone> \
            --monitoring=<components_to_be_monitored>

          See https://cloud.google.com/sdk/gcloud/reference/container/clusters/update#--monitoring
          for a list of available components for Cloud Monitoring.
        scored: false

      - id: 5.7.2
        text: "Enable Linux auditd logging (Manual)"
        audit: |
          kubectl get daemonsets -A -o json | jq '.items[] | select (.spec.template.spec.containers[].image | contains ("gcr.io/stackdriver-agents/stackdriver-logging-agent"))' | jq '{name: .metadata.name, annotations: .metadata.annotations."kubernetes.io/description", namespace: .metadata.namespace, status: .status}'
        type: "manual"
        remediation: |
          Using Command Line:
          Download the example manifests:
          curl https://raw.githubusercontent.com/GoogleCloudPlatform/k8s-node-tools/master/os-audit/cos-auditd-logging.yaml > cos-auditd-logging.yaml

          Edit the example manifests if needed. Then, deploy them:
          kubectl apply -f cos-auditd-logging.yaml

          Verify that the logging Pods have started. If a different Namespace was defined in the
          manifests, replace cos-auditd with the name of the namespace being used:
          kubectl get pods --namespace=cos-auditd
        scored: false

  - id: 5.8
    text: "Authentication and Authorization"
    checks:
      - id: 5.8.1
        text: "Ensure authentication using Client Certificates is Disabled (Automated)"
        audit: |
          gcloud container clusters describe $CLUSTER_NAME --zone $COMPUTE_ZONE --format json | jq '.masterAuth.clientKey'
        type: "manual"
        remediation: |
          Using Command Line:
          Create a new cluster without a Client Certificate:
          gcloud container clusters create [CLUSTER_NAME] \
            --no-issue-client-certificate
        scored: false

      - id: 5.8.2
        text: "Manage Kubernetes RBAC users with Google Groups for GKE (Manual)"
        audit: |
          gcloud container clusters create <cluster_name> --security-group <security_group_name>
        type: "manual"
        remediation: |
          Using Command Line:
          Follow the G Suite Groups instructions at:
          https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control#google-groups-for-gke.

          Then, create a cluster with:
          gcloud container clusters create <cluster_name> --security-group <security_group_name>

          Finally, create Roles, ClusterRoles, RoleBindings, and ClusterRoleBindings that
          reference the G Suite Groups.
        scored: false

      - id: 5.8.3
        text: "Ensure Legacy Authorization (ABAC) is Disabled (Automated)"
        audit: |
          gcloud container clusters describe $CLUSTER_NAME --zone $COMPUTE_ZONE --format json | jq '.legacyAbac'
        type: "manual"
        remediation: |
          Using Command Line:
          To disable Legacy Authorization for an existing cluster, run the following command:
          gcloud container clusters update <cluster_name> --zone <compute_zone> \
            --no-enable-legacy-authorization
        scored: false
  - id: 5.9
    text: "Storage"
    checks:
      - id: 5.9.1
        text: "Enable Customer-Managed Encryption Keys (CMEK) for GKE Persistent Disks (PD) (Manual)"
        audit: |
          gcloud compute disks describe $PV_NAME --zone $COMPUTE_ZONE --format json | jq '.diskEncryptionKey.kmsKeyName'
        type: "manual"
        remediation: |
          Using Command Line:
          Follow the instructions detailed at: https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek.
        scored: false

      - id: 5.9.2
        text: "Enable Customer-Managed Encryption Keys (CMEK) for Boot Disks (Automated)"
        audit: |
          gcloud container node-pools describe $NODE_POOL --cluster $CLUSTER_NAME --zone $COMPUTE_ZONE
        type: "manual"
        remediation: |
          Using Command Line:
          Create a new node pool using customer-managed encryption keys for the node boot
          disk, with <disk_type> either pd-standard or pd-ssd:
          gcloud container node-pools create <node_pool_name> --cluster <cluster_name> \
            --disk-type <disk_type> \
            --boot-disk-kms-key projects/<key_project_id>/locations/<location>/keyRings/<ring_name>/cryptoKeys/<key_name>

          Create a cluster using customer-managed encryption keys for the node boot disk, with
          <disk_type> either pd-standard or pd-ssd:
          gcloud container clusters create <cluster_name> --disk-type <disk_type> \
            --boot-disk-kms-key projects/<key_project_id>/locations/<location>/keyRings/<ring_name>/cryptoKeys/<key_name>
        scored: false

  - id: 5.10
    text: "Other Cluster Configurations"
    checks:
      - id: 5.10.1
        text: "Ensure Kubernetes Web UI is Disabled (Automated)"
        audit: |
          gcloud container clusters describe $CLUSTER_NAME --zone $COMPUTE_ZONE --format json | jq '.addonsConfig.kubernetesDashboard'
        type: "manual"
        remediation: |
          Using Command Line:
          To disable the Kubernetes Dashboard on an existing cluster, run the following
          command:
          gcloud container clusters update <cluster_name> --zone <zone> \
            --update-addons=KubernetesDashboard=DISABLED
        scored: false

      - id: 5.10.2
        text: "Ensure that Alpha clusters are not used for production workloads (Automated)"
        audit: |
          gcloud container clusters describe $CLUSTER_NAME --zone $COMPUTE_ZONE --format json | jq '.enableKubernetesAlpha'
        type: "manual"
        remediation: |
          Using Command Line:
          When creating a new cluster:
          gcloud container clusters create [CLUSTER_NAME] \
            --zone [COMPUTE_ZONE]

          Do not use the --enable-kubernetes-alpha argument.
        scored: false

      - id: 5.10.3
        text: "Consider GKE Sandbox for running untrusted workloads (Manual)"
        audit: |
          gcloud container node-pools describe $NODE_POOL --cluster $CLUSTER_NAME --zone $COMPUTE_ZONE --format json | jq '.config.sandboxConfig'
        type: "manual"
        remediation: |
          Using Command Line:
          To enable GKE Sandbox on an existing cluster, a new Node pool must be created,
          which can be done using:
          gcloud container node-pools create <node_pool_name> --zone <compute-zone> \
            --cluster <cluster_name> --image-type=cos_containerd --sandbox="type=gvisor"
        scored: false

      - id: 5.10.5
        text: "Enable Security Posture (Manual)"
        audit: "gcloud container clusters --location describe"
        type: "manual"
        remediation: |
          Enable security posture via the UI, gcloud or the API.
          https://cloud.google.com/kubernetes-engine/docs/how-to/protect-workload-configuration
        scored: false
6
cfg/gke-1.8.0/master.yaml
Normal file
@@ -0,0 +1,6 @@
---
controls:
version: "gke-1.8.0"
id: 1
text: "Control Plane Components"
type: "master"
65
cfg/gke-1.8.0/node.yaml
Normal file
@@ -0,0 +1,65 @@
---
controls:
version: "gke-1.8.0"
id: 3
text: "Worker Nodes"
type: "node"
groups:
  - id: 3.1
    text: "Worker Node Configuration Files"
    checks:
      - id: 3.1.1
        text: "Ensure that the kubeconfig file permissions are set to 644 or more restrictive (Automated)"
        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' '
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "644"
        remediation: |
          Run the below command (based on the file location on your system) on each worker node.
          For example,

          chmod 644 $proxykubeconfig
        scored: true

      - id: 3.1.2
        text: "Ensure that the kubelet kubeconfig file ownership is set to root:root (Automated)"
        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' '
        tests:
          test_items:
            - flag: root:root
        remediation: |
          Run the below command (based on the file location on your system) on each worker node.
          For example:

          chown root:root $proxykubeconfig
        scored: true

      - id: 3.1.3
        text: "Ensure that the kubelet configuration file has permissions set to 644 (Automated)"
        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' '
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "644"
        remediation: |
          Run the following command (using the kubelet config file location)

          chmod 644 $kubeletconf
        scored: true

      - id: 3.1.4
        text: "Ensure that the kubelet configuration file ownership is set to root:root (Automated)"
        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
        tests:
          test_items:
            - flag: root:root
        remediation: |
          Run the following command (using the config file location identified in the Audit step)

          chown root:root $kubeletconf
        scored: true
473
cfg/gke-1.8.0/policies.yaml
Normal file
@@ -0,0 +1,473 @@
---
|
||||
controls:
|
||||
version: "gke-1.8.0"
|
||||
id: 4
|
||||
text: "Kubernetes Policies"
|
||||
type: "policies"
|
||||
groups:
|
||||
- id: 4.1
|
||||
text: "RBAC and Service Accounts"
|
||||
checks:
|
||||
- id: 4.1.1
|
||||
text: "Ensure that the cluster-admin role is only used where required (Automated)"
|
||||
audit: |
|
||||
kubectl get clusterrolebindings -o json | jq -r '
|
||||
[
|
||||
.items[]
|
||||
| select(.roleRef.name == "cluster-admin")
|
||||
| .subjects[]?
|
||||
| select(.kind != "Group" or .name != "system:masters")
|
||||
]
|
||||
| if length == 0
|
||||
then "NO_CLUSTER_ADMIN_BINDINGS"
|
||||
else "FOUND_CLUSTER_ADMIN_BINDING"
|
||||
end
|
||||
'
|
||||
tests:
|
||||
test_items:
|
||||
- flag: "NO_CLUSTER_ADMIN_BINDINGS"
|
||||
set: true
|
||||
compare:
|
||||
op: eq
|
||||
value: "NO_CLUSTER_ADMIN_BINDINGS"
|
||||
remediation: |
|
||||
Identify all ClusterRoleBindings to the "cluster-admin" role and review their subjects:
|
||||
|
||||
kubectl get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECTS:.subjects[*].name | grep cluster-admin
|
||||
|
||||
If non-system principals (users, groups, or service accounts) do not strictly require cluster-admin,
|
||||
rebind them to a least-privileged (Cluster)Role and then remove the excessive binding:
|
||||
|
||||
kubectl delete clusterrolebinding <binding-name>
|
||||
|
||||
Notes:
|
||||
- Do not modify bindings with the "system:" prefix that are required for core components.
|
||||
- Prefer assigning narrowly scoped Roles/ClusterRoles that grant only the permissions needed.
|
||||
scored: true
|
||||
|
||||
- id: 4.1.2
|
||||
text: "Minimize access to secrets (Automated)"
|
||||
audit: |
|
||||
count=$(kubectl get roles --all-namespaces -o json | jq '
|
||||
.items[]
|
||||
| select(.rules[]?
|
||||
| (.resources[]? == "secrets")
|
||||
and ((.verbs[]? == "get") or (.verbs[]? == "list") or (.verbs[]? == "watch"))
|
||||
)' | wc -l)
|
||||
|
||||
if [ "$count" -gt 0 ]; then
|
||||
echo "SECRETS_ACCESS_FOUND"
|
||||
fi
|
||||
tests:
|
||||
test_items:
|
||||
- flag: "SECRETS_ACCESS_FOUND"
|
||||
set: false
|
||||
remediation: |
|
||||
Where possible, remove get, list and watch access to Secret objects in the cluster.
|
||||
scored: true
|
||||
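      # For illustration only (not part of the benchmark): a least-privilege Role
      # that grants read access to pods but deliberately omits the "secrets"
      # resource. Name and namespace are placeholders.
      #
      #   apiVersion: rbac.authorization.k8s.io/v1
      #   kind: Role
      #   metadata:
      #     name: pod-reader        # placeholder name
      #     namespace: my-app       # placeholder namespace
      #   rules:
      #     - apiGroups: [""]
      #       resources: ["pods"]   # no "secrets" entry
      #       verbs: ["get", "list", "watch"]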

      - id: 4.1.3
        text: "Minimize wildcard use in Roles and ClusterRoles (Automated)"
        audit: |
          wildcards=$(kubectl get roles --all-namespaces -o json | jq '
            .items[] | select(
              .rules[]? | (.verbs[]? == "*" or .resources[]? == "*" or .apiGroups[]? == "*")
            )' | wc -l)

          wildcards_clusterroles=$(kubectl get clusterroles -o json | jq '
            .items[] | select(
              .rules[]? | (.verbs[]? == "*" or .resources[]? == "*" or .apiGroups[]? == "*")
            )' | wc -l)

          total=$((wildcards + wildcards_clusterroles))

          if [ "$total" -gt 0 ]; then
            echo "wildcards_present"
          fi
        tests:
          test_items:
            - flag: wildcards_present
              set: false
        remediation: |
          Where possible replace any use of wildcards in clusterroles and roles with specific
          objects or actions.
        scored: true

      - id: 4.1.4
        text: "Ensure that default service accounts are not actively used (Automated)"
        audit: |
          echo "🔹 Default Service Accounts with automountServiceAccountToken enabled:"
          default_sa_count=$(kubectl get serviceaccounts --all-namespaces -o json | jq '
            [.items[] | select(.metadata.name == "default" and (.automountServiceAccountToken != false))] | length')
          if [ "$default_sa_count" -gt 0 ]; then
            echo "default_sa_not_auto_mounted"
          fi

          echo "\n🔹 Pods using default ServiceAccount:"
          pods_using_default_sa=$(kubectl get pods --all-namespaces -o json | jq '
            [.items[] | select(.spec.serviceAccountName == "default")] | length')
          if [ "$pods_using_default_sa" -gt 0 ]; then
            echo "default_sa_used_in_pods"
          fi
        tests:
          test_items:
            - flag: default_sa_not_auto_mounted
              set: false
            - flag: default_sa_used_in_pods
              set: false
        remediation: |
          Create explicit service accounts wherever a Kubernetes workload requires specific
          access to the Kubernetes API server.

          Modify the configuration of each default service account to include this value

          automountServiceAccountToken: false
        scored: true
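      # For illustration only: a default ServiceAccount patched so that its token
      # is not auto-mounted, as the remediation above describes. The namespace is
      # a placeholder.
      #
      #   apiVersion: v1
      #   kind: ServiceAccount
      #   metadata:
      #     name: default
      #     namespace: my-app      # placeholder namespace
      #   automountServiceAccountToken: false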

      - id: 4.1.5
        text: "Ensure that Service Account Tokens are only mounted where necessary (Automated)"
        audit: |
          echo "🔹 Pods with automountServiceAccountToken enabled:"
          pods_with_token_mount=$(kubectl get pods --all-namespaces -o json | jq '
            [.items[] | select(.spec.automountServiceAccountToken != false)] | length')

          if [ "$pods_with_token_mount" -gt 0 ]; then
            echo "automountServiceAccountToken"
          fi
        tests:
          test_items:
            - flag: automountServiceAccountToken
              set: false
        remediation: |
          Modify the definition of pods and service accounts which do not need to mount service
          account tokens to disable it.
        scored: true
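      # For illustration only: a Pod spec that opts out of mounting a service
      # account token, matching the remediation above. Name and image are
      # placeholders.
      #
      #   apiVersion: v1
      #   kind: Pod
      #   metadata:
      #     name: no-token-pod          # placeholder name
      #   spec:
      #     automountServiceAccountToken: false
      #     containers:
      #       - name: app
      #         image: registry.example.com/app:latest   # placeholder image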

      - id: 4.1.6
        text: "Avoid use of system:masters group (Automated)"
        audit: |
          found=0
          for csr in $(kubectl get csr -o name 2>/dev/null | sed 's|^.*/||'); do
            req=$(kubectl get csr "$csr" -o jsonpath='{.spec.request}' 2>/dev/null)
            [ -z "$req" ] && continue
            if echo "$req" | base64 -d 2>/dev/null | openssl req -noout -text 2>/dev/null | grep -q 'O = system:masters'; then
              conds=$(kubectl get csr "$csr" -o json | jq -r '[.status.conditions[]?.type] | join(",")')
              echo "FOUND_SYSTEM_MASTERS_CSR:${csr}:${conds:-NONE}"
              found=1
            fi
          done
          if [ "$found" -eq 0 ]; then
            echo "NO_SYSTEM_MASTERS_CREDENTIALS_FOUND"
          fi
        tests:
          test_items:
            - flag: "NO_SYSTEM_MASTERS_CREDENTIALS_FOUND"
              set: true
              compare:
                op: eq
                value: "NO_SYSTEM_MASTERS_CREDENTIALS_FOUND"
        remediation: |
          Remove the system:masters group from all users in the cluster.
        scored: true

      - id: 4.1.7
        text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)"
        type: "manual"
        remediation: |
          Where possible, remove the impersonate, bind and escalate rights from subjects.
        scored: false

      - id: 4.1.8
        text: "Avoid bindings to system:anonymous (Automated)"
        audit: |
          # Flags any ClusterRoleBinding/RoleBinding that targets the user "system:anonymous".
          # Prints "NO_ANONYMOUS_BINDINGS" when none are found.
          (
            kubectl get clusterrolebindings -o json | jq -r '
              .items[]
              | select((.subjects | length) > 0)
              | select(any(.subjects[]?;
                  .kind=="User" and .name=="system:anonymous"
                ))
              | "FOUND_ANONYMOUS:ClusterRoleBinding:\(.metadata.name):ROLE=\(.roleRef.kind)/\(.roleRef.name)"
            ';
            kubectl get rolebindings -A -o json | jq -r '
              .items[]
              | select((.subjects | length) > 0)
              | select(any(.subjects[]?;
                  .kind=="User" and .name=="system:anonymous"
                ))
              | "FOUND_ANONYMOUS:RoleBinding:\(.metadata.namespace):\(.metadata.name):ROLE=\(.roleRef.kind)/\(.roleRef.name)"
            '
          ) | (grep -q '^FOUND_ANONYMOUS:' && cat || echo 'NO_ANONYMOUS_BINDINGS')
        tests:
          test_items:
            - flag: "NO_ANONYMOUS_BINDINGS"
              set: true
              compare:
                op: eq
                value: "NO_ANONYMOUS_BINDINGS"
        remediation: |
          Identify all clusterrolebindings and rolebindings to the user system:anonymous.
          Check if they are used and review the permissions associated with the binding using the
          commands in the Audit section above or refer to GKE documentation
          (https://cloud.google.com/kubernetes-engine/docs/best-practices/rbac#detect-prevent-default).

          Strongly consider replacing unsafe bindings with an authenticated, user-defined group.
          Where possible, bind to non-default, user-defined groups with least-privilege roles.

          If there are any unsafe bindings to the user system:anonymous, proceed to delete them
          after consideration for cluster operations with only necessary, safer bindings.

          kubectl delete clusterrolebinding [CLUSTER_ROLE_BINDING_NAME]
          kubectl delete rolebinding [ROLE_BINDING_NAME] --namespace [ROLE_BINDING_NAMESPACE]
        scored: true

      - id: 4.1.9
        text: "Avoid non-default bindings to system:unauthenticated (Automated)"
        audit: |
          # Flags any non-default binding to the group "system:unauthenticated".
          # Prints "NO_NON_DEFAULT_UNAUTH_BINDINGS" when none are found.
          (
            kubectl get clusterrolebindings -o json | jq -r '
              .items[]
              | select(.metadata.name != "system:public-info-viewer")
              | select((.subjects | length) > 0)
              | select(any(.subjects[]?;
                  .kind=="Group" and .name=="system:unauthenticated"
                ))
              | "FOUND_UNAUTH:ClusterRoleBinding:\(.metadata.name):ROLE=\(.roleRef.kind)/\(.roleRef.name)"
            ';
            kubectl get rolebindings -A -o json | jq -r '
              .items[]
              | select((.subjects | length) > 0)
              | select(any(.subjects[]?;
                  .kind=="Group" and .name=="system:unauthenticated"
                ))
              | "FOUND_UNAUTH:RoleBinding:\(.metadata.namespace):\(.metadata.name):ROLE=\(.roleRef.kind)/\(.roleRef.name)"
            '
          ) | (grep -q "^FOUND_UNAUTH:" && cat || echo "NO_NON_DEFAULT_UNAUTH_BINDINGS")
        tests:
          test_items:
            - flag: "NO_NON_DEFAULT_UNAUTH_BINDINGS"
              set: true
              compare:
                op: eq
                value: "NO_NON_DEFAULT_UNAUTH_BINDINGS"
        remediation: |
          Identify all non-default clusterrolebindings and rolebindings to the group
          system:unauthenticated. Check if they are used and review the permissions
          associated with the binding using the commands in the Audit section above or refer to
          GKE documentation (https://cloud.google.com/kubernetes-engine/docs/best-practices/rbac#detect-prevent-default).

          Strongly consider replacing non-default, unsafe bindings with an authenticated, user-
          defined group. Where possible, bind to non-default, user-defined groups with least-
          privilege roles.

          If there are any non-default, unsafe bindings to the group system:unauthenticated,
          proceed to delete them after consideration for cluster operations with only necessary,
          safer bindings.

          kubectl delete clusterrolebinding [CLUSTER_ROLE_BINDING_NAME]
          kubectl delete rolebinding [ROLE_BINDING_NAME] --namespace [ROLE_BINDING_NAMESPACE]
        scored: true

      - id: 4.1.10
        text: "Avoid non-default bindings to system:authenticated (Automated)"
        audit: |
          # Flags any non-default binding to the group "system:authenticated".
          # Allowed defaults (CRB): system:basic-user, system:discovery
          # Prints "NO_NON_DEFAULT_AUTH_BINDINGS" when none are found.
          (
            kubectl get clusterrolebindings -o json | jq -r '
              .items[]
              | select((.metadata.name != "system:basic-user") and (.metadata.name != "system:discovery"))
              | select((.subjects | length) > 0)
              | select(any(.subjects[]?;
                  .kind=="Group" and .name=="system:authenticated"
                ))
              | "FOUND_AUTH:ClusterRoleBinding:\(.metadata.name):ROLE=\(.roleRef.kind)/\(.roleRef.name)"
            ';
            kubectl get rolebindings -A -o json | jq -r '
              .items[]
              | select((.subjects | length) > 0)
              | select(any(.subjects[]?;
                  .kind=="Group" and .name=="system:authenticated"
                ))
              | "FOUND_AUTH:RoleBinding:\(.metadata.namespace):\(.metadata.name):ROLE=\(.roleRef.kind)/\(.roleRef.name)"
            '
          ) | (grep -q "^FOUND_AUTH:" && cat || echo "NO_NON_DEFAULT_AUTH_BINDINGS")
        tests:
          test_items:
            - flag: "NO_NON_DEFAULT_AUTH_BINDINGS"
              set: true
              compare:
                op: eq
                value: "NO_NON_DEFAULT_AUTH_BINDINGS"
        remediation: |
          Identify all non-default clusterrolebindings and rolebindings to the group
          system:authenticated. Check if they are used and review the permissions associated
          with the binding using the commands in the Audit section above or refer to GKE
          documentation.

          Strongly consider replacing non-default, unsafe bindings with an authenticated, user-
          defined group. Where possible, bind to non-default, user-defined groups with least-
          privilege roles.

          If there are any non-default, unsafe bindings to the group system:authenticated,
          proceed to delete them after consideration for cluster operations with only necessary,
          safer bindings.

          kubectl delete clusterrolebinding [CLUSTER_ROLE_BINDING_NAME]
          kubectl delete rolebinding [ROLE_BINDING_NAME] --namespace [ROLE_BINDING_NAMESPACE]
        scored: true

  - id: 4.2
    text: "Pod Security Standards"
    checks:
      - id: 4.2.1
        text: "Ensure that the cluster enforces Pod Security Standard Baseline profile or stricter for all namespaces. (Manual)"
        type: "manual"
        remediation: |
          Ensure that Pod Security Admission is in place for every namespace which contains
          user workloads.
          Run the following command to enforce the Baseline profile in a namespace:

          kubectl label namespace <namespace-name> pod-security.kubernetes.io/enforce=baseline
        scored: false
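      # For illustration only: enforcing the Baseline profile (and additionally
      # auditing/warning against Restricted) via namespace labels. "my-app" is a
      # placeholder namespace name.
      #
      #   apiVersion: v1
      #   kind: Namespace
      #   metadata:
      #     name: my-app
      #     labels:
      #       pod-security.kubernetes.io/enforce: baseline
      #       pod-security.kubernetes.io/audit: restricted
      #       pod-security.kubernetes.io/warn: restricted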

  - id: 4.3
    text: "Network Policies and CNI"
    checks:
      - id: 4.3.1
        text: "Ensure that the CNI in use supports Network Policies (Manual)"
        type: "manual"
        remediation: |
          To use a CNI plugin with Network Policy, enable Network Policy in GKE, and the CNI plugin
          will be updated. See Recommendation 5.6.7.
        scored: false

      - id: 4.3.2
        text: "Ensure that all Namespaces have Network Policies defined (Automated)"
        audit: |
          (kubectl get ns -o json; kubectl get networkpolicy -A -o json) \
          | jq -rs '
            (.[0].items | map(.metadata.name)
             | map(select(.!="kube-system" and .!="kube-public" and .!="kube-node-lease"))) as $ns
            |
            ( (.[1].items // [])
              | sort_by(.metadata.namespace)
              | group_by(.metadata.namespace)
              | map({key: .[0].metadata.namespace, value: length})
              | from_entries
            ) as $np
            |
            [ $ns[] | select( ($np[.] // 0) == 0 ) ] as $missing
            |
            if ($missing|length)>0
            then ($missing[] | "FOUND_NAMESPACE_WITHOUT_NETWORKPOLICY:"+.)
            else "ALL_NAMESPACES_HAVE_NETWORK_POLICIES"
            end
          '
        tests:
          test_items:
            - flag: "ALL_NAMESPACES_HAVE_NETWORK_POLICIES"
              set: true
              compare:
                op: eq
                value: "ALL_NAMESPACES_HAVE_NETWORK_POLICIES"
        remediation: |
          Follow the documentation and create NetworkPolicy objects as needed.
          See: https://cloud.google.com/kubernetes-engine/docs/how-to/network-policy#creating_a_network_policy
          for more information.
        scored: true
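      # For illustration only: a default-deny NetworkPolicy that selects all pods
      # in a namespace, which satisfies the "namespace has a NetworkPolicy"
      # condition checked above. The namespace is a placeholder.
      #
      #   apiVersion: networking.k8s.io/v1
      #   kind: NetworkPolicy
      #   metadata:
      #     name: default-deny-all
      #     namespace: my-app      # placeholder namespace
      #   spec:
      #     podSelector: {}        # empty selector matches every pod
      #     policyTypes:
      #       - Ingress
      #       - Egress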

  - id: 4.4
    text: "Secrets Management"
    checks:
      - id: 4.4.1
        text: "Prefer using secrets as files over secrets as environment variables (Automated)"
        audit: |
          output=$(kubectl get all --all-namespaces -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {"\n"}{end}')
          if [ -z "$output" ]; then echo "NO_ENV_SECRET_REFERENCES"; else echo "ENV_SECRET_REFERENCES_FOUND"; fi
        tests:
          test_items:
            - flag: "NO_ENV_SECRET_REFERENCES"
              set: true
              compare:
                op: eq
                value: "NO_ENV_SECRET_REFERENCES"
        remediation: |
          If possible, rewrite application code to read secrets from mounted secret files, rather than
          from environment variables.
        scored: true
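      # For illustration only: mounting a Secret as a read-only file instead of
      # exposing it through an environment variable. Pod, image, and secret names
      # are placeholders.
      #
      #   apiVersion: v1
      #   kind: Pod
      #   metadata:
      #     name: secret-as-file       # placeholder name
      #   spec:
      #     containers:
      #       - name: app
      #         image: registry.example.com/app:latest   # placeholder image
      #         volumeMounts:
      #           - name: creds
      #             mountPath: /etc/creds
      #             readOnly: true
      #     volumes:
      #       - name: creds
      #         secret:
      #           secretName: app-credentials            # placeholder secret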

      - id: 4.4.2
        text: "Consider external secret storage (Manual)"
        type: "manual"
        remediation: |
          Refer to the secrets management options offered by your cloud provider or a third-party
          secrets management solution.
        scored: false

  - id: 4.5
    text: "Extensible Admission Control"
    checks:
      - id: 4.5.1
        text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)"
        type: "manual"
        remediation: |
          Follow the Kubernetes documentation and setup image provenance.
          Also see recommendation 5.10.4.
        scored: false

  - id: 4.6
    text: "General Policies"
    checks:
      - id: 4.6.1
        text: "Create administrative boundaries between resources using namespaces (Manual)"
        type: "manual"
        remediation: |
          Follow the documentation and create namespaces for objects in your deployment as you need
          them.
        scored: false

      - id: 4.6.2
        text: "Ensure that the seccomp profile is set to RuntimeDefault in your pod definitions (Automated)"
        type: "manual"
        remediation: |
          Use security context to enable the RuntimeDefault seccomp profile in your pod
          definitions. An example is as below:

          {
            "namespace": "kube-system",
            "name": "metrics-server-v0.7.0-dbcc8ddf6-gz7d4",
            "seccompProfile": "RuntimeDefault"
          }
        scored: false
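      # For illustration only: a pod-level securityContext that enables the
      # RuntimeDefault seccomp profile, as the remediation above describes.
      # Name and image are placeholders.
      #
      #   apiVersion: v1
      #   kind: Pod
      #   metadata:
      #     name: seccomp-default       # placeholder name
      #   spec:
      #     securityContext:
      #       seccompProfile:
      #         type: RuntimeDefault
      #     containers:
      #       - name: app
      #         image: registry.example.com/app:latest   # placeholder image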

      - id: 4.6.3
        text: "Apply Security Context to Your Pods and Containers (Manual)"
        type: "manual"
        remediation: |
          Follow the Kubernetes documentation and apply security contexts to your pods. For a
          suggested list of security contexts, you may refer to the CIS Google Container-
          Optimized OS Benchmark.
        scored: false

      - id: 4.6.4
        text: "The default namespace should not be used (Automated)"
        audit: |
          output=$(kubectl get all -n default --no-headers 2>/dev/null | grep -v '^service\s\+kubernetes\s' || true)
          if [ -z "$output" ]; then echo "DEFAULT_NAMESPACE_UNUSED"; else echo "DEFAULT_NAMESPACE_IN_USE"; fi
        tests:
          test_items:
            - flag: "DEFAULT_NAMESPACE_UNUSED"
              set: true
              compare:
                op: eq
                value: "DEFAULT_NAMESPACE_UNUSED"
        remediation: |
          Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
          resources and that all new resources are created in a specific namespace.
        scored: true
2  cfg/rh-1.4/config.yaml  Normal file
@@ -0,0 +1,2 @@
---
## Version-specific settings that override the values in cfg/config.yaml
62  cfg/rh-1.4/controlplane.yaml  Normal file
@@ -0,0 +1,62 @@
---
controls:
version: rh-1.4
id: 3
text: "Control Plane Configuration"
type: "controlplane"
groups:
  - id: 3.1
    text: "Authentication and Authorization"
    checks:
      - id: 3.1.1
        text: "Client certificate authentication should not be used for users (Manual)"
        audit: |
          # To verify user authentication is enabled
          oc describe authentication
          # To verify that an identity provider is configured
          oc get identity
          # To verify that a custom cluster-admin user exists
          oc get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].kind | grep cluster-admin | grep User
          # To verify that kubeadmin is removed, no results should be returned
          oc get secrets kubeadmin -n kube-system
        type: manual
        remediation: |
          Configure an identity provider for the OpenShift cluster. See "Understanding
          identity provider configuration" in the Authentication section of the OpenShift
          Container Platform documentation. Once an identity provider has been defined,
          you can use RBAC to define and apply permissions.
          After you define an identity provider and create a new cluster-admin user,
          remove the kubeadmin user to improve cluster security.
        scored: false
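      # For illustration only: an OAuth cluster resource configuring an HTPasswd
      # identity provider, one of the options the remediation above refers to.
      # The provider and Secret names are placeholders; the Secret must exist in
      # the openshift-config namespace.
      #
      #   apiVersion: config.openshift.io/v1
      #   kind: OAuth
      #   metadata:
      #     name: cluster
      #   spec:
      #     identityProviders:
      #       - name: local-users          # placeholder provider name
      #         mappingMethod: claim
      #         type: HTPasswd
      #         htpasswd:
      #           fileData:
      #             name: htpass-secret    # placeholder Secret in openshift-config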

  - id: 3.2
    text: "Logging"
    checks:
      - id: 3.2.1
        text: "Ensure that a minimal audit policy is created (Manual)"
        audit: |
          # To view kube apiserver log files
          oc adm node-logs --role=master --path=kube-apiserver/
          # To view openshift apiserver log files
          oc adm node-logs --role=master --path=openshift-apiserver/
          # To verify kube apiserver audit config
          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig[]?'
          # To verify openshift apiserver audit config
          oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig[]?'
        type: manual
        remediation: |
          No remediation required.
        scored: false

      - id: 3.2.2
        text: "Ensure that the audit policy covers key security concerns (Manual)"
        audit: |
          # To verify kube apiserver audit config
          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]?'
          # To verify openshift apiserver audit config
          oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]?'
        type: manual
        remediation: |
          In OpenShift 4.6 and higher, if appropriate for your needs,
          modify the audit policy.
        scored: false
183  cfg/rh-1.4/etcd.yaml  Normal file
@@ -0,0 +1,183 @@
---
controls:
version: rh-1.4
id: 2
text: "Etcd"
type: "etcd"
groups:
  - id: 2
    text: "Etcd"
    checks:
      - id: 2.1
        text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Manual)"
        audit: |
          # Get the node name where the pod is running
          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
          # Get the pod name in the openshift-etcd namespace
          POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
          if [ -z "$POD_NAME" ]; then
            echo "No matching pod found on the current node."
          else
            # Extract the certificate flags from the etcd process command line
            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--cert-file=[^ ]*\).*/\1/'
            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--key-file=[^ ]*\).*/\1/'
          fi
        use_multiple_values: true
        tests:
          test_items:
            - flag: "file"
              compare:
                op: regex
                value: '\/etc\/kubernetes\/static-pod-certs\/secrets\/etcd-all-(serving|certs)\/etcd-serving-.*\.(?:crt|key)'
        remediation: |
          OpenShift does not use the etcd-certfile or etcd-keyfile flags.
          Certificates for etcd are managed by the etcd cluster operator.
        scored: true

      - id: 2.2
        text: "Ensure that the --client-cert-auth argument is set to true (Manual)"
        audit: |
          # Get the node name where the pod is running
          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
          # Get the pod name in the openshift-etcd namespace
          POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
          if [ -z "$POD_NAME" ]; then
            echo "No matching pod found on the current node."
          else
            # Extract the flag from the etcd process command line
            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--client-cert-auth=[^ ]*\).*/\1/'
          fi
        use_multiple_values: true
        tests:
          test_items:
            - flag: "--client-cert-auth"
              compare:
                op: eq
                value: true
        remediation: |
          This setting is managed by the cluster etcd operator. No remediation required.
        scored: true

      - id: 2.3
        text: "Ensure that the --auto-tls argument is not set to true (Manual)"
        audit: |
          # Returns 0 if found, 1 if not found
          # Get the node name where the pod is running
          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
          # Get the pod name in the openshift-etcd namespace
          POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
          if [ -z "$POD_NAME" ]; then
            echo "No matching pod found on the current node."
          else
            # Check whether --auto-tls=true appears on the etcd command line
            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | grep -- --auto-tls=true 2>/dev/null ; echo exit_code=$?
          fi
        use_multiple_values: true
        tests:
          test_items:
            - flag: "exit_code"
              compare:
                op: eq
                value: "1"
        remediation: |
          This setting is managed by the cluster etcd operator. No remediation required.
        scored: true

      - id: 2.4
        text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Manual)"
        audit: |
          # Get the node name where the pod is running
          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
          # Get the pod name in the openshift-etcd namespace
          POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
          if [ -z "$POD_NAME" ]; then
            echo "No matching pod found on the current node."
          else
            # Extract the peer certificate flags from the etcd process command line
            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-cert-file=[^ ]*\).*/\1/'
            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-key-file=[^ ]*\).*/\1/'
          fi
        use_multiple_values: true
        tests:
          test_items:
            - flag: "file"
              compare:
                op: regex
                value: '\/etc\/kubernetes\/static-pod-certs\/secrets\/etcd-all-(peer|certs)\/etcd-peer-.*\.(?:crt|key)'
        remediation: |
          None. This configuration is managed by the etcd operator.
        scored: true

      - id: 2.5
        text: "Ensure that the --peer-client-cert-auth argument is set to true (Manual)"
        audit: |
          # Get the node name where the pod is running
          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
          # Get the pod name in the openshift-etcd namespace
          POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
          if [ -z "$POD_NAME" ]; then
            echo "No matching pod found on the current node."
          else
            # Extract the flag from the etcd process command line
            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-client-cert-auth=[^ ]*\).*/\1/'
          fi
        use_multiple_values: true
        tests:
          test_items:
            - flag: "--peer-client-cert-auth"
              compare:
                op: eq
                value: true
        remediation: |
          This setting is managed by the cluster etcd operator. No remediation required.
        scored: true

      - id: 2.6
        text: "Ensure that the --peer-auto-tls argument is not set to true (Manual)"
        audit: |
          # Returns 0 if found, 1 if not found
          # Get the node name where the pod is running
          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
          # Get the pod name in the openshift-etcd namespace
          POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
          if [ -z "$POD_NAME" ]; then
            echo "No matching pod found on the current node."
          else
            # Check whether --peer-auto-tls=true appears on the etcd command line
            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | grep -- --peer-auto-tls=true 2>/dev/null ; echo exit_code=$?
          fi
        use_multiple_values: true
        tests:
          test_items:
            - flag: "exit_code"
              compare:
                op: eq
                value: "1"
        remediation: |
          This setting is managed by the cluster etcd operator. No remediation required.
        scored: true

      - id: 2.7
        text: "Ensure that a unique Certificate Authority is used for etcd (Manual)"
        audit: |
          # Get the node name where the pod is running
          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
          # Get the pod name in the openshift-etcd namespace
          POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
          if [ -z "$POD_NAME" ]; then
            echo "No matching pod found on the current node."
          else
            # Extract the trusted CA flags from the etcd process command line
            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--trusted-ca-file=[^ ]*\).*/\1/'
            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-trusted-ca-file=[^ ]*\).*/\1/'
          fi
        use_multiple_values: true
        tests:
          test_items:
            - flag: "file"
              compare:
                op: regex
                value: '\/etc\/kubernetes\/static-pod-certs\/configmaps\/(?:etcd-(?:serving|peer-client)-ca\/ca-bundle\.crt|etcd-all-bundles\/server-ca-bundle\.crt)'
        remediation: |
          None required. Certificates for etcd are managed by the OpenShift cluster etcd operator.
        scored: true
1350  cfg/rh-1.4/master.yaml  Normal file
File diff suppressed because it is too large
485  cfg/rh-1.4/node.yaml  Normal file
@@ -0,0 +1,485 @@
---
controls:
version: rh-1.4
id: 4
text: "Worker Nodes"
type: "node"
groups:
  - id: 4.1
    text: "Worker Node Configuration Files"
    checks:
      - id: 4.1.1
        text: "Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated)"
        audit: |
          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
          oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/systemd/system/kubelet.service 2> /dev/null
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "644"
        remediation: |
          By default, the kubelet service file has permissions of 644.
        scored: true

      - id: 4.1.2
        text: "Ensure that the kubelet service file ownership is set to root:root (Automated)"
        audit: |
          # Should return root:root for each node
          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
          oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/systemd/system/kubelet.service 2> /dev/null
        tests:
          test_items:
            - flag: root:root
        remediation: |
          By default, the kubelet service file has ownership of root:root.
        scored: true

      - id: 4.1.3
        text: "If a kube-proxy configuration file exists, ensure permissions are set to 644 or more restrictive (Manual)"
        audit: |
          # Get the node name where the pod is running
          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
          # Get the pod name in the openshift-sdn namespace
          POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)

          if [ -z "$POD_NAME" ]; then
            echo "No matching pods found on the current node."
          else
            # Execute the stat command
            oc exec -n openshift-sdn "$POD_NAME" -- stat -Lc "$i %n permissions=%a" /config/kube-proxy-config.yaml 2>/dev/null
          fi
        tests:
          test_items:
            - flag: "permissions"
              set: true
              compare:
                op: bitmask
                value: "644"
        remediation: |
          None needed.
        scored: true

      - id: 4.1.4
        text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)"
        audit: |
          # Get the node name where the pod is running
          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
          # Get the pod name in the openshift-sdn namespace
          POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)

          if [ -z "$POD_NAME" ]; then
            echo "No matching pods found on the current node."
          else
            # Execute the stat command
            oc exec -n openshift-sdn "$POD_NAME" -- stat -Lc "$i %n %U:%G" /config/kube-proxy-config.yaml 2>/dev/null
          fi
        use_multiple_values: true
        tests:
          test_items:
            - flag: root:root
        remediation: |
          None required. The configuration is managed by OpenShift operators.
        scored: true

      - id: 4.1.5
        text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Manual)"
        audit: |
          # Check permissions
          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
          oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/kubernetes/kubelet.conf 2> /dev/null
        use_multiple_values: true
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "644"
        remediation: |
          None required.
        scored: true

      - id: 4.1.6
        text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Manual)"
        audit: |
          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
          oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubelet.conf 2> /dev/null
        use_multiple_values: true
        tests:
          test_items:
            - flag: root:root
        remediation: |
          None required.
        scored: true

      - id: 4.1.7
        text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive"
        audit: |
          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
          oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig.authentication.x509.clientCAFile'
          oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME permissions=%a" /etc/kubernetes/kubelet-ca.crt 2> /dev/null
        tests:
          test_items:
            - flag: "/etc/kubernetes/kubelet-ca.crt"
            - flag: "permissions"
              compare:
                op: bitmask
                value: "644"
        remediation: |
          No remediation required. OpenShift sets /etc/kubernetes/kubelet-ca.crt to 644 by default.
          If permissions are more permissive than 644, update with: chmod 644 /etc/kubernetes/kubelet-ca.crt
        scored: true

      - id: 4.1.8
        text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)"
        audit: |
          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
          oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubelet-ca.crt 2> /dev/null
        use_multiple_values: true
        tests:
          test_items:
            - flag: root:root
        remediation: |
          None required.
        scored: true

      - id: 4.1.9
        text: "Ensure that the kubelet --config configuration file has permissions set to 600 or more restrictive (Automated)"
        audit: |
          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
          oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /var/lib/kubelet/config.json 2> /dev/null
        use_multiple_values: true
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "600"
        remediation: |
          None required.
        scored: true

      - id: 4.1.10
        text: "Ensure that the kubelet configuration file ownership is set to root:root (Automated)"
        audit: |
          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
          oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /var/lib/kubelet/config.json 2> /dev/null
        use_multiple_values: true
        tests:
          test_items:
            - flag: root:root
        remediation: |
          None required.
        scored: true

  - id: 4.2
    text: "Kubelet"
    checks:
      - id: 4.2.1
        text: "Activate Garbage collection in OpenShift Container Platform 4, as appropriate (Manual)"
        audit: |
          echo "Retrieving and inspecting garbage collection configuration from node-local kubelet configz..."

          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
          oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig'
        tests:
          test_items:
            - flag: "evictionHard"
            - flag: "imageGCHighThresholdPercent"
            - flag: "imageGCLowThresholdPercent"
            - flag: "imageMinimumGCAge"
        remediation: |
          OpenShift manages node garbage collection through KubeletConfig custom resources per MachineConfigPool.
          To configure or adjust garbage collection thresholds, follow the documentation:
          https://docs.openshift.com/container-platform/latest/nodes/nodes/nodes-nodes-garbage-collection.html

          Example: Create or modify a KubeletConfig object to include:
          ---
          evictionHard:
            "memory.available": "200Mi"
            "nodefs.available": "10%"
            "imagefs.available": "15%"
          imageGCHighThresholdPercent: 85
          imageGCLowThresholdPercent: 80
          imageMinimumGCAge: "2m0s"

          Then apply the `KubeletConfig` to the appropriate `MachineConfigPool`.
        scored: true

      - id: 4.2.2
        text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
        audit: |
          echo "Checking if anonymous-auth is disabled in kubelet configuration on the current node..."

          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
          oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig.authentication.anonymous.enabled'
        tests:
          test_items:
            - flag: "false"
        remediation: |
          By default, OpenShift sets anonymous-auth to false in Kubelet configuration.
          If this value is found to be true, create or patch a KubeletConfig object with:

          ---
          kind: KubeletConfig
          apiVersion: machineconfiguration.openshift.io/v1
          metadata:
            name: disable-anonymous-auth
          spec:
            kubeletConfig:
              authentication:
                anonymous:
                  enabled: false

          Then apply this KubeletConfig to the appropriate MachineConfigPool.
          See OpenShift documentation on configuring node-level security settings.
        scored: true

      - id: 4.2.3
        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
        audit: |
          echo "Checking kubelet authorization mode on the current node..."

          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
          oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig.authorization.mode'
        tests:
          test_items:
            - flag: AlwaysAllow
              set: false
        remediation: |
          No remediation required. By default, OpenShift uses secure authorization modes such as 'Webhook' and does not allow AlwaysAllow.
          If AlwaysAllow is found, the node must be reconfigured using a KubeletConfig applied through the appropriate MachineConfigPool.
        scored: true

      - id: 4.2.4
        text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
        audit: |
          echo "Checking Kubelet 'clientCAFile' setting on current node..."

          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
          oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz \
            | jq '.kubeletconfig.authentication.x509.clientCAFile'
        tests:
          test_items:
            - flag: "/etc/kubernetes/kubelet-ca.crt"
        remediation: |
          No remediation required. OpenShift sets the clientCAFile by default to /etc/kubernetes/kubelet-ca.crt.
          Manual modification is unsupported and unnecessary as OpenShift manages Kubelet certificate authentication via the Machine Config Operator.
        scored: true

      - id: 4.2.5
        text: "Verify that the read only port is not used or is set to 0 (Automated)"
        audit: |
          echo "Checking 'kubelet-read-only-port' argument in openshift-kube-apiserver config..."

          oc -n openshift-kube-apiserver get configmap config -o json \
            | jq -r '.data["config.yaml"]' \
            | yq '.apiServerArguments."kubelet-read-only-port"[0]'
        tests:
          test_items:
            - flag: "0"
        remediation: |
          No remediation is required if the read-only port is set to 0.
          If this value is not set to 0 (or the argument is missing), create a KubeletConfig object and apply it to the appropriate MachineConfigPool to disable the read-only port.

          Example KubeletConfig:
          ---
          apiVersion: machineconfiguration.openshift.io/v1
          kind: KubeletConfig
          metadata:
            name: disable-readonly-port
          spec:
            kubeletConfig:
              readOnlyPort: 0
        scored: true

      - id: 4.2.6
        text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)"
        audit: |
          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
          oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz \
            | jq '.kubeletconfig'
        tests:
          test_items:
            - path: ".streamingConnectionIdleTimeout"
              compare:
                op: noteq
                value: "0s"
        remediation: |
          By default, OpenShift sets streamingConnectionIdleTimeout to 4h0m0s.
          If it is manually set to "0s", this disables timeouts, which is insecure.

          To remediate, create a `KubeletConfig` CR with a safer timeout (e.g., 1h0m0s):
          ---
          apiVersion: machineconfiguration.openshift.io/v1
          kind: KubeletConfig
          metadata:
            name: set-streaming-timeout
          spec:
            kubeletConfig:
              streamingConnectionIdleTimeout: "1h0m0s"
        scored: true

      - id: 4.2.7
        text: "Ensure that the --make-iptables-util-chains argument is set to true (Manual)"
        audit: |
          echo "Checking 'makeIPTablesUtilChains' setting in Kubelet config on current node..."

          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
          oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz \
            | jq '.kubeletconfig'
        tests:
          test_items:
            - path: ".makeIPTablesUtilChains"
              compare:
                op: eq
                value: true
        remediation: |
          No remediation is required.
          By default, OpenShift sets makeIPTablesUtilChains to true.
          This allows Kubelet to manage iptables rules and keep them in sync with the dynamic pod network configuration.
        scored: true

      - id: 4.2.8
        text: "Ensure that the kubeAPIQPS [--event-qps] argument is set to 0 or a level which ensures appropriate event capture (Manual)"
        audit: |
          echo "Checking 'kubeAPIQPS' setting in Kubelet config on current node..."

          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
          oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz \
            | jq '.kubeletconfig'
        tests:
          test_items:
            - path: ".kubeAPIQPS"
              compare:
                op: gte
                value: 1
        remediation: |
          OpenShift sets kubeAPIQPS to a default of 50, which is appropriate in most environments.
          If kubeAPIQPS is set to 0, event rate limiting is disabled, which can overwhelm the kubelet with excessive events.

          To configure a proper limit, create or modify a `KubeletConfig` resource with an appropriate value:

          ---
          apiVersion: machineconfiguration.openshift.io/v1
          kind: KubeletConfig
          metadata:
            name: set-kubeapiqps
          spec:
            kubeletConfig:
              kubeAPIQPS: 50
        scored: true

      - id: 4.2.9
        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)"
        audit: |
          oc get configmap config -n openshift-kube-apiserver -ojson \
            | jq -r '.data["config.yaml"]' \
            | jq -r '.apiServerArguments["kubelet-client-certificate"][]?'

          oc get configmap config -n openshift-kube-apiserver -ojson \
            | jq -r '.data["config.yaml"]' \
            | jq -r '.apiServerArguments["kubelet-client-key"][]?'
        tests:
          bin_op: and
          test_items:
            - flag: "/etc/kubernetes/static-pod-certs/secrets/kubelet-client/tls.crt"
            - flag: "/etc/kubernetes/static-pod-certs/secrets/kubelet-client/tls.key"
        remediation: |
          No remediation is required. OpenShift manages secure TLS connections to kubelets by default using its internal certificate authority.
          These X.509 certificates are rotated and validated automatically by the platform.
          Manual modifications to the TLS paths or keys are not supported and can lead to cluster issues.
        scored: true

      - id: 4.2.10
        text: "Ensure that the --rotate-certificates argument is not set to false (Manual)"
        audit: |
          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
          oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz \
            | jq '.kubeletconfig'
        tests:
          test_items:
            - path: ".rotateCertificates"
              compare:
                op: eq
                value: true
        remediation: |
          No remediation required. By default, OpenShift enables certificate rotation via rotateCertificates=true.
          If disabled, you must either enable rotation via KubeletConfig or implement external certificate renewal.

          Example remediation using KubeletConfig:
          ---
          apiVersion: machineconfiguration.openshift.io/v1
          kind: KubeletConfig
          metadata:
            name: enable-cert-rotation
          spec:
            kubeletConfig:
              rotateCertificates: true
        scored: true

      - id: 4.2.11
        text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)"
        audit: |
          echo "Checking that RotateKubeletServerCertificate is enabled in kubelet config on current node..."

          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')

          echo "Verifying feature gate: RotateKubeletServerCertificate"
          oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz \
            | jq '.kubeletconfig.featureGates.RotateKubeletServerCertificate'

          echo "Verifying that certificate rotation is enabled"
          oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz \
            | jq '.kubeletconfig.rotateCertificates'
        tests:
          bin_op: and
          test_items:
            - flag: "RotateKubeletServerCertificate"
              compare:
                op: eq
                value: true
            - flag: "rotateCertificates"
              compare:
                op: eq
                value: true
        remediation: |
          No remediation is required. OpenShift enables RotateKubeletServerCertificate by default and manages certificate rotation automatically.
          If the feature gate or rotation setting is disabled, configure a `KubeletConfig` CR and apply it to the MachineConfigPool:

          ---
          apiVersion: machineconfiguration.openshift.io/v1
          kind: KubeletConfig
          metadata:
            name: enable-server-cert-rotation
          spec:
            kubeletConfig:
              rotateCertificates: true
              featureGates:
                RotateKubeletServerCertificate: true
        scored: true

      - id: 4.2.13
        text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)"
        audit: |
          # needs verification
          # verify cipher suites
          oc describe --namespace=openshift-ingress-operator ingresscontroller/default
          oc get kubeapiservers.operator.openshift.io cluster -o json | jq .spec.observedConfig.servingInfo
          oc get openshiftapiservers.operator.openshift.io cluster -o json | jq .spec.observedConfig.servingInfo
          oc get cm -n openshift-authentication v4-0-config-system-cliconfig -o jsonpath='{.data.v4\-0\-config\-system\-cliconfig}' | jq .servingInfo
          # check value for tlsSecurityProfile; null is returned if default is used
          oc get kubeapiservers.operator.openshift.io cluster -o json | jq .spec.tlsSecurityProfile
        type: manual
        remediation: |
          Follow the directions above and in the OpenShift documentation to configure the tlsSecurityProfile.
          See the "Configuring Ingress" section of the OpenShift documentation.
        scored: false
486  cfg/rh-1.4/policies.yaml  Normal file
@@ -0,0 +1,486 @@
---
controls:
version: rh-1.4
id: 5
text: "Policies"
type: "policies"
groups:
  - id: 5.1
    text: "RBAC and Service Accounts"
    checks:
      - id: 5.1.1
        text: "Ensure that the cluster-admin role is only used where required (Manual)"
        type: "manual"
        audit: |
          # To get a list of users and service accounts with the cluster-admin role
          oc get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].kind |
            grep cluster-admin
          # To verify that kubeadmin is removed, no results should be returned
          oc get secrets kubeadmin -n kube-system
        remediation: |
          Identify all clusterrolebindings to the cluster-admin role. Check if they are used and
          if they need this role or if they could use a role with fewer privileges.
          Where possible, first bind users to a lower privileged role and then remove the
          clusterrolebinding to the cluster-admin role:
          oc delete clusterrolebinding [name]
        scored: false

      - id: 5.1.2
        text: "Minimize access to secrets (Manual)"
        type: "manual"
        remediation: |
          Where possible, remove get, list and watch access to secret objects in the cluster.
        scored: false

      - id: 5.1.3
        text: "Minimize wildcard use in Roles and ClusterRoles (Manual)"
        type: "manual"
        remediation: |
          Where possible replace any use of wildcards in clusterroles and roles with specific
          objects or actions.
        scored: false

      - id: 5.1.4
        text: "Minimize access to create pods (Manual)"
        type: "manual"
        remediation: |
          Where possible, remove create access to pod objects in the cluster.
        scored: false

      - id: 5.1.5
        text: "Ensure that default service accounts are not actively used. (Manual)"
        type: "manual"
        remediation: |
          None required.
        scored: false

      - id: 5.1.6
        text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)"
        type: "manual"
        remediation: |
          Modify the definition of pods and service accounts which do not need to mount service
          account tokens to disable it.
        scored: false

  - id: 5.2
    text: "Security Context Constraints (SCCs)"
    checks:
      - id: 5.2.1
        text: "Minimize the admission of privileged containers (Manual)"
        audit: |
          oc get scc -o json \
            | jq -r '[.items[] | select(.allowPrivilegedContainer==false) | .metadata.name]
              | length
              | if . > 0 then "pass" else "fail" end'
        tests:
          test_items:
            - flag: "pass"
        remediation: |
          If no SCCs exist that restrict privileged containers, create one by running:

          oc create -f - <<EOF
          apiVersion: security.openshift.io/v1
          kind: SecurityContextConstraints
          metadata:
            name: restricted-no-priv
          allowPrivilegedContainer: false
          runAsUser:
            type: MustRunAsRange
          seLinuxContext:
            type: MustRunAs
          users: []
          groups:
            - system:authenticated
          EOF

          Then apply appropriate RBAC to assign this SCC only to necessary service accounts, groups, or users.
          Carefully avoid assigning `allowPrivilegedContainer: true` in any SCC that is broadly bound.
        scored: true

      - id: 5.2.2
        text: "Minimize the admission of containers wishing to share the host process ID namespace (Manual)"
        audit: |
          oc get scc -o json \
            | jq -r '[.items[] | select(.allowHostPID==false) | .metadata.name]
              | length
              | if . > 0 then "pass" else "fail" end'
        tests:
          test_items:
            - flag: "pass"
        remediation: |
          If SCCs with `allowHostPID: true` exist, ensure they are restricted to trusted service accounts only.

          To create a restrictive SCC that prevents host PID sharing:

          ---
          apiVersion: security.openshift.io/v1
          kind: SecurityContextConstraints
          metadata:
            name: restricted-no-hostpid
          allowHostPID: false
          runAsUser:
            type: MustRunAsRange
          seLinuxContext:
            type: MustRunAs
          users: []
          groups:
            - system:authenticated
          ---

          Apply the SCC and bind it only to users or groups that do **not** need hostPID access.
        scored: true
|
||||
- id: 5.2.3
|
||||
text: "Minimize the admission of containers wishing to share the host IPC namespace (Manual)"
|
||||
audit: |
|
||||
oc get scc -o json \
|
||||
| jq -r '[.items[] | select(.allowHostIPC==false) | .metadata.name]
|
||||
| length
|
||||
| if . > 0 then "pass" else "fail" end'
|
||||
tests:
|
||||
test_items:
|
||||
- flag: "pass"
|
||||
remediation: |
|
||||
If no SCCs restrict hostIPC usage, create one that explicitly sets allowHostIPC: false:
|
||||
|
||||
---
|
||||
apiVersion: security.openshift.io/v1
|
||||
kind: SecurityContextConstraints
|
||||
metadata:
|
||||
name: restricted-no-hostipc
|
||||
allowHostIPC: false
|
||||
runAsUser:
|
||||
type: MustRunAsRange
|
||||
seLinuxContext:
|
||||
type: MustRunAs
|
||||
users: []
|
||||
groups:
|
||||
- system:authenticated
|
||||
---
|
||||
|
||||
Then assign this SCC to general workloads and ensure any SCCs allowing hostIPC are tightly scoped via RBAC.
|
||||
scored: true
|
||||
|
||||
- id: 5.2.4
|
||||
text: "Minimize the admission of containers wishing to share the host network namespace (manual)"
|
||||
audit: |
|
||||
oc get scc -o json \
|
||||
| jq -r '[.items[] | select(.allowHostNetwork==false) | .metadata.name]
|
||||
| length
|
||||
| if . > 0 then "pass" else "fail" end'
|
||||
tests:
|
||||
test_items:
|
||||
- flag: "pass"
|
||||
remediation: |
|
||||
If no SCCs restrict host networking, create one by running:
|
||||
|
||||
---
|
||||
apiVersion: security.openshift.io/v1
|
||||
kind: SecurityContextConstraints
|
||||
metadata:
|
||||
name: restricted-no-hostnetwork
|
||||
allowHostNetwork: false
|
||||
runAsUser:
|
||||
type: MustRunAsRange
|
||||
seLinuxContext:
|
||||
type: MustRunAs
|
||||
users: []
|
||||
groups:
|
||||
- system:authenticated
|
||||
---
|
||||
|
||||
Ensure only workloads that require `hostNetwork: true` (e.g., CNI, infra pods) are allowed to use SCCs where it is explicitly enabled. Restrict access to such SCCs using RBAC.
|
||||
scored: true
|
||||
|
||||
- id: 5.2.5
|
||||
text: "Minimize the admission of containers with allowPrivilegeEscalation (manual)"
|
||||
audit: |
|
||||
oc get scc -o json \
|
||||
| jq -r '[.items[] | select(.allowPrivilegeEscalation==false) | .metadata.name]
|
||||
| length
|
||||
| if . > 0 then "pass" else "fail" end'
|
||||
tests:
|
||||
test_items:
|
||||
- flag: "pass"
|
||||
remediation: |
|
||||
If no SCCs exist that restrict the use of privilege escalation, create a custom SCC:
|
||||
|
||||
---
|
||||
apiVersion: security.openshift.io/v1
|
||||
kind: SecurityContextConstraints
|
||||
metadata:
|
||||
name: restricted-no-priv-escalation
|
||||
allowPrivilegeEscalation: false
|
||||
runAsUser:
|
||||
type: MustRunAsRange
|
||||
seLinuxContext:
|
||||
type: MustRunAs
|
||||
users: []
|
||||
groups:
|
||||
- system:authenticated
|
||||
---
|
||||
|
||||
Assign this SCC only to workloads and users that **do not require** the ability to escalate privileges.
|
||||
Use RBAC to restrict access to SCCs where `allowPrivilegeEscalation` is `true` to only trusted service accounts or admin roles.
|
||||
scored: true
|
||||
|
||||
|
||||
      - id: 5.2.6
        text: "Minimize the admission of root containers (Manual)"
        audit: |
          sccs=$(oc get scc -o json | jq -r '.items[] | select(.runAsUser.type == "MustRunAsNonRoot") | .metadata.name')
          if [[ -n "$sccs" ]]; then
            echo "pass"
          else
            echo "fail"
          fi
        tests:
          test_items:
            - flag: "pass"
        remediation: |
          If no SCC is found with `runAsUser.type: MustRunAsNonRoot`, create one as follows:

          ---
          apiVersion: security.openshift.io/v1
          kind: SecurityContextConstraints
          metadata:
            name: restricted-nonroot
          allowPrivilegeEscalation: false
          runAsUser:
            type: MustRunAsNonRoot
          seLinuxContext:
            type: MustRunAs
          users: []
          groups:
            - system:authenticated
          ---

          Assign this SCC only to workloads that must not run as root.
          If an SCC allows `RunAsAny`, audit and restrict access using RBAC to prevent misuse.
        scored: true

      - id: 5.2.7
        text: "Minimize the admission of containers with the NET_RAW capability (Manual)"
        audit: |
          oc get scc -o json \
            | jq -r '[.items[]
            | select((.requiredDropCapabilities // []) | index("ALL"))
            | .metadata.name]
            | length
            | if . > 0 then "pass" else "fail" end'
        tests:
          test_items:
            - flag: "pass"
        remediation: |
          If no SCCs drop ALL capabilities, create a custom SCC that explicitly drops NET_RAW:

          ---
          apiVersion: security.openshift.io/v1
          kind: SecurityContextConstraints
          metadata:
            name: restricted-no-netraw
          requiredDropCapabilities:
            - NET_RAW
          allowPrivilegedContainer: false
          runAsUser:
            type: MustRunAsRange
          seLinuxContext:
            type: MustRunAs
          users: []
          groups:
            - system:authenticated
          ---

          Apply this SCC to workloads that do not require NET_RAW.
          If NET_RAW is required (e.g., for low-level networking apps), isolate those workloads with a specific SCC and restrict access via RBAC.
        scored: true

      - id: 5.2.8
        text: "Minimize the admission of containers with added capabilities (Manual)"
        audit: |
          oc get scc -o json \
            | jq -r '[.items[]
            | select(.allowedCapabilities == null)
            | .metadata.name]
            | length
            | if . > 0 then "pass" else "fail" end'
          oc get scc -o json \
            | jq -r '[.items[]
            | select(.defaultAddCapabilities == null)
            | .metadata.name]
            | length
            | if . > 0 then "true" else "false" end'
        tests:
          test_items:
            - flag: "pass"
            - flag: "true"
        remediation: |
          If no SCCs restrict added capabilities, create a custom SCC as shown below:

          ---
          apiVersion: security.openshift.io/v1
          kind: SecurityContextConstraints
          metadata:
            name: restricted-no-added-caps
          allowPrivilegedContainer: false
          allowedCapabilities: []
          defaultAddCapabilities: []
          runAsUser:
            type: MustRunAsRange
          seLinuxContext:
            type: MustRunAs
          users: []
          groups:
            - system:authenticated
          ---

          Assign this SCC to workloads that do **not** require elevated capabilities.
          Create separate SCCs for workloads that require specific capabilities, and use RBAC to tightly restrict access to them.
        scored: true

      - id: 5.2.9
        text: "Minimize the admission of containers with capabilities assigned (Manual)"
        audit: |
          oc get scc -o json \
            | jq -r '[.items[]
            | select((.requiredDropCapabilities // []) | index("ALL"))
            | .metadata.name]
            | length
            | if . > 0 then "true" else "false" end'
        tests:
          test_items:
            - flag: "true"
        remediation: |
          If no SCCs drop all capabilities, create one that sets 'requiredDropCapabilities: [ALL]':

          ---
          apiVersion: security.openshift.io/v1
          kind: SecurityContextConstraints
          metadata:
            name: restricted-drop-all-capabilities
          requiredDropCapabilities:
            - ALL
          allowPrivilegedContainer: false
          runAsUser:
            type: MustRunAsRange
          seLinuxContext:
            type: MustRunAs
          users: []
          groups:
            - system:authenticated
          ---

          Apply this SCC to general-purpose workloads that do not require elevated Linux capabilities.
          If certain workloads require capabilities, create a separate SCC with minimal permissions and scope it using RBAC.
        scored: true

      - id: 5.2.10
        text: "Minimize access to privileged Security Context Constraints (Manual)"
        type: "manual"
        remediation: |
          Remove any users and groups who do not need access to an SCC, following the
          principle of least privilege.
          You can remove users and groups from an SCC using the oc edit scc $NAME
          command.
          Additionally, you can create your own SCCs that contain the container functionality you
          need for a particular use case and assign that SCC to users and groups if the default
          SCCs are not appropriate for your use case.
        scored: false

  - id: 5.3
    text: "Network Policies and CNI"
    checks:
      - id: 5.3.1
        text: "Ensure that the CNI in use supports Network Policies (Manual)"
        type: "manual"
        remediation: |
          None required.
        scored: false

      - id: 5.3.2
        text: "Ensure that all Namespaces have Network Policies defined (Manual)"
        type: "manual"
        audit: |
          #Run the following command and review the NetworkPolicy objects created in the cluster.
          oc get networkpolicy -A
        remediation: |
          Follow the documentation and create NetworkPolicy objects as you need them.
        scored: false

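      # Illustrative only (not part of the CIS text): a per-namespace
      # default-deny ingress policy is a common starting point; the policy
      # name is arbitrary.
      #
      #   apiVersion: networking.k8s.io/v1
      #   kind: NetworkPolicy
      #   metadata:
      #     name: default-deny-ingress
      #   spec:
      #     podSelector: {}
      #     policyTypes:
      #       - Ingress
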
  - id: 5.4
    text: "Secrets Management"
    checks:
      - id: 5.4.1
        text: "Prefer using secrets as files over secrets as environment variables (Manual)"
        type: "manual"
        audit: |
          #Run the following command to find references to objects which use environment variables defined from secrets.
          oc get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind}
          {.metadata.name} {"\n"}{end}' -A
        remediation: |
          If possible, rewrite application code to read secrets from mounted secret files, rather than
          from environment variables.
        scored: false

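      # Illustrative only (not part of the CIS text): a secret mounted as a
      # read-only file instead of an environment variable; all names are
      # hypothetical.
      #
      #   containers:
      #     - name: app
      #       image: example.com/app:latest
      #       volumeMounts:
      #         - name: creds
      #           mountPath: /etc/creds
      #           readOnly: true
      #   volumes:
      #     - name: creds
      #       secret:
      #         secretName: app-credentials
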
      - id: 5.4.2
        text: "Consider external secret storage (Manual)"
        type: "manual"
        remediation: |
          Refer to the secrets management options offered by your cloud provider or a third-party
          secrets management solution.
        scored: false

  - id: 5.5
    text: "Extensible Admission Control"
    checks:
      - id: 5.5.1
        text: "Configure Image Provenance using image controller configuration parameters (Manual)"
        type: "manual"
        remediation: |
          Follow the OpenShift documentation: [Image configuration resources](https://docs.openshift.com/container-platform/4.5/openshift_images/image-configuration.html)
        scored: false

  - id: 5.7
    text: "General Policies"
    checks:
      - id: 5.7.1
        text: "Create administrative boundaries between resources using namespaces (Manual)"
        type: "manual"
        audit: |
          #Run the following command and review the namespaces created in the cluster.
          oc get namespaces
          #Ensure that these namespaces are the ones you need and are adequately administered as per your requirements.
        remediation: |
          Follow the documentation and create namespaces for objects in your deployment as you need
          them.
        scored: false

      - id: 5.7.2
        text: "Ensure that the seccomp profile is set to docker/default in your pod definitions (Manual)"
        type: "manual"
        remediation: |
          To enable the default seccomp profile, use the reserved value runtime/default, which
          ensures that the pod uses the default seccomp policy available on the host.
        scored: false

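      # Illustrative only (not part of the CIS text): requesting the runtime
      # default seccomp profile at the pod level.
      #
      #   spec:
      #     securityContext:
      #       seccompProfile:
      #         type: RuntimeDefault
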
      - id: 5.7.3
        text: "Apply Security Context to Your Pods and Containers (Manual)"
        type: "manual"
        remediation: |
          Follow the Kubernetes documentation and apply security contexts to your pods. For a
          suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker
          Containers.
        scored: false

      - id: 5.7.4
        text: "The default namespace should not be used (Manual)"
        type: "manual"
        audit: |
          #Run this command to list objects in default namespace
          oc project default
          oc get all
          #The only entries there should be system managed resources such as the kubernetes and openshift service
        remediation: |
          Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
          resources and that all new resources are created in a specific namespace.
        scored: false
2 cfg/rh-1.8/config.yaml Normal file
@@ -0,0 +1,2 @@
---
## Version-specific settings that override the values in cfg/config.yaml
62 cfg/rh-1.8/controlplane.yaml Normal file
@@ -0,0 +1,62 @@
---
controls:
version: rh-1.8
id: 3
text: "Control Plane Configuration"
type: "controlplane"
groups:
  - id: 3.1
    text: "Authentication and Authorization"
    checks:
      - id: 3.1.1
        text: "Client certificate authentication should not be used for users (Manual)"
        audit: |
          # To verify user authentication is enabled
          oc describe authentication
          # To verify that an identity provider is configured
          oc get identity
          # To verify that a custom cluster-admin user exists
          oc get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].kind | grep cluster-admin | grep User
          # To verify that kubeadmin is removed, no results should be returned
          oc get secrets kubeadmin -n kube-system
        type: manual
        remediation: |
          Configure an identity provider for the OpenShift cluster.
          Understanding identity provider configuration | Authentication | OpenShift
          Container Platform 4.5. Once an identity provider has been defined,
          you can use RBAC to define and apply permissions.
          After you define an identity provider and create a new cluster-admin user,
          remove the kubeadmin user to improve cluster security.
        scored: false

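      # Illustrative only (not part of the CIS text): an identity provider
      # can be declared on the cluster OAuth resource roughly as below; the
      # secret name "htpass-secret" is a hypothetical example.
      #
      #   apiVersion: config.openshift.io/v1
      #   kind: OAuth
      #   metadata:
      #     name: cluster
      #   spec:
      #     identityProviders:
      #       - name: local_users
      #         mappingMethod: claim
      #         type: HTPasswd
      #         htpasswd:
      #           fileData:
      #             name: htpass-secret
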
  - id: 3.2
    text: "Logging"
    checks:
      - id: 3.2.1
        text: "Ensure that a minimal audit policy is created (Manual)"
        audit: |
          #To view kube apiserver log files
          oc adm node-logs --role=master --path=kube-apiserver/
          #To view openshift apiserver log files
          oc adm node-logs --role=master --path=openshift-apiserver/
          #To verify kube apiserver audit config
          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig[]?'
          #To verify openshift apiserver audit config
          oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig[]?'
        type: manual
        remediation: |
          No remediation required.
        scored: false

      - id: 3.2.2
        text: "Ensure that the audit policy covers key security concerns (Manual)"
        audit: |
          #To verify kube apiserver audit config
          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]?'
          #To verify openshift apiserver audit config
          oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]?'
        type: manual
        remediation: |
          In OpenShift 4.6 and higher, if appropriate for your needs,
          modify the audit policy.
        scored: false
183 cfg/rh-1.8/etcd.yaml Normal file
@@ -0,0 +1,183 @@
---
controls:
version: rh-1.8
id: 2
text: "Etcd"
type: "etcd"
groups:
  - id: 2
    text: "Etcd"
    checks:
      - id: 2.1
        text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Manual)"
        audit: |
          # Get the node name where the pod is running
          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
          # Get the pod name in the openshift-etcd namespace
          POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
          if [ -z "$POD_NAME" ]; then
            echo "No matching file found on the current node."
          else
            # Extract the flag values from the etcd process command line
            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--cert-file=[^ ]*\).*/\1/'
            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--key-file=[^ ]*\).*/\1/'
          fi
        use_multiple_values: true
        tests:
          test_items:
            - flag: "file"
              compare:
                op: regex
                value: '\/etc\/kubernetes\/static-pod-certs\/secrets\/etcd-all-(serving|certs)\/etcd-serving-.*\.(?:crt|key)'
        remediation: |
          OpenShift does not use the etcd-certfile or etcd-keyfile flags.
          Certificates for etcd are managed by the etcd cluster operator.
        scored: true

      - id: 2.2
        text: "Ensure that the --client-cert-auth argument is set to true (Manual)"
        audit: |
          # Get the node name where the pod is running
          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
          # Get the pod name in the openshift-etcd namespace
          POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
          if [ -z "$POD_NAME" ]; then
            echo "No matching file found on the current node."
          else
            # Extract the flag from the etcd process command line
            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--client-cert-auth=[^ ]*\).*/\1/'
          fi
        use_multiple_values: true
        tests:
          test_items:
            - flag: "--client-cert-auth"
              compare:
                op: eq
                value: true
        remediation: |
          This setting is managed by the cluster etcd operator. No remediation required.
        scored: true

      - id: 2.3
        text: "Ensure that the --auto-tls argument is not set to true (Manual)"
        audit: |
          # Returns 0 if found, 1 if not found
          # Get the node name where the pod is running
          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
          # Get the pod name in the openshift-etcd namespace
          POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
          if [ -z "$POD_NAME" ]; then
            echo "No matching file found on the current node."
          else
            # Grep the etcd process command line for the flag
            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | grep -- --auto-tls=true 2>/dev/null ; echo exit_code=$?
          fi
        use_multiple_values: true
        tests:
          test_items:
            - flag: "exit_code"
              compare:
                op: eq
                value: "1"
        remediation: |
          This setting is managed by the cluster etcd operator. No remediation required.
        scored: true

      - id: 2.4
        text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Manual)"
        audit: |
          # Get the node name where the pod is running
          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
          # Get the pod name in the openshift-etcd namespace
          POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
          if [ -z "$POD_NAME" ]; then
            echo "No matching file found on the current node."
          else
            # Extract the flag values from the etcd process command line
            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-cert-file=[^ ]*\).*/\1/'
            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-key-file=[^ ]*\).*/\1/'
          fi
        use_multiple_values: true
        tests:
          test_items:
            - flag: "file"
              compare:
                op: regex
                value: '\/etc\/kubernetes\/static-pod-certs\/secrets\/etcd-all-(peer|certs)\/etcd-peer-.*\.(?:crt|key)'
        remediation: |
          None. This configuration is managed by the etcd operator.
        scored: true

      - id: 2.5
        text: "Ensure that the --peer-client-cert-auth argument is set to true (Manual)"
        audit: |
          # Get the node name where the pod is running
          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
          # Get the pod name in the openshift-etcd namespace
          POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
          if [ -z "$POD_NAME" ]; then
            echo "No matching file found on the current node."
          else
            # Extract the flag from the etcd process command line
            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-client-cert-auth=[^ ]*\).*/\1/'
          fi
        use_multiple_values: true
        tests:
          test_items:
            - flag: "--peer-client-cert-auth"
              compare:
                op: eq
                value: true
        remediation: |
          This setting is managed by the cluster etcd operator. No remediation required.
        scored: true

      - id: 2.6
        text: "Ensure that the --peer-auto-tls argument is not set to true (Manual)"
        audit: |
          # Returns 0 if found, 1 if not found
          # Get the node name where the pod is running
          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
          # Get the pod name in the openshift-etcd namespace
          POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
          if [ -z "$POD_NAME" ]; then
            echo "No matching file found on the current node."
          else
            # Grep the etcd process command line for the flag
            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | grep -- --peer-auto-tls=true 2>/dev/null ; echo exit_code=$?
          fi
        use_multiple_values: true
        tests:
          test_items:
            - flag: "exit_code"
              compare:
                op: eq
                value: "1"
        remediation: |
          This setting is managed by the cluster etcd operator. No remediation required.
        scored: true

      - id: 2.7
        text: "Ensure that a unique Certificate Authority is used for etcd (Manual)"
        audit: |
          # Get the node name where the pod is running
          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
          # Get the pod name in the openshift-etcd namespace
          POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
          if [ -z "$POD_NAME" ]; then
            echo "No matching file found on the current node."
          else
            # Extract the flag values from the etcd process command line
            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--trusted-ca-file=[^ ]*\).*/\1/'
            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-trusted-ca-file=[^ ]*\).*/\1/'
          fi
        use_multiple_values: true
        tests:
          test_items:
            - flag: "file"
              compare:
                op: regex
                value: '\/etc\/kubernetes\/static-pod-certs\/configmaps\/(?:etcd-(?:serving|peer-client)-ca\/ca-bundle\.crt|etcd-all-bundles\/server-ca-bundle\.crt)'
        remediation: |
          None required. Certificates for etcd are managed by the OpenShift cluster etcd operator.
        scored: true
1285 cfg/rh-1.8/master.yaml Normal file
File diff suppressed because it is too large
485 cfg/rh-1.8/node.yaml Normal file
@@ -0,0 +1,485 @@
---
controls:
version: rh-1.8
id: 4
text: "Worker Nodes"
type: "node"
groups:
  - id: 4.1
    text: "Worker Node Configuration Files"
    checks:
      - id: 4.1.1
        text: "Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated)"
        audit: |
          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
          oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/systemd/system/kubelet.service 2> /dev/null
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "644"
        remediation: |
          By default, the kubelet service file has permissions of 644.
        scored: true

      - id: 4.1.2
        text: "Ensure that the kubelet service file ownership is set to root:root (Automated)"
        audit: |
          # Should return root:root for each node
          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
          oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/systemd/system/kubelet.service 2> /dev/null
        tests:
          test_items:
            - flag: root:root
        remediation: |
          By default, the kubelet service file has ownership of root:root.
        scored: true

      - id: 4.1.3
        text: "If proxy kubeconfig file exists ensure permissions are set to 644 or more restrictive (Manual)"
        audit: |
          # Get the node name where the pod is running
          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
          # Get the pod name in the openshift-sdn namespace
          POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)

          if [ -z "$POD_NAME" ]; then
            echo "No matching pods found on the current node."
          else
            # Execute the stat command
            oc exec -n openshift-sdn "$POD_NAME" -- stat -Lc "$NODE_NAME %n permissions=%a" /config/kube-proxy-config.yaml 2>/dev/null
          fi
        tests:
          test_items:
            - flag: "permissions"
              set: true
              compare:
                op: bitmask
                value: "644"
        remediation: |
          None needed.
        scored: true

      - id: 4.1.4
        text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)"
        audit: |
          # Get the node name where the pod is running
          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
          # Get the pod name in the openshift-sdn namespace
          POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)

          if [ -z "$POD_NAME" ]; then
            echo "No matching pods found on the current node."
          else
            # Execute the stat command
            oc exec -n openshift-sdn "$POD_NAME" -- stat -Lc "$NODE_NAME %n %U:%G" /config/kube-proxy-config.yaml 2>/dev/null
          fi
        use_multiple_values: true
        tests:
          test_items:
            - flag: root:root
        remediation: |
          None required. The configuration is managed by OpenShift operators.
        scored: true

      - id: 4.1.5
        text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Manual)"
        audit: |
          # Check permissions
          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
          oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/kubernetes/kubelet.conf 2> /dev/null
        use_multiple_values: true
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "644"
        remediation: |
          None required.
        scored: true

      - id: 4.1.6
        text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Manual)"
        audit: |
          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
          oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubelet.conf 2> /dev/null
        use_multiple_values: true
        tests:
          test_items:
            - flag: root:root
        remediation: |
          None required.
        scored: true

      - id: 4.1.7
        text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive"
        audit: |
          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
          oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig.authentication.x509.clientCAFile'
          oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME permissions=%a" /etc/kubernetes/kubelet-ca.crt 2> /dev/null
        tests:
          test_items:
            - flag: "/etc/kubernetes/kubelet-ca.crt"
            - flag: "permissions"
              compare:
                op: bitmask
                value: "644"
        remediation: |
          No remediation required. OpenShift sets /etc/kubernetes/kubelet-ca.crt to 644 by default.
          If permissions are more permissive than 644, update with: chmod 644 /etc/kubernetes/kubelet-ca.crt
        scored: true

      - id: 4.1.8
        text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)"
        audit: |
          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
          oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubelet-ca.crt 2> /dev/null
        use_multiple_values: true
        tests:
          test_items:
            - flag: root:root
        remediation: |
          None required.
        scored: true

      - id: 4.1.9
        text: "Ensure that the kubelet --config configuration file has permissions set to 600 or more restrictive (Automated)"
        audit: |
          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
          oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /var/data/kubelet/config.json 2> /dev/null
        use_multiple_values: true
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "600"
        remediation: |
          None required.
        scored: true

      - id: 4.1.10
        text: "Ensure that the kubelet configuration file ownership is set to root:root (Automated)"
        audit: |
          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
          oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /var/data/kubelet/config.json 2> /dev/null
        use_multiple_values: true
        tests:
          test_items:
            - flag: root:root
        remediation: |
          None required.
        scored: true

  - id: 4.2
    text: "Kubelet"
    checks:
      - id: 4.2.1
        text: "Activate Garbage collection in OpenShift Container Platform 4, as appropriate (Manual)"
        audit: |
          echo "Retrieving and inspecting garbage collection configuration from node-local kubelet configz..."

          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
          oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig'
        tests:
          test_items:
            - flag: "evictionHard"
            - flag: "imageGCHighThresholdPercent"
            - flag: "imageGCLowThresholdPercent"
            - flag: "imageMinimumGCAge"
        remediation: |
          OpenShift manages node garbage collection through KubeletConfig custom resources per MachineConfigPool.
          To configure or adjust garbage collection thresholds, follow the documentation:
          https://docs.openshift.com/container-platform/latest/nodes/nodes/nodes-nodes-garbage-collection.html

          Example: Create or modify a KubeletConfig object to include:
          ---
          evictionHard:
            "memory.available": "200Mi"
            "nodefs.available": "10%"
            "imagefs.available": "15%"
          imageGCHighThresholdPercent: 85
          imageGCLowThresholdPercent: 80
          imageMinimumGCAge: "2m0s"

          Then apply the `KubeletConfig` to the appropriate `MachineConfigPool`.
        scored: true

      - id: 4.2.2
        text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
        audit: |
          echo "Checking if anonymous-auth is disabled in kubelet configuration on the current node..."

          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
          oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig.authentication.anonymous.enabled'
        tests:
          test_items:
            - flag: "false"
        remediation: |
          By default, OpenShift sets anonymous-auth to false in Kubelet configuration.
          If this value is found to be true, create or patch a KubeletConfig object with:

          ---
          kind: KubeletConfig
          apiVersion: machineconfiguration.openshift.io/v1
          metadata:
            name: disable-anonymous-auth
          spec:
            kubeletConfig:
              authentication:
                anonymous:
                  enabled: false

          Then apply this KubeletConfig to the appropriate MachineConfigPool.
          See OpenShift documentation on configuring node-level security settings.
        scored: true

      - id: 4.2.3
        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
        audit: |
          echo "Checking kubelet authorization mode on the current node..."

          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
          oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig.authorization.mode'
        tests:
          test_items:
            - flag: AlwaysAllow
              set: false
        remediation: |
          No remediation required. By default, OpenShift uses secure authorization modes such as 'Webhook' and does not allow AlwaysAllow.
          If AlwaysAllow is found, the node must be reconfigured using a KubeletConfig applied through the appropriate MachineConfigPool.
        scored: true

      - id: 4.2.4
        text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
        audit: |
          echo "Checking Kubelet 'clientCAFile' setting on current node..."

          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
          oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz \
            | jq '.kubeletconfig.authentication.x509.clientCAFile'
        tests:
          test_items:
            - flag: "/etc/kubernetes/kubelet-ca.crt"
        remediation: |
          No remediation required. OpenShift sets the clientCAFile by default to /etc/kubernetes/kubelet-ca.crt.
          Manual modification is unsupported and unnecessary as OpenShift manages Kubelet certificate authentication via the Machine Config Operator.
        scored: true

      - id: 4.2.5
        text: "Verify that the read only port is not used or is set to 0 (Automated)"
        audit: |
          echo "Checking 'kubelet-read-only-port' argument in openshift-kube-apiserver config..."

          oc -n openshift-kube-apiserver get configmap config -o json \
            | jq -r '.data["config.yaml"]' \
            | yq '.apiServerArguments."kubelet-read-only-port"[0]'
        tests:
          test_items:
            - flag: "0"
        remediation: |
          No remediation is required if the read-only port is set to 0.
          If this value is not set to 0 (or the argument is missing), create a KubeletConfig object and apply it to the appropriate MachineConfigPool to disable the read-only port.

          Example KubeletConfig:
          ---
          apiVersion: machineconfiguration.openshift.io/v1
          kind: KubeletConfig
          metadata:
            name: disable-readonly-port
          spec:
            kubeletConfig:
              readOnlyPort: 0
        scored: true

      - id: 4.2.6
        text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)"
        audit: |
          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
          oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz \
            | jq '.kubeletconfig'
        tests:
          test_items:
            - path: ".streamingConnectionIdleTimeout"
              compare:
                op: noteq
                value: "0s"
        remediation: |
          By default, OpenShift sets streamingConnectionIdleTimeout to 4h0m0s.
          If it is manually set to "0s", timeouts are disabled entirely, which is insecure.

          To remediate, create a `KubeletConfig` CR with a safer timeout (e.g., 1h0m0s):
          ---
          apiVersion: machineconfiguration.openshift.io/v1
          kind: KubeletConfig
          metadata:
            name: set-streaming-timeout
          spec:
            kubeletConfig:
              streamingConnectionIdleTimeout: "1h0m0s"
        scored: true

      - id: 4.2.7
        text: "Ensure that the --make-iptables-util-chains argument is set to true (Manual)"
        audit: |
          echo "Checking 'makeIPTablesUtilChains' setting in Kubelet config on current node..."

          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
          oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz \
            | jq '.kubeletconfig'
        tests:
          test_items:
            - path: ".makeIPTablesUtilChains"
              compare:
                op: eq
                value: true
        remediation: |
          No remediation is required.
          By default, OpenShift sets makeIPTablesUtilChains to true.
          This allows Kubelet to manage iptables rules and keep them in sync with the dynamic pod network configuration.
        scored: true

      - id: 4.2.8
        text: "Ensure that the kubeAPIQPS [--event-qps] argument is set to 0 or a level which ensures appropriate event capture (Manual)"
        audit: |
          echo "Checking 'kubeAPIQPS' setting in Kubelet config on current node..."

          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
          oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz \
            | jq '.kubeletconfig'
        tests:
          test_items:
            - path: ".kubeAPIQPS"
              compare:
                op: gte
                value: 1
        remediation: |
          OpenShift sets kubeAPIQPS to a default of 50, which is appropriate in most environments.
          If kubeAPIQPS is set to 0, the kubelet's client-side rate limiting is disabled, which can
          flood the API server with excessive requests and events.

          To configure a proper limit, create or modify a `KubeletConfig` resource with an appropriate value:

          ---
          apiVersion: machineconfiguration.openshift.io/v1
          kind: KubeletConfig
          metadata:
            name: set-kubeapiqps
          spec:
            kubeletConfig:
              kubeAPIQPS: 50
        scored: true

      - id: 4.2.9
        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)"
        audit: |
          oc get configmap config -n openshift-kube-apiserver -ojson \
            | jq -r '.data["config.yaml"]' \
            | jq -r '.apiServerArguments["kubelet-client-certificate"][]?'

          oc get configmap config -n openshift-kube-apiserver -ojson \
            | jq -r '.data["config.yaml"]' \
            | jq -r '.apiServerArguments["kubelet-client-key"][]?'
        tests:
          bin_op: and
          test_items:
            - flag: "/etc/kubernetes/static-pod-certs/secrets/kubelet-client/tls.crt"
            - flag: "/etc/kubernetes/static-pod-certs/secrets/kubelet-client/tls.key"
        remediation: |
          No remediation is required. OpenShift manages secure TLS connections to kubelets by default using its internal certificate authority.
          These X.509 certificates are rotated and validated automatically by the platform.
          Manual modifications to the TLS paths or keys are not supported and can lead to cluster issues.
        scored: true

      - id: 4.2.10
        text: "Ensure that the --rotate-certificates argument is not set to false (Manual)"
        audit: |
          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
          oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz \
            | jq '.kubeletconfig'
        tests:
          test_items:
            - path: ".rotateCertificates"
              compare:
                op: eq
                value: true
        remediation: |
          No remediation required. By default, OpenShift enables certificate rotation via rotateCertificates=true.
          If disabled, you must either enable rotation via KubeletConfig or implement external certificate renewal.

          Example remediation using KubeletConfig:
          ---
          apiVersion: machineconfiguration.openshift.io/v1
          kind: KubeletConfig
          metadata:
            name: enable-cert-rotation
          spec:
            kubeletConfig:
              rotateCertificates: true
        scored: true

      - id: 4.2.11
        text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)"
        audit: |
          echo "Checking that RotateKubeletServerCertificate is enabled in kubelet config on current node..."

          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')

          echo "Verifying feature gate: RotateKubeletServerCertificate"
          oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz \
            | jq '.kubeletconfig.featureGates.RotateKubeletServerCertificate'

          echo "Verifying that certificate rotation is enabled"
          oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz \
            | jq '.kubeletconfig.rotateCertificates'
        tests:
          bin_op: and
          test_items:
            - flag: "RotateKubeletServerCertificate"
              compare:
                op: eq
                value: true
            - flag: "rotateCertificates"
              compare:
                op: eq
                value: true
        remediation: |
          No remediation is required. OpenShift enables RotateKubeletServerCertificate by default and manages certificate rotation automatically.
          If the feature gate or rotation setting is disabled, configure a `KubeletConfig` CR and apply it to the MachineConfigPool:

          ---
          apiVersion: machineconfiguration.openshift.io/v1
          kind: KubeletConfig
          metadata:
            name: enable-server-cert-rotation
          spec:
            kubeletConfig:
              rotateCertificates: true
              featureGates:
                RotateKubeletServerCertificate: true
        scored: true

      - id: 4.2.13
        text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)"
        audit: |
          # needs verification
          # verify cipher suites
          oc describe --namespace=openshift-ingress-operator ingresscontroller/default
          oc get kubeapiservers.operator.openshift.io cluster -o json | jq .spec.observedConfig.servingInfo
          oc get openshiftapiservers.operator.openshift.io cluster -o json | jq .spec.observedConfig.servingInfo
          oc get cm -n openshift-authentication v4-0-config-system-cliconfig -o jsonpath='{.data.v4\-0\-config\-system\-cliconfig}' | jq .servingInfo
          #check value for tlsSecurityProfile; null is returned if default is used
          oc get kubeapiservers.operator.openshift.io cluster -o json | jq .spec.tlsSecurityProfile
        type: manual
        remediation: |
          Follow the directions above and in the OpenShift documentation to configure the tlsSecurityProfile.
          Configuring Ingress
        scored: false
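      # Illustrative only (not part of the CIS text): a TLS security profile
      # can be set on the cluster APIServer resource roughly as below; the
      # same spec.tlsSecurityProfile stanza also applies to the
      # IngressController.
      #
      #   apiVersion: config.openshift.io/v1
      #   kind: APIServer
      #   metadata:
      #     name: cluster
      #   spec:
      #     tlsSecurityProfile:
      #       type: Intermediate
      #       intermediate: {}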
486 cfg/rh-1.8/policies.yaml Normal file
@@ -0,0 +1,486 @@
---
controls:
version: rh-1.8
id: 5
text: "Policies"
type: "policies"
groups:
  - id: 5.1
    text: "RBAC and Service Accounts"
    checks:
      - id: 5.1.1
        text: "Ensure that the cluster-admin role is only used where required (Manual)"
        type: "manual"
        audit: |
          #To get a list of users and service accounts with the cluster-admin role
          oc get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].kind |
          grep cluster-admin
          #To verify that kubeadmin is removed, no results should be returned
          oc get secrets kubeadmin -n kube-system
        remediation: |
          Identify all clusterrolebindings to the cluster-admin role. Check if they are used and if they need this role or if they could use a role with fewer privileges.
          Where possible, first bind users to a lower privileged role and then remove the clusterrolebinding to the cluster-admin role:
          oc delete clusterrolebinding [name]
        scored: false

      - id: 5.1.2
        text: "Minimize access to secrets (Manual)"
        type: "manual"
        remediation: |
          Where possible, remove get, list and watch access to secret objects in the cluster.
        scored: false

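      # Illustrative only (not part of the CIS text): a namespaced Role that
      # grants read access to configmaps but deliberately omits the get, list
      # and watch verbs on secrets; all names are hypothetical.
      #
      #   apiVersion: rbac.authorization.k8s.io/v1
      #   kind: Role
      #   metadata:
      #     name: app-reader
      #     namespace: my-app
      #   rules:
      #     - apiGroups: [""]
      #       resources: ["configmaps"]
      #       verbs: ["get", "list", "watch"]
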
      - id: 5.1.3
        text: "Minimize wildcard use in Roles and ClusterRoles (Manual)"
        type: "manual"
        remediation: |
          Where possible replace any use of wildcards in clusterroles and roles with specific
          objects or actions.
        scored: false

      - id: 5.1.4
        text: "Minimize access to create pods (Manual)"
        type: "manual"
        remediation: |
          Where possible, remove create access to pod objects in the cluster.
        scored: false

      - id: 5.1.5
        text: "Ensure that default service accounts are not actively used. (Manual)"
        type: "manual"
        remediation: |
          None required.
        scored: false

      - id: 5.1.6
        text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)"
        type: "manual"
        remediation: |
          Modify the definition of pods and service accounts which do not need to mount service
          account tokens to disable it.
        scored: false

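      # Illustrative only (not part of the CIS text): automatic token
      # mounting can be switched off on the service account (or per pod via
      # the same field in the pod spec); the account name is hypothetical.
      #
      #   apiVersion: v1
      #   kind: ServiceAccount
      #   metadata:
      #     name: app-sa
      #   automountServiceAccountToken: false
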
  - id: 5.2
    text: "Security Context Constraints (SCCs)"
    checks:
      - id: 5.2.1
        text: "Minimize the admission of privileged containers (Manual)"
        audit: |
          oc get scc -o json \
            | jq -r '[.items[] | select(.allowPrivilegedContainer==false) | .metadata.name]
            | length
            | if . > 0 then "pass" else "fail" end'
        tests:
          test_items:
            - flag: "pass"
        remediation: |
          If no SCCs exist that restrict privileged containers, create one by running:

          oc create -f - <<EOF
          apiVersion: security.openshift.io/v1
          kind: SecurityContextConstraints
          metadata:
            name: restricted-no-priv
          allowPrivilegedContainer: false
          runAsUser:
            type: MustRunAsRange
          seLinuxContext:
            type: MustRunAs
          users: []
          groups:
            - system:authenticated
          EOF

          Then apply appropriate RBAC to assign this SCC only to necessary service accounts, groups, or users.
          Carefully avoid assigning `allowPrivilegedContainer: true` in any SCC that is broadly bound.
        scored: true

      - id: 5.2.2
        text: "Minimize the admission of containers wishing to share the host process ID namespace (Manual)"
        audit: |
          oc get scc -o json \
            | jq -r '[.items[] | select(.allowHostPID==false) | .metadata.name]
            | length
            | if . > 0 then "pass" else "fail" end'
        tests:
          test_items:
            - flag: "pass"
        remediation: |
          If SCCs with `allowHostPID: true` exist, ensure they are restricted to trusted service accounts only.

          To create a restrictive SCC that prevents host PID sharing:

          ---
          apiVersion: security.openshift.io/v1
          kind: SecurityContextConstraints
          metadata:
            name: restricted-no-hostpid
          allowHostPID: false
          runAsUser:
            type: MustRunAsRange
          seLinuxContext:
            type: MustRunAs
          users: []
          groups:
            - system:authenticated
          ---

          Apply the SCC and bind it only to users or groups that do **not** need hostPID access.
        scored: true

      - id: 5.2.3
        text: "Minimize the admission of containers wishing to share the host IPC namespace (Manual)"
        audit: |
          oc get scc -o json \
            | jq -r '[.items[] | select(.allowHostIPC==false) | .metadata.name]
            | length
            | if . > 0 then "pass" else "fail" end'
        tests:
          test_items:
            - flag: "pass"
        remediation: |
          If no SCCs restrict hostIPC usage, create one that explicitly sets allowHostIPC: false:

          ---
          apiVersion: security.openshift.io/v1
          kind: SecurityContextConstraints
          metadata:
            name: restricted-no-hostipc
          allowHostIPC: false
          runAsUser:
            type: MustRunAsRange
          seLinuxContext:
            type: MustRunAs
          users: []
          groups:
            - system:authenticated
          ---

          Then assign this SCC to general workloads and ensure any SCCs allowing hostIPC are tightly scoped via RBAC.
        scored: true

      - id: 5.2.4
        text: "Minimize the admission of containers wishing to share the host network namespace (Manual)"
        audit: |
          oc get scc -o json \
            | jq -r '[.items[] | select(.allowHostNetwork==false) | .metadata.name]
            | length
            | if . > 0 then "pass" else "fail" end'
        tests:
          test_items:
            - flag: "pass"
        remediation: |
          If no SCCs restrict host networking, create one by running:

          ---
          apiVersion: security.openshift.io/v1
          kind: SecurityContextConstraints
          metadata:
            name: restricted-no-hostnetwork
          allowHostNetwork: false
          runAsUser:
            type: MustRunAsRange
          seLinuxContext:
            type: MustRunAs
          users: []
          groups:
            - system:authenticated
          ---

          Ensure only workloads that require `hostNetwork: true` (e.g., CNI, infra pods) are allowed to use SCCs where it is explicitly enabled. Restrict access to such SCCs using RBAC.
        scored: true

      - id: 5.2.5
        text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)"
        audit: |
          oc get scc -o json \
            | jq -r '[.items[] | select(.allowPrivilegeEscalation==false) | .metadata.name]
            | length
            | if . > 0 then "pass" else "fail" end'
        tests:
          test_items:
            - flag: "pass"
        remediation: |
          If no SCCs exist that restrict the use of privilege escalation, create a custom SCC:

          ---
          apiVersion: security.openshift.io/v1
          kind: SecurityContextConstraints
          metadata:
            name: restricted-no-priv-escalation
          allowPrivilegeEscalation: false
          runAsUser:
            type: MustRunAsRange
          seLinuxContext:
            type: MustRunAs
          users: []
          groups:
            - system:authenticated
          ---

          Assign this SCC only to workloads and users that **do not require** the ability to escalate privileges.
          Use RBAC to restrict access to SCCs where `allowPrivilegeEscalation` is `true` to only trusted service accounts or admin roles.
        scored: true

      - id: 5.2.6
        text: "Minimize the admission of root containers (Manual)"
        audit: |
          sccs=$(oc get scc -o json | jq -r '.items[] | select(.runAsUser.type == "MustRunAsNonRoot") | .metadata.name')
          if [[ -n "$sccs" ]]; then
            echo "pass"
          else
            echo "fail"
          fi
        tests:
          test_items:
            - flag: "pass"
        remediation: |
          If no SCC is found with `runAsUser.type: MustRunAsNonRoot`, create one as follows:

          ---
          apiVersion: security.openshift.io/v1
          kind: SecurityContextConstraints
          metadata:
            name: restricted-nonroot
          allowPrivilegeEscalation: false
          runAsUser:
            type: MustRunAsNonRoot
          seLinuxContext:
            type: MustRunAs
          users: []
          groups:
            - system:authenticated
          ---

          Assign this SCC only to workloads that must not run as root.
          If an SCC allows `RunAsAny`, audit and restrict access using RBAC to prevent misuse.
        scored: true

      - id: 5.2.7
        text: "Minimize the admission of containers with the NET_RAW capability (Manual)"
        audit: |
          oc get scc -o json \
            | jq -r '[.items[]
            | select((.requiredDropCapabilities // []) | index("ALL"))
            | .metadata.name]
            | length
            | if . > 0 then "pass" else "fail" end'
        tests:
          test_items:
            - flag: "pass"
        remediation: |
          If no SCCs drop ALL capabilities, create a custom SCC that explicitly drops NET_RAW:

          ---
          apiVersion: security.openshift.io/v1
          kind: SecurityContextConstraints
          metadata:
            name: restricted-no-netraw
          requiredDropCapabilities:
            - NET_RAW
          allowPrivilegedContainer: false
          runAsUser:
            type: MustRunAsRange
          seLinuxContext:
            type: MustRunAs
          users: []
          groups:
            - system:authenticated
          ---

          Apply this SCC to workloads that do not require NET_RAW.
          If NET_RAW is required (e.g., for low-level networking apps), isolate those workloads with a specific SCC and restrict access via RBAC.
        scored: true

      - id: 5.2.8
        text: "Minimize the admission of containers with added capabilities (Manual)"
        audit: |
          oc get scc -o json \
            | jq -r '[.items[]
            | select(.allowedCapabilities == null)
            | .metadata.name]
            | length
            | if . > 0 then "pass" else "fail" end'
          oc get scc -o json \
            | jq -r '[.items[]
            | select(.defaultAddCapabilities == null)
            | .metadata.name]
            | length
            | if . > 0 then "true" else "false" end'
        tests:
          test_items:
            - flag: "pass"
            - flag: "true"
        remediation: |
          If no SCCs restrict added capabilities, create a custom SCC as shown below:

          ---
          apiVersion: security.openshift.io/v1
          kind: SecurityContextConstraints
          metadata:
            name: restricted-no-added-caps
          allowPrivilegedContainer: false
          allowedCapabilities: []
          defaultAddCapabilities: []
          runAsUser:
            type: MustRunAsRange
          seLinuxContext:
            type: MustRunAs
          users: []
          groups:
            - system:authenticated
          ---

          Assign this SCC to workloads that do **not** require elevated capabilities.
          Create separate SCCs for workloads that require specific capabilities, and use RBAC to tightly restrict access to them.
        scored: true

      - id: 5.2.9
        text: "Minimize the admission of containers with capabilities assigned (Manual)"
        audit: |
          oc get scc -o json \
            | jq -r '[.items[]
            | select((.requiredDropCapabilities // []) | index("ALL"))
            | .metadata.name]
            | length
            | if . > 0 then "true" else "false" end'
        tests:
          test_items:
            - flag: "true"
        remediation: |
          If no SCCs drop all capabilities, create one that sets 'requiredDropCapabilities: [ALL]':

          ---
          apiVersion: security.openshift.io/v1
          kind: SecurityContextConstraints
          metadata:
            name: restricted-drop-all-capabilities
          requiredDropCapabilities:
            - ALL
          allowPrivilegedContainer: false
          runAsUser:
            type: MustRunAsRange
          seLinuxContext:
            type: MustRunAs
          users: []
          groups:
            - system:authenticated
          ---

          Apply this SCC to general-purpose workloads that do not require elevated Linux capabilities.
          If certain workloads require capabilities, create a separate SCC with minimal permissions and scope it using RBAC.
        scored: true

      - id: 5.2.10
        text: "Minimize access to privileged Security Context Constraints (Manual)"
        type: "manual"
        remediation: |
          Remove any users and groups who do not need access to an SCC, following the
          principle of least privilege.
          You can remove users and groups from an SCC using the oc edit scc $NAME
          command.
          Additionally, you can create your own SCCs that contain the container functionality you
          need for a particular use case and assign that SCC to users and groups if the default
          SCCs are not appropriate for your use case.
        scored: false

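      # Illustrative only (not part of the CIS text): access to an SCC can
      # also be revoked without editing it directly; the subject names are
      # hypothetical.
      #
      #   oc adm policy remove-scc-from-user privileged some-user
      #   oc adm policy remove-scc-from-group privileged some-group
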
  - id: 5.3
    text: "Network Policies and CNI"
    checks:
      - id: 5.3.1
        text: "Ensure that the CNI in use supports Network Policies (Manual)"
        type: "manual"
        remediation: |
          None required.
        scored: false

      - id: 5.3.2
        text: "Ensure that all Namespaces have Network Policies defined (Manual)"
        type: "manual"
        audit: |
          #Run the following command and review the NetworkPolicy objects created in the cluster.
          oc get networkpolicy -A
        remediation: |
          Follow the documentation and create NetworkPolicy objects as you need them.
        scored: false

  - id: 5.4
    text: "Secrets Management"
    checks:
      - id: 5.4.1
        text: "Prefer using secrets as files over secrets as environment variables (Manual)"
        type: "manual"
        audit: |
          #Run the following command to find references to objects which use environment variables defined from secrets.
          oc get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind}
          {.metadata.name} {"\n"}{end}' -A
        remediation: |
          If possible, rewrite application code to read secrets from mounted secret files, rather than
          from environment variables.
        scored: false

      - id: 5.4.2
        text: "Consider external secret storage (Manual)"
        type: "manual"
        remediation: |
          Refer to the secrets management options offered by your cloud provider or a third-party
          secrets management solution.
        scored: false

  - id: 5.5
    text: "Extensible Admission Control"
    checks:
      - id: 5.5.1
        text: "Configure Image Provenance using image controller configuration parameters (Manual)"
        type: "manual"
        remediation: |
          Follow the OpenShift documentation: [Image configuration resources](https://docs.openshift.com/container-platform/4.5/openshift_images/image-configuration.html)
        scored: false

  - id: 5.7
    text: "General Policies"
    checks:
      - id: 5.7.1
        text: "Create administrative boundaries between resources using namespaces (Manual)"
        type: "manual"
        audit: |
          #Run the following command and review the namespaces created in the cluster.
          oc get namespaces
          #Ensure that these namespaces are the ones you need and are adequately administered as per your requirements.
        remediation: |
          Follow the documentation and create namespaces for objects in your deployment as you need
          them.
        scored: false

      - id: 5.7.2
        text: "Ensure that the seccomp profile is set to docker/default in your pod definitions (Manual)"
        type: "manual"
        remediation: |
          To enable the default seccomp profile, use the reserved value runtime/default, which
          ensures that the pod uses the default seccomp policy available on the host.
        scored: false

      - id: 5.7.3
        text: "Apply Security Context to Your Pods and Containers (Manual)"
        type: "manual"
        remediation: |
          Follow the Kubernetes documentation and apply security contexts to your pods. For a
          suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker
          Containers.
        scored: false

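      # Illustrative only (not part of the CIS text): a minimal
      # container-level security context for a general-purpose workload.
      #
      #   securityContext:
      #     runAsNonRoot: true
      #     allowPrivilegeEscalation: false
      #     capabilities:
      #       drop:
      #         - ALL
      #     readOnlyRootFilesystem: true
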
      - id: 5.7.4
        text: "The default namespace should not be used (Manual)"
        type: "manual"
        audit: |
          #Run this command to list objects in default namespace
          oc project default
          oc get all
          #The only entries there should be system managed resources such as the kubernetes and openshift service
        remediation: |
          Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
          resources and that all new resources are created in a specific namespace.
        scored: false
@@ -98,14 +98,22 @@ groups:

- id: 1.1.7
text: "Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Automated)"
audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c permissions=%a; fi'"
use_multiple_values: true
audit: |
/bin/sh -c '
if [ -e "$etcdconf" ]; then
stat -c "permissions=%a %n" "$etcdconf"
else
echo "Directory not found"
fi
'
tests:
bin_op: or
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "644"
- flag: "Directory not found"
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example,
@@ -114,14 +122,23 @@ groups:

- id: 1.1.8
text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)"
audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %U:%G $etcdconf; fi'"
audit: |
/bin/sh -c '
if [ -e $etcdconf ]; then
stat -c "ownership=%U:%G %n" $etcdconf
else
echo "Directory not found"
fi
'
tests:
bin_op: or
test_items:
- flag: "root:root"
- flag: "ownership"
compare:
op: eq
value: "root:root"
set: true
- flag: "Directory not found"
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example,
@@ -162,14 +179,21 @@ groups:

- id: 1.1.11
text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)"
audit: "stat -c permissions=%a /var/lib/rancher/rke2/server/db/etcd"
audit: |
/bin/sh -c 'if [ -d "/var/lib/rancher/rke2/server/db/etcd" ]; then
stat -c "permissions=%a" "/var/lib/rancher/rke2/server/db/etcd"
else
echo "Directory not found"
fi'
tests:
bin_op: or
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "700"
set: true
- flag: "Directory not found"
remediation: |
On the etcd server node, get the etcd data directory, passed as an argument --data-dir,
from the command 'ps -ef | grep etcd'.

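# How these reworked audits are meant to evaluate (a reading of the checks
# above, not text from the diff): the script prints either a permissions line
# or the literal "Directory not found", and bin_op: or passes when either
# test_item matches. Sample outcomes, assuming a 644 bitmask:
#   permissions=600 /etc/kubernetes/manifests/etcd.yaml  -> within 644, PASS
#   Directory not found                                  -> flag matches, PASS
#   permissions=664 /etc/kubernetes/manifests/etcd.yaml  -> group-write bit outside 644, FAIL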
@@ -126,14 +126,21 @@ groups:

- id: 4.1.9
text: "Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive (Automated)"
audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' '
audit: |
/bin/sh -c 'if test -e "$kubeletconf"; then
stat -c "permissions=%a" "$kubeletconf"
else
echo "File not found"
fi'
tests:
bin_op: or
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "644"
set: true
- flag: "File not found"
remediation: |
Run the following command (using the config file location identified in the Audit step)
chmod 644 $kubeletconf
@@ -141,11 +148,18 @@ groups:

- id: 4.1.10
text: "Ensure that the kubelet --config configuration file ownership is set to root:root (Automated)"
audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
audit: |
/bin/sh -c 'if test -e "$kubeletconf"; then
stat -c "%U:%G" "$kubeletconf"
else
echo "File not found"
fi'
tests:
bin_op: or
test_items:
- flag: root:root
set: true
- flag: File not found
remediation: |
Run the following command (using the config file location identified in the Audit step)
chown root:root $kubeletconf

@@ -98,14 +98,22 @@ groups:

- id: 1.1.7
text: "Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Automated)"
audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c permissions=%a; fi'"
use_multiple_values: true
audit: |
/bin/sh -c '
if [ -e "$etcdconf" ]; then
stat -c "permissions=%a %n" "$etcdconf"
else
echo "Directory not found"
fi
'
tests:
bin_op: or
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "644"
- flag: "Directory not found"
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example,
@@ -114,14 +122,23 @@ groups:

- id: 1.1.8
text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)"
audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %U:%G $etcdconf; fi'"
audit: |
/bin/sh -c '
if [ -e $etcdconf ]; then
stat -c "ownership=%U:%G %n" $etcdconf
else
echo "Directory not found"
fi
'
tests:
bin_op: or
test_items:
- flag: "root:root"
- flag: "ownership"
compare:
op: eq
value: "root:root"
set: true
- flag: "Directory not found"
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example,
@@ -168,14 +185,21 @@ groups:

- id: 1.1.11
text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)"
audit: "stat -c permissions=%a /var/lib/rancher/rke2/server/db/etcd"
audit: |
/bin/sh -c 'if [ -d "/var/lib/rancher/rke2/server/db/etcd" ]; then
stat -c "permissions=%a" "/var/lib/rancher/rke2/server/db/etcd"
else
echo "Directory not found"
fi'
tests:
bin_op: or
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "700"
set: true
- flag: "Directory not found"
remediation: |
On the etcd server node, get the etcd data directory, passed as an argument --data-dir,
from the command 'ps -ef | grep etcd'.

@@ -126,14 +126,21 @@ groups:

- id: 4.1.9
text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Automated)"
audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' '
audit: |
/bin/sh -c 'if test -e "$kubeletconf"; then
stat -c "permissions=%a" "$kubeletconf"
else
echo "File not found"
fi'
tests:
bin_op: or
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
set: true
- flag: "File not found"
remediation: |
Run the following command (using the config file location identified in the Audit step)
chmod 600 $kubeletconf
@@ -141,11 +148,18 @@ groups:

- id: 4.1.10
text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Automated)"
audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
audit: |
/bin/sh -c 'if test -e "$kubeletconf"; then
stat -c "%U:%G" "$kubeletconf"
else
echo "File not found"
fi'
tests:
bin_op: or
test_items:
- flag: root:root
set: true
- flag: "File not found"
remediation: |
Run the following command (using the config file location identified in the Audit step)
chown root:root $kubeletconf

@@ -98,14 +98,24 @@ groups:

- id: 1.1.7
text: "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Automated)"
audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c permissions=%a; fi'"
use_multiple_values: true
audit: |
/bin/sh -c '
if [ -e "$etcdconf" ]; then
stat -c "permissions=%a %n" "$etcdconf"
else
echo "Directory not found"
fi
'
tests:
bin_op: or
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
set: true
- flag: "Directory not found"
set: true
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example,
@@ -114,14 +124,24 @@ groups:

- id: 1.1.8
text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)"
audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %U:%G $etcdconf; fi'"
audit: |
/bin/sh -c '
if [ -e $etcdconf ]; then
stat -c "ownership=%U:%G %n" $etcdconf
else
echo "Directory not found"
fi
'
tests:
bin_op: or
test_items:
- flag: "root:root"
- flag: "ownership"
compare:
op: eq
value: "root:root"
set: true
- flag: "Directory not found"
set: true
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example,
@@ -162,14 +182,22 @@ groups:

- id: 1.1.11
text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)"
audit: "stat -c permissions=%a /var/lib/rancher/rke2/server/db/etcd"
audit: |
/bin/sh -c 'if [ -d "/var/lib/rancher/rke2/server/db/etcd" ]; then
stat -c "permissions=%a" "/var/lib/rancher/rke2/server/db/etcd"
else
echo "Directory not found"
fi'
tests:
bin_op: or
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "700"
set: true
- flag: "Directory not found"
set: true
remediation: |
On the etcd server node, get the etcd data directory, passed as an argument --data-dir,
from the command 'ps -ef | grep etcd'.

@@ -127,14 +127,21 @@ groups:

- id: 4.1.9
text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Automated)"
audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' '
audit: |
/bin/sh -c 'if test -e "$kubeletconf"; then
stat -c "permissions=%a" "$kubeletconf"
else
echo "File not found"
fi'
tests:
bin_op: or
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
set: true
- flag: "File not found"
remediation: |
Run the following command (using the config file location identified in the Audit step)
chmod 600 $kubeletconf
@@ -142,11 +149,18 @@ groups:

- id: 4.1.10
text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Automated)"
audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
audit: |
/bin/sh -c 'if test -e "$kubeletconf"; then
stat -c "%U:%G" "$kubeletconf"
else
echo "File not found"
fi'
tests:
bin_op: or
test_items:
- flag: root:root
set: true
- flag: "File not found"
remediation: |
Run the following command (using the config file location identified in the Audit step)
chown root:root $kubeletconf

2
cfg/rke2-cis-1.8/config.yaml
Normal file
@@ -0,0 +1,2 @@
---
## Version-specific settings that override the values in cfg/config.yaml
62
cfg/rke2-cis-1.8/controlplane.yaml
Normal file
@@ -0,0 +1,62 @@
---
controls:
version: "rke2-cis-1.8"
id: 3
text: "Control Plane Configuration"
type: "controlplane"
groups:
- id: 3.1
text: "Authentication and Authorization"
checks:
- id: 3.1.1
text: "Client certificate authentication should not be used for users (Manual)"
type: "manual"
remediation: |
Alternative mechanisms provided by Kubernetes such as the use of OIDC should be
implemented in place of client certificates.
scored: false

- id: 3.1.2
text: "Service account token authentication should not be used for users (Manual)"
type: "manual"
remediation: |
Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented
in place of service account tokens.
scored: false

- id: 3.1.3
text: "Bootstrap token authentication should not be used for users (Manual)"
type: "manual"
remediation: |
Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented
in place of bootstrap tokens.
scored: false

- id: 3.2
text: "Logging"
checks:
- id: 3.2.1
text: "Ensure that a minimal audit policy is created (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--audit-policy-file"
set: true
remediation: |
Create an audit policy file for your cluster.
scored: true

- id: 3.2.2
text: "Ensure that the audit policy covers key security concerns (Manual)"
type: "manual"
remediation: |
Review the audit policy provided for the cluster and ensure that it covers
at least the following areas,
- Access to Secrets managed by the cluster. Care should be taken to only
log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in
order to avoid risk of logging sensitive data.
- Modification of Pod and Deployment objects.
- Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`.
For most requests, minimally logging at the Metadata level is recommended
(the most basic level of logging).
scored: false
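# A minimal audit-policy sketch for 3.2.1/3.2.2 (an assumption, not shipped
# with the benchmark); the rule set only illustrates the areas listed above.
# apiVersion: audit.k8s.io/v1
# kind: Policy
# rules:
#   - level: Metadata        # avoid logging secret payloads
#     resources:
#       - group: ""
#         resources: ["secrets", "configmaps"]
#       - group: "authentication.k8s.io"
#         resources: ["tokenreviews"]
#   - level: Metadata        # baseline for everything else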
135
cfg/rke2-cis-1.8/etcd.yaml
Normal file
@@ -0,0 +1,135 @@
---
controls:
version: "rke2-cis-1.8"
id: 2
text: "Etcd Node Configuration"
type: "etcd"
groups:
- id: 2
text: "Etcd Node Configuration"
checks:
- id: 2.1
text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)"
audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
tests:
bin_op: and
test_items:
- flag: "--cert-file"
env: "ETCD_CERT_FILE"
- flag: "--key-file"
env: "ETCD_KEY_FILE"
remediation: |
Follow the etcd service documentation and configure TLS encryption.
Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml
on the master node and set the below parameters.
--cert-file=</path/to/ca-file>
--key-file=</path/to/key-file>
scored: true

- id: 2.2
text: "Ensure that the --client-cert-auth argument is set to true (Automated)"
audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
tests:
test_items:
- flag: "--client-cert-auth"
env: "ETCD_CLIENT_CERT_AUTH"
compare:
op: eq
value: true
remediation: |
Edit the etcd pod specification file $etcdconf on the master
node and set the below parameter.
--client-cert-auth="true"
scored: true

- id: 2.3
text: "Ensure that the --auto-tls argument is not set to true (Automated)"
audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
tests:
bin_op: or
test_items:
- flag: "--auto-tls"
env: "ETCD_AUTO_TLS"
set: false
- flag: "--auto-tls"
env: "ETCD_AUTO_TLS"
compare:
op: eq
value: false
remediation: |
Edit the etcd pod specification file $etcdconf on the master
node and either remove the --auto-tls parameter or set it to false.
--auto-tls=false
scored: true

- id: 2.4
text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated)"
audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
tests:
bin_op: and
test_items:
- flag: "--peer-cert-file"
env: "ETCD_PEER_CERT_FILE"
- flag: "--peer-key-file"
env: "ETCD_PEER_KEY_FILE"
remediation: |
Follow the etcd service documentation and configure peer TLS encryption as appropriate
for your etcd cluster.
Then, edit the etcd pod specification file $etcdconf on the
master node and set the below parameters.
--peer-cert-file=</path/to/peer-cert-file>
--peer-key-file=</path/to/peer-key-file>
scored: true

- id: 2.5
text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)"
audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
tests:
test_items:
- flag: "--peer-client-cert-auth"
env: "ETCD_PEER_CLIENT_CERT_AUTH"
compare:
op: eq
value: true
remediation: |
Edit the etcd pod specification file $etcdconf on the master
node and set the below parameter.
--peer-client-cert-auth=true
scored: true

- id: 2.6
text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)"
audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
tests:
bin_op: or
test_items:
- flag: "--peer-auto-tls"
env: "ETCD_PEER_AUTO_TLS"
set: false
- flag: "--peer-auto-tls"
env: "ETCD_PEER_AUTO_TLS"
compare:
op: eq
value: false
remediation: |
Edit the etcd pod specification file $etcdconf on the master
node and either remove the --peer-auto-tls parameter or set it to false.
--peer-auto-tls=false
scored: true

- id: 2.7
text: "Ensure that a unique Certificate Authority is used for etcd (Automated)"
audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
tests:
test_items:
- flag: "--trusted-ca-file"
env: "ETCD_TRUSTED_CA_FILE"
set: true
remediation: |
[Manual test]
Follow the etcd documentation and create a dedicated certificate authority setup for the
etcd service.
Then, edit the etcd pod specification file $etcdconf on the
master node and set the below parameter.
--trusted-ca-file=</path/to/ca-file>
scored: true
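# Note on the env fields above (a reading of the checks, not diff text): each
# test passes whether the option is supplied as a command-line flag or as the
# equivalent etcd environment variable, so either invocation satisfies 2.2:
#   etcd --client-cert-auth=true ...
#   ETCD_CLIENT_CERT_AUTH=true etcd ...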
995
cfg/rke2-cis-1.8/master.yaml
Normal file
@@ -0,0 +1,995 @@
---
controls:
version: "rke2-cis-1.8"
id: 1
text: "Control Plane Security Configuration"
type: "master"
groups:
- id: 1.1
text: "Control Plane Node Configuration Files"
checks:
- id: 1.1.1
text: "Ensure that the API server pod specification file permissions are set to 600 or more restrictive (Automated)"
audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'"
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
Run the below command (based on the file location on your system) on the
control plane node.
For example, chmod 600 $apiserverconf
Not Applicable - Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver.
All configuration is passed in as arguments at container run time.
scored: true

- id: 1.1.2
text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)"
audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'"
tests:
test_items:
- flag: "root:root"
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example, chown root:root $apiserverconf
Not Applicable - Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver.
All configuration is passed in as arguments at container run time.
scored: true

- id: 1.1.3
text: "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Automated)"
audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'"
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example, chmod 600 $controllermanagerconf
Not Applicable - Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver.
All configuration is passed in as arguments at container run time.
scored: true

- id: 1.1.4
text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)"
audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'"
tests:
test_items:
- flag: "root:root"
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example, chown root:root $controllermanagerconf
Not Applicable - Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver.
All configuration is passed in as arguments at container run time.
scored: true

- id: 1.1.5
text: "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Automated)"
audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'"
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example, chmod 600 $schedulerconf
Not Applicable - Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver.
All configuration is passed in as arguments at container run time.
scored: true

- id: 1.1.6
text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)"
audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'"
tests:
test_items:
- flag: "root:root"
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example, chown root:root $schedulerconf
Not Applicable - Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver.
All configuration is passed in as arguments at container run time.
scored: true

- id: 1.1.7
text: "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Automated)"
audit: |
/bin/sh -c '
if [ -e "$etcdconf" ]; then
stat -c "permissions=%a %n" "$etcdconf"
else
echo "Directory not found"
fi
'
tests:
bin_op: or
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
- flag: "Directory not found"
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example,
chmod 600 $etcdconf
Not Applicable - Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver.
All configuration is passed in as arguments at container run time.
scored: true

- id: 1.1.8
text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)"
audit: |
/bin/sh -c '
if [ -e "$etcdconf" ]; then
stat -c "ownership=%U:%G %n" "$etcdconf"
else
echo "Directory not found"
fi
'
tests:
bin_op: or
test_items:
- flag: "ownership"
compare:
op: eq
value: "root:root"
set: true
- flag: "Directory not found"
set: true
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example,
chown root:root $etcdconf
Not Applicable - Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver.
All configuration is passed in as arguments at container run time.
scored: true

- id: 1.1.9
text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Manual)"
audit: |
ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a
find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a
use_multiple_values: true
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example, chmod 600 <path/to/cni/files>
scored: false

- id: 1.1.10
text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)"
audit: |
ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G
find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G
use_multiple_values: true
tests:
test_items:
- flag: "root:root"
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example,
chown root:root <path/to/cni/files>
scored: false

- id: 1.1.11
text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)"
audit: |
/bin/sh -c 'if [ -e "/var/lib/rancher/rke2/server/db/etcd" ]; then
stat -c "permissions=%a %n" "/var/lib/rancher/rke2/server/db/etcd"
else
echo "Directory not found: /var/lib/rancher/rke2/server/db/etcd"
fi'
tests:
bin_op: or
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "700"
set: true
- flag: "Directory not found: /var/lib/rancher/rke2/server/db/etcd"
remediation: |
On the etcd server node, get the etcd data directory, passed as an argument --data-dir,
from the command 'ps -ef | grep etcd'.
Run the below command (based on the etcd data directory found above). For example,
chmod 700 /var/lib/etcd
scored: true

- id: 1.1.12
text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)"
audit: "stat -c %U:%G /var/lib/rancher/rke2/server/db/etcd"
tests:
test_items:
- flag: "etcd:etcd"
set: true
remediation: |
On the etcd server node, get the etcd data directory, passed as an argument --data-dir,
from the command 'ps -ef | grep etcd'.
Run the below command (based on the etcd data directory found above).
For example, chown etcd:etcd /var/lib/etcd
Permissive - A system service account is required for etcd data directory ownership.
Refer to Rancher's hardening guide for more details on how to configure this ownership.
scored: true

- id: 1.1.13
text: "Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)"
audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c permissions=%a /etc/kubernetes/admin.conf; fi'"
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example, chmod 600 /etc/kubernetes/admin.conf
Not Applicable - Cluster provisioned by RKE does not store the kubernetes default kubeconfig credentials file on the nodes.
scored: true

- id: 1.1.14
text: "Ensure that the admin.conf file ownership is set to root:root (Automated)"
audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %U:%G /etc/kubernetes/admin.conf; fi'"
tests:
test_items:
- flag: "root:root"
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example, chown root:root /etc/kubernetes/admin.conf
Not Applicable - Cluster provisioned by RKE does not store the kubernetes default kubeconfig credentials file on the nodes.
scored: true

- id: 1.1.15
text: "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)"
audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'"
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example,
chmod 600 $schedulerkubeconfig
Not Applicable - Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler.
All configuration is passed in as arguments at container run time.
scored: true

- id: 1.1.16
text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)"
audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'"
tests:
test_items:
- flag: "root:root"
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example,
chown root:root $schedulerkubeconfig
Not Applicable - Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler.
All configuration is passed in as arguments at container run time.
scored: true

- id: 1.1.17
text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)"
audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'"
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example,
chmod 600 $controllermanagerkubeconfig
Not Applicable - Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager.
All configuration is passed in as arguments at container run time.
scored: true

- id: 1.1.18
text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)"
audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c %U:%G $controllermanagerkubeconfig; fi'"
tests:
test_items:
- flag: "root:root"
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example,
chown root:root $controllermanagerkubeconfig
Not Applicable - Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager.
All configuration is passed in as arguments at container run time.
scored: true

- id: 1.1.19
text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)"
audit: "check_files_owner_in_dir.sh /node/etc/kubernetes/ssl"
tests:
test_items:
- flag: "true"
compare:
op: eq
value: "true"
set: true
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example,
chown -R root:root /etc/kubernetes/pki/
scored: true

- id: 1.1.20
text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual)"
audit: |
if [ -d /node/etc/kubernetes/ssl ]; then
find /node/etc/kubernetes/ssl -type f -name '*.pem' ! -name '*key.pem' \
-exec stat -c 'permissions=%a %n' {} +
else
echo "Directory not found: /node/etc/kubernetes/ssl"
fi
use_multiple_values: true
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example,
find /node/etc/kubernetes/ssl/ -name '*.pem' ! -name '*key.pem' -exec chmod -R 600 {} +
scored: false

- id: 1.1.21
text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)"
audit: |
if [ -d /node/etc/kubernetes/ssl ]; then
find /node/etc/kubernetes/ssl -type f -name '*key.pem' -exec stat -c 'permissions=%a %n' {} +
else
echo "Directory not found: /node/etc/kubernetes/ssl"
fi
use_multiple_values: true
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "600"
remediation: |
Run the below command (based on the file location on your system) on the control plane node.
For example,
find /node/etc/kubernetes/ssl/ -name '*key.pem' -exec chmod -R 600 {} +
scored: false

- id: 1.2
text: "API Server"
checks:
- id: 1.2.1
text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--anonymous-auth"
compare:
op: eq
value: false
set: true
remediation: |
Edit the API server pod specification file $apiserverconf
on the control plane node and set the below parameter.
--anonymous-auth=false
scored: true

- id: 1.2.2
text: "Ensure that the --token-auth-file parameter is not set (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--token-auth-file"
set: false
remediation: |
Follow the documentation and configure alternate mechanisms for authentication. Then,
edit the API server pod specification file $apiserverconf
on the control plane node and remove the --token-auth-file=<filename> parameter.
scored: true

- id: 1.2.3
text: "Ensure that the --DenyServiceExternalIPs is set (Manual)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--enable-admission-plugins"
compare:
op: has
value: "DenyServiceExternalIPs"
remediation: |
Edit the API server pod specification file $apiserverconf
on the control plane node and add the `DenyServiceExternalIPs` plugin
to the enabled admission plugins, as such --enable-admission-plugins=DenyServiceExternalIPs.
scored: false

- id: 1.2.4
text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
bin_op: and
test_items:
- flag: "--kubelet-client-certificate"
- flag: "--kubelet-client-key"
remediation: |
Follow the Kubernetes documentation and set up the TLS connection between the
apiserver and kubelets. Then, edit API server pod specification file
$apiserverconf on the control plane node and set the
kubelet client certificate and key parameters as below.
--kubelet-client-certificate=<path/to/client-certificate-file>
--kubelet-client-key=<path/to/client-key-file>
scored: true

- id: 1.2.5
text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--kubelet-certificate-authority"
set: true
remediation: |
Follow the Kubernetes documentation and set up the TLS connection between
the apiserver and kubelets. Then, edit the API server pod specification file
$apiserverconf on the control plane node and set the
--kubelet-certificate-authority parameter to the path to the cert file for the certificate authority.
--kubelet-certificate-authority=<ca-string>
Permissive - When generating serving certificates, functionality could break in conjunction with hostname overrides which are required for certain cloud providers.
scored: true

- id: 1.2.6
text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--authorization-mode"
compare:
op: nothave
value: "AlwaysAllow"
remediation: |
Edit the API server pod specification file $apiserverconf
on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow.
One such example could be as below.
--authorization-mode=RBAC
scored: true

- id: 1.2.7
text: "Ensure that the --authorization-mode argument includes Node (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--authorization-mode"
compare:
op: has
value: "Node"
remediation: |
Edit the API server pod specification file $apiserverconf
on the control plane node and set the --authorization-mode parameter to a value that includes Node.
--authorization-mode=Node,RBAC
scored: true

- id: 1.2.8
text: "Ensure that the --authorization-mode argument includes RBAC (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--authorization-mode"
compare:
op: has
value: "RBAC"
remediation: |
Edit the API server pod specification file $apiserverconf
on the control plane node and set the --authorization-mode parameter to a value that includes RBAC,
for example `--authorization-mode=Node,RBAC`.
scored: true

- id: 1.2.9
text: "Ensure that the admission control plugin EventRateLimit is set (Manual)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--enable-admission-plugins"
compare:
op: has
value: "EventRateLimit"
remediation: |
Follow the Kubernetes documentation and set the desired limits in a configuration file.
Then, edit the API server pod specification file $apiserverconf
and set the below parameters.
--enable-admission-plugins=...,EventRateLimit,...
--admission-control-config-file=<path/to/configuration/file>
scored: false
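# A hedged sketch of the admission configuration 1.2.9 points at (file contents
# and limits are illustrative, not from the diff):
# apiVersion: apiserver.config.k8s.io/v1
# kind: AdmissionConfiguration
# plugins:
#   - name: EventRateLimit
#     configuration:
#       apiVersion: eventratelimit.admission.k8s.io/v1alpha1
#       kind: Configuration
#       limits:
#         - type: Server   # one shared bucket for the whole API server
#           qps: 50
#           burst: 100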

- id: 1.2.10
text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
bin_op: or
test_items:
- flag: "--enable-admission-plugins"
compare:
op: nothave
value: AlwaysAdmit
- flag: "--enable-admission-plugins"
set: false
remediation: |
Edit the API server pod specification file $apiserverconf
on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a
value that does not include AlwaysAdmit.
scored: true

- id: 1.2.11
text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--enable-admission-plugins"
compare:
op: has
value: "AlwaysPullImages"
remediation: |
Edit the API server pod specification file $apiserverconf
on the control plane node and set the --enable-admission-plugins parameter to include
AlwaysPullImages.
--enable-admission-plugins=...,AlwaysPullImages,...
scored: false

- id: 1.2.12
text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
bin_op: or
test_items:
- flag: "--enable-admission-plugins"
compare:
op: has
value: "SecurityContextDeny"
- flag: "--enable-admission-plugins"
compare:
op: has
value: "PodSecurityPolicy"
remediation: |
Edit the API server pod specification file $apiserverconf
on the control plane node and set the --enable-admission-plugins parameter to include
SecurityContextDeny, unless PodSecurityPolicy is already in place.
--enable-admission-plugins=...,SecurityContextDeny,...
Permissive - Enabling Pod Security Policy can cause applications to unexpectedly fail.
scored: false

- id: 1.2.13
text: "Ensure that the admission control plugin ServiceAccount is set (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
bin_op: or
test_items:
- flag: "--disable-admission-plugins"
compare:
op: nothave
value: "ServiceAccount"
- flag: "--disable-admission-plugins"
set: false
remediation: |
Follow the documentation and create ServiceAccount objects as per your environment.
Then, edit the API server pod specification file $apiserverconf
on the control plane node and ensure that the --disable-admission-plugins parameter is set to a
value that does not include ServiceAccount.
scored: true

- id: 1.2.14
text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
bin_op: or
test_items:
- flag: "--disable-admission-plugins"
compare:
op: nothave
value: "NamespaceLifecycle"
- flag: "--disable-admission-plugins"
set: false
remediation: |
Edit the API server pod specification file $apiserverconf
on the control plane node and set the --disable-admission-plugins parameter to
ensure it does not include NamespaceLifecycle.
scored: true

- id: 1.2.15
text: "Ensure that the admission control plugin NodeRestriction is set (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--enable-admission-plugins"
compare:
op: has
value: "NodeRestriction"
remediation: |
Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets.
Then, edit the API server pod specification file $apiserverconf
on the control plane node and set the --enable-admission-plugins parameter to a
value that includes NodeRestriction.
--enable-admission-plugins=...,NodeRestriction,...
scored: true

- id: 1.2.16
text: "Ensure that the --profiling argument is set to false (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--profiling"
compare:
op: eq
value: false
remediation: |
Edit the API server pod specification file $apiserverconf
on the control plane node and set the below parameter.
--profiling=false
scored: true

- id: 1.2.17
text: "Ensure that the --audit-log-path argument is set (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--audit-log-path"
remediation: |
Edit the API server pod specification file $apiserverconf
on the control plane node and set the --audit-log-path parameter to a suitable path and
file where you would like audit logs to be written, for example,
--audit-log-path=/var/log/apiserver/audit.log
scored: true

- id: 1.2.18
text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--audit-log-maxage"
compare:
op: gte
value: 30
remediation: |
Edit the API server pod specification file $apiserverconf
on the control plane node and set the --audit-log-maxage parameter to 30
or as an appropriate number of days, for example,
--audit-log-maxage=30
scored: true

- id: 1.2.19
text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--audit-log-maxbackup"
compare:
op: gte
value: 10
remediation: |
Edit the API server pod specification file $apiserverconf
on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate
value. For example,
--audit-log-maxbackup=10
scored: true

- id: 1.2.20
text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--audit-log-maxsize"
compare:
op: gte
value: 100
remediation: |
Edit the API server pod specification file $apiserverconf
on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB.
For example, to set it as 100 MB, --audit-log-maxsize=100
scored: true

- id: 1.2.21
text: "Ensure that the --request-timeout argument is set as appropriate (Manual)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
type: manual
remediation: |
Edit the API server pod specification file $apiserverconf
and set the below parameter as appropriate and if needed.
For example, --request-timeout=300s
scored: false

- id: 1.2.22
text: "Ensure that the --service-account-lookup argument is set to true (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
bin_op: or
test_items:
- flag: "--service-account-lookup"
set: false
- flag: "--service-account-lookup"
compare:
op: eq
value: true
remediation: |
Edit the API server pod specification file $apiserverconf
on the control plane node and set the below parameter.
--service-account-lookup=true
Alternatively, you can delete the --service-account-lookup parameter from this file so
that the default takes effect.
scored: true

- id: 1.2.23
text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--service-account-key-file"
remediation: |
Edit the API server pod specification file $apiserverconf
on the control plane node and set the --service-account-key-file parameter
to the public key file for service accounts. For example,
--service-account-key-file=<filename>
scored: true

- id: 1.2.24
text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
bin_op: and
test_items:
- flag: "--etcd-certfile"
- flag: "--etcd-keyfile"
remediation: |
Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd.
Then, edit the API server pod specification file $apiserverconf
on the control plane node and set the etcd certificate and key file parameters.
--etcd-certfile=<path/to/client-certificate-file>
--etcd-keyfile=<path/to/client-key-file>
scored: true

- id: 1.2.25
text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
bin_op: and
test_items:
- flag: "--tls-cert-file"
- flag: "--tls-private-key-file"
remediation: |
Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
Then, edit the API server pod specification file $apiserverconf
on the control plane node and set the TLS certificate and private key file parameters.
--tls-cert-file=<path/to/tls-certificate-file>
--tls-private-key-file=<path/to/tls-key-file>
scored: true

- id: 1.2.26
text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--client-ca-file"
remediation: |
Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
Then, edit the API server pod specification file $apiserverconf
on the control plane node and set the client certificate authority file.
--client-ca-file=<path/to/client-ca-file>
scored: true

- id: 1.2.27
text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--etcd-cafile"
remediation: |
Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd.
Then, edit the API server pod specification file $apiserverconf
on the control plane node and set the etcd certificate authority file parameter.
--etcd-cafile=<path/to/ca-file>
scored: true

- id: 1.2.28
text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)"
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
tests:
test_items:
- flag: "--encryption-provider-config"
remediation: |
Follow the Kubernetes documentation and configure an EncryptionConfig file.
Then, edit the API server pod specification file $apiserverconf
on the control plane node and set the --encryption-provider-config parameter to the path of that file.
For example, --encryption-provider-config=</path/to/EncryptionConfig/File>
Permissive - Enabling encryption changes how data can be recovered as data is encrypted.
scored: false

- id: 1.2.29
text: "Ensure that encryption providers are appropriately configured (Manual)"
audit: |
ENCRYPTION_PROVIDER_CONFIG=$(ps -ef | grep $apiserverbin | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%')
if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi
tests:
test_items:
- flag: "provider"
compare:
op: valid_elements
value: "aescbc,kms,secretbox"
remediation: |
Follow the Kubernetes documentation and configure an EncryptionConfig file.
In this file, choose aescbc, kms or secretbox as the encryption provider.
Permissive - Enabling encryption changes how data can be recovered as data is encrypted.
scored: false
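# A hedged sketch of the EncryptionConfig file that 1.2.28/1.2.29 check for
# (resource list and key name are illustrative; the secret must be a locally
# generated, base64-encoded 32-byte key, shown here as a placeholder):
# apiVersion: apiserver.config.k8s.io/v1
# kind: EncryptionConfiguration
# resources:
#   - resources: ["secrets"]
#     providers:
#       - aescbc:
#           keys:
#             - name: key1
#               secret: <base64-encoded-32-byte-key>
#       - identity: {}   # fallback for reading not-yet-encrypted data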
|
||||
- id: 1.2.30
|
||||
text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)"
|
||||
audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
|
||||
tests:
|
||||
test_items:
|
||||
- flag: "--tls-cipher-suites"
|
||||
compare:
|
||||
op: valid_elements
|
||||
value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384"
|
||||
remediation: |
|
||||
Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml
|
||||
on the control plane node and set the below parameter.
|
||||
--tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,
|
||||
TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
|
||||
TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
|
||||
TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
|
||||
TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
||||
TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
|
||||
TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,
|
||||
TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384
|
||||
scored: false
|
||||
|
||||

  - id: 1.3
    text: "Controller Manager"
    checks:
      - id: 1.3.1
        text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Automated)"
        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
        tests:
          test_items:
            - flag: "--terminated-pod-gc-threshold"
        remediation: |
          Edit the Controller Manager pod specification file $controllermanagerconf
          on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold,
          for example, --terminated-pod-gc-threshold=10
        scored: true

      - id: 1.3.2
        text: "Ensure that the --profiling argument is set to false (Automated)"
        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
        tests:
          test_items:
            - flag: "--profiling"
              compare:
                op: eq
                value: false
        remediation: |
          Edit the Controller Manager pod specification file $controllermanagerconf
          on the control plane node and set the below parameter.
          --profiling=false
        scored: true

      - id: 1.3.3
        text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)"
        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
        tests:
          test_items:
            - flag: "--use-service-account-credentials"
              compare:
                op: noteq
                value: false
        remediation: |
          Edit the Controller Manager pod specification file $controllermanagerconf
          on the control plane node to set the below parameter.
          --use-service-account-credentials=true
        scored: true

      - id: 1.3.4
        text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)"
        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
        tests:
          test_items:
            - flag: "--service-account-private-key-file"
        remediation: |
          Edit the Controller Manager pod specification file $controllermanagerconf
          on the control plane node and set the --service-account-private-key-file parameter
          to the private key file for service accounts.
          --service-account-private-key-file=<filename>
        scored: true

      - id: 1.3.5
        text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)"
        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
        tests:
          test_items:
            - flag: "--root-ca-file"
        remediation: |
          Edit the Controller Manager pod specification file $controllermanagerconf
          on the control plane node and set the --root-ca-file parameter to the certificate bundle file.
          --root-ca-file=<path/to/file>
        scored: true

      - id: 1.3.6
        text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)"
        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
        tests:
          bin_op: or
          test_items:
            - flag: "--feature-gates"
              compare:
                op: nothave
                value: "RotateKubeletServerCertificate=false"
              set: true
            - flag: "--feature-gates"
              set: false
        remediation: |
          Edit the Controller Manager pod specification file $controllermanagerconf
          on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true.
          --feature-gates=RotateKubeletServerCertificate=true
          Clusters provisioned by RKE handle certificate rotation directly through RKE.
        scored: true

      - id: 1.3.7
        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
        tests:
          bin_op: or
          test_items:
            - flag: "--bind-address"
              compare:
                op: eq
                value: "127.0.0.1"
            - flag: "--bind-address"
              set: false
        remediation: |
          Edit the Controller Manager pod specification file $controllermanagerconf
          on the control plane node and ensure the correct value for the --bind-address parameter.
        scored: true

  - id: 1.4
    text: "Scheduler"
    checks:
      - id: 1.4.1
        text: "Ensure that the --profiling argument is set to false (Automated)"
        audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
        tests:
          test_items:
            - flag: "--profiling"
              compare:
                op: eq
                value: false
        remediation: |
          Edit the Scheduler pod specification file $schedulerconf
          on the control plane node and set the below parameter.
          --profiling=false
        scored: true

      - id: 1.4.2
        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
        audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
        tests:
          bin_op: or
          test_items:
            - flag: "--bind-address"
              compare:
                op: eq
                value: "127.0.0.1"
            - flag: "--bind-address"
              set: false
        remediation: |
          Edit the Scheduler pod specification file $schedulerconf
          on the control plane node and ensure the correct value for the --bind-address parameter.
        scored: true
447
cfg/rke2-cis-1.8/node.yaml
Normal file
@@ -0,0 +1,447 @@
---
controls:
version: "rke2-cis-1.8"
id: 4
text: "Worker Node Security Configuration"
type: "node"
groups:
  - id: 4.1
    text: "Worker Node Configuration Files"
    checks:
      - id: 4.1.1
        text: "Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)"
        audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' '
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "600"
        remediation: |
          Run the below command (based on the file location on your system) on each worker node.
          For example, chmod 600 $kubeletsvc
          Not Applicable - Clusters provisioned by RKE do not require or maintain a configuration file for the kubelet service.
          All configuration is passed in as arguments at container run time.
        scored: true
      - id: 4.1.2
        text: "Ensure that the kubelet service file ownership is set to root:root (Automated)"
        audit: '/bin/sh -c "if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; else echo \"File not found\"; fi"'
        tests:
          test_items:
            - flag: root:root
        remediation: |
          Run the below command (based on the file location on your system) on each worker node.
          For example,
          chown root:root $kubeletsvc
          Not Applicable - Clusters provisioned by RKE do not require or maintain a configuration file for the kubelet service.
          All configuration is passed in as arguments at container run time.
        scored: true
      - id: 4.1.3
        text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Automated)"
        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' '
        tests:
          bin_op: or
          test_items:
            - flag: "permissions"
              set: true
              compare:
                op: bitmask
                value: "600"
        remediation: |
          Run the below command (based on the file location on your system) on each worker node.
          For example,
          chmod 600 $proxykubeconfig
        scored: true
      - id: 4.1.4
        text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Automated)"
        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' '
        tests:
          bin_op: or
          test_items:
            - flag: root:root
        remediation: |
          Run the below command (based on the file location on your system) on each worker node.
          For example, chown root:root $proxykubeconfig
        scored: true
      - id: 4.1.5
        text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)"
        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' '
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "600"
        remediation: |
          Run the below command (based on the file location on your system) on each worker node.
          For example,
          chmod 600 $kubeletkubeconfig
        scored: true
      - id: 4.1.6
        text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)"
        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' '
        tests:
          test_items:
            - flag: root:root
        remediation: |
          Run the below command (based on the file location on your system) on each worker node.
          For example,
          chown root:root $kubeletkubeconfig
        scored: true
      - id: 4.1.7
        text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Automated)"
        audit: "stat -c permissions=%a /node/etc/kubernetes/ssl/kube-ca.pem"
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "600"
        remediation: |
          Run the following command to modify the file permissions of the
          --client-ca-file chmod 600 <filename>
        scored: true
      - id: 4.1.8
        text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)"
        audit: "stat -c %U:%G /node/etc/kubernetes/ssl/kube-ca.pem"
        tests:
          test_items:
            - flag: root:root
              compare:
                op: eq
                value: root:root
        remediation: |
          Run the following command to modify the ownership of the --client-ca-file.
          chown root:root <filename>
        scored: true
      - id: 4.1.9
        text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Automated)"
        audit: |
          /bin/sh -c 'if test -e "$kubeletconf"; then
            stat -c "permissions=%a" "$kubeletconf"
          else
            echo "File not found"
          fi'
        tests:
          bin_op: or
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "600"
            - flag: "File not found"
        remediation: |
          Run the following command (using the config file location identified in the Audit step)
          chmod 600 $kubeletconf
          Not Applicable - Clusters provisioned by RKE do not require or maintain a configuration file for the kubelet.
          All configuration is passed in as arguments at container run time.
        scored: true
      - id: 4.1.10
        text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Manual)"
        audit: |
          /bin/sh -c 'if test -e "$kubeletconf"; then
            stat -c "%U:%G" "$kubeletconf"
          else
            echo "File not found"
          fi'
        tests:
          bin_op: or
          test_items:
            - flag: root:root
            - flag: "File not found"
        remediation: |
          Run the following command (using the config file location identified in the Audit step)
          chown root:root $kubeletconf
          Not Applicable - Clusters provisioned by RKE do not require or maintain a configuration file for the kubelet.
          All configuration is passed in as arguments at container run time.
        scored: false
  - id: 4.2
    text: "Kubelet"
    checks:
      - id: 4.2.1
        text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
        tests:
          test_items:
            - flag: "--anonymous-auth"
              path: '{.authentication.anonymous.enabled}'
              compare:
                op: eq
                value: false
        remediation: |
          If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to
          `false`.
          If using executable arguments, edit the kubelet service file
          $kubeletsvc on each worker node and
          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
          `--anonymous-auth=false`
          Based on your system, restart the kubelet service. For example,
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: true
      - id: 4.2.2
        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
        tests:
          test_items:
            - flag: --authorization-mode
              path: '{.authorization.mode}'
              compare:
                op: nothave
                value: AlwaysAllow
        remediation: |
          If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If
          using executable arguments, edit the kubelet service file
          $kubeletsvc on each worker node and
          set the below parameter in KUBELET_AUTHZ_ARGS variable.
          --authorization-mode=Webhook
          Based on your system, restart the kubelet service. For example,
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: true
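      # For illustration only (not part of the benchmark definition): a minimal
      # sketch of the KubeletConfiguration fields that the two checks above
      # inspect via their `path` expressions, assuming a config file is used:
      #
      #   apiVersion: kubelet.config.k8s.io/v1beta1
      #   kind: KubeletConfiguration
      #   authentication:
      #     anonymous:
      #       enabled: false
      #   authorization:
      #     mode: Webhook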
      - id: 4.2.3
        text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
        tests:
          test_items:
            - flag: --client-ca-file
              path: '{.authentication.x509.clientCAFile}'
        remediation: |
          If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to
          the location of the client CA file.
          If using command line arguments, edit the kubelet service file
          $kubeletsvc on each worker node and
          set the below parameter in KUBELET_AUTHZ_ARGS variable.
          --client-ca-file=<path/to/client-ca-file>
          Based on your system, restart the kubelet service. For example,
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: true
      - id: 4.2.4
        text: "Verify that the --read-only-port argument is set to 0 (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
        tests:
          bin_op: or
          test_items:
            - flag: "--read-only-port"
              path: '{.readOnlyPort}'
              compare:
                op: eq
                value: 0
            - flag: "--read-only-port"
              path: '{.readOnlyPort}'
              set: false
        remediation: |
          If using a Kubelet config file, edit the file to set `readOnlyPort` to 0.
          If using command line arguments, edit the kubelet service file
          $kubeletsvc on each worker node and
          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
          --read-only-port=0
          Based on your system, restart the kubelet service. For example,
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: true
      - id: 4.2.5
        text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
        tests:
          test_items:
            - flag: --streaming-connection-idle-timeout
              path: '{.streamingConnectionIdleTimeout}'
              compare:
                op: noteq
                value: 0
            - flag: --streaming-connection-idle-timeout
              path: '{.streamingConnectionIdleTimeout}'
              set: false
          bin_op: or
        remediation: |
          If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a
          value other than 0.
          If using command line arguments, edit the kubelet service file
          $kubeletsvc on each worker node and
          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
          --streaming-connection-idle-timeout=5m
          Based on your system, restart the kubelet service. For example,
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: false
      - id: 4.2.6
        text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
        tests:
          test_items:
            - flag: --make-iptables-util-chains
              path: '{.makeIPTablesUtilChains}'
              compare:
                op: eq
                value: true
            - flag: --make-iptables-util-chains
              path: '{.makeIPTablesUtilChains}'
              set: false
          bin_op: or
        remediation: |
          If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`.
          If using command line arguments, edit the kubelet service file
          $kubeletsvc on each worker node and
          remove the --make-iptables-util-chains argument from the
          KUBELET_SYSTEM_PODS_ARGS variable.
          Based on your system, restart the kubelet service. For example:
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: true
      - id: 4.2.7
        text: "Ensure that the --hostname-override argument is not set (Manual)"
        audit: "/bin/ps -fC $kubeletbin "
        tests:
          test_items:
            - flag: --hostname-override
              set: false
        remediation: |
          Edit the kubelet service file $kubeletsvc
          on each worker node and remove the --hostname-override argument from the
          KUBELET_SYSTEM_PODS_ARGS variable.
          Based on your system, restart the kubelet service. For example,
          systemctl daemon-reload
          systemctl restart kubelet.service
          Not Applicable - Clusters provisioned by RKE set the --hostname-override to avoid any hostname configuration errors.
        scored: false
      - id: 4.2.8
        text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
        tests:
          test_items:
            - flag: --event-qps
              path: '{.eventRecordQPS}'
              compare:
                op: gte
                value: 0
            - flag: --event-qps
              path: '{.eventRecordQPS}'
              set: false
          bin_op: or
        remediation: |
          If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level.
          If using command line arguments, edit the kubelet service file
          $kubeletsvc on each worker node and
          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
          Based on your system, restart the kubelet service. For example,
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: false
      - id: 4.2.9
        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
        tests:
          test_items:
            - flag: --tls-cert-file
              path: '{.tlsCertFile}'
            - flag: --tls-private-key-file
              path: '{.tlsPrivateKeyFile}'
        remediation: |
          If using a Kubelet config file, edit the file to set `tlsCertFile` to the location
          of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile`
          to the location of the corresponding private key file.
          If using command line arguments, edit the kubelet service file
          $kubeletsvc on each worker node and
          set the below parameters in KUBELET_CERTIFICATE_ARGS variable.
          --tls-cert-file=<path/to/tls-certificate-file>
          --tls-private-key-file=<path/to/tls-key-file>
          Based on your system, restart the kubelet service. For example,
          systemctl daemon-reload
          systemctl restart kubelet.service
          Permissive - When generating serving certificates, functionality could break in conjunction with hostname overrides which are required for certain cloud providers.
        scored: false
      - id: 4.2.10
        text: "Ensure that the --rotate-certificates argument is not set to false (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
        tests:
          test_items:
            - flag: --rotate-certificates
              path: '{.rotateCertificates}'
              compare:
                op: eq
                value: true
            - flag: --rotate-certificates
              path: '{.rotateCertificates}'
              set: false
          bin_op: or
        remediation: |
          If using a Kubelet config file, edit the file to add the line `rotateCertificates: true` or
          remove it altogether to use the default value.
          If using command line arguments, edit the kubelet service file
          $kubeletsvc on each worker node and
          remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS
          variable.
          Based on your system, restart the kubelet service. For example,
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: true
      - id: 4.2.11
        text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
        tests:
          bin_op: or
          test_items:
            - flag: RotateKubeletServerCertificate
              path: '{.featureGates.RotateKubeletServerCertificate}'
              compare:
                op: nothave
                value: false
            - flag: RotateKubeletServerCertificate
              path: '{.featureGates.RotateKubeletServerCertificate}'
              set: false
        remediation: |
          Edit the kubelet service file $kubeletsvc
          on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable.
          --feature-gates=RotateKubeletServerCertificate=true
          Based on your system, restart the kubelet service. For example:
          systemctl daemon-reload
          systemctl restart kubelet.service
          Not Applicable - Clusters provisioned by RKE handle certificate rotation directly through RKE.
        scored: false
      - id: 4.2.12
        text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
        tests:
          test_items:
            - flag: --tls-cipher-suites
              path: '{range .tlsCipherSuites[:]}{}{'',''}{end}'
              compare:
                op: valid_elements
                value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
        remediation: |
          If using a Kubelet config file, edit the file to set `TLSCipherSuites` to
          TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
          or to a subset of these values.
          If using executable arguments, edit the kubelet service file
          $kubeletsvc on each worker node and
          set the --tls-cipher-suites parameter as follows, or to a subset of these values.
          --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
          Based on your system, restart the kubelet service. For example:
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: true
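      # For illustration only (not part of the benchmark definition): the
      # `tlsCipherSuites` field that the path expression above reads, sketched
      # as it would appear in a KubeletConfiguration file (a subset of the
      # allowed values):
      #
      #   tlsCipherSuites:
      #     - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
      #     - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384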
      - id: 4.2.13
        text: "Ensure that a limit is set on pod PIDs (Manual)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
        tests:
          test_items:
            - flag: --pod-max-pids
              path: '{.podPidsLimit}'
        remediation: |
          Decide on an appropriate level for this parameter and set it,
          either via the --pod-max-pids command line parameter or the PodPidsLimit configuration file setting.
        scored: false
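      # For illustration only (not part of the benchmark definition): a hedged
      # sketch of a pod PID limit in a KubeletConfiguration file; 4096 is an
      # arbitrary example value, not a CIS recommendation:
      #
      #   podPidsLimit: 4096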
311
cfg/rke2-cis-1.8/policies.yaml
Normal file
@@ -0,0 +1,311 @@
---
controls:
version: "rke2-cis-1.8"
id: 5
text: "Kubernetes Policies"
type: "policies"
groups:
  - id: 5.1
    text: "RBAC and Service Accounts"
    checks:
      - id: 5.1.1
        text: "Ensure that the cluster-admin role is only used where required (Manual)"
        type: "manual"
        remediation: |
          Identify all clusterrolebindings to the cluster-admin role. Check if they are used and
          if they need this role or if they could use a role with fewer privileges.
          Where possible, first bind users to a lower privileged role and then remove the
          clusterrolebinding to the cluster-admin role:
          kubectl delete clusterrolebinding [name]
        scored: false

      - id: 5.1.2
        text: "Minimize access to secrets (Manual)"
        type: "manual"
        remediation: |
          Where possible, remove get, list and watch access to Secret objects in the cluster.
        scored: false

      - id: 5.1.3
        text: "Minimize wildcard use in Roles and ClusterRoles (Manual)"
        type: "manual"
        remediation: |
          Where possible, replace any use of wildcards in clusterroles and roles with specific
          objects or actions.
        scored: false

      - id: 5.1.4
        text: "Minimize access to create pods (Manual)"
        type: "manual"
        remediation: |
          Where possible, remove create access to pod objects in the cluster.
        scored: false

      - id: 5.1.5
        text: "Ensure that default service accounts are not actively used. (Manual)"
        type: "manual"
        audit: check_for_default_sa.sh
        remediation: |
          Create explicit service accounts wherever a Kubernetes workload requires specific access
          to the Kubernetes API server.
          Modify the configuration of each default service account to include this value
          automountServiceAccountToken: false
          Permissive - Kubernetes provides default service accounts to be used.
        scored: false
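
      # For illustration only (not part of the benchmark definition): a sketch
      # of the remediation above applied to a default service account; the
      # namespace is a hypothetical placeholder:
      #
      #   apiVersion: v1
      #   kind: ServiceAccount
      #   metadata:
      #     name: default
      #     namespace: <namespace>
      #   automountServiceAccountToken: false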

      - id: 5.1.6
        text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)"
        type: "manual"
        remediation: |
          Modify the definition of pods and service accounts which do not need to mount service
          account tokens to disable it.
        scored: false

      - id: 5.1.7
        text: "Avoid use of system:masters group (Manual)"
        type: "manual"
        remediation: |
          Remove the system:masters group from all users in the cluster.
        scored: false

      - id: 5.1.8
        text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)"
        type: "manual"
        remediation: |
          Where possible, remove the impersonate, bind and escalate rights from subjects.
        scored: false

      - id: 5.1.9
        text: "Minimize access to create persistent volumes (Manual)"
        type: "manual"
        remediation: |
          Where possible, remove create access to PersistentVolume objects in the cluster.
        scored: false

      - id: 5.1.10
        text: "Minimize access to the proxy sub-resource of nodes (Manual)"
        type: "manual"
        remediation: |
          Where possible, remove access to the proxy sub-resource of node objects.
        scored: false

      - id: 5.1.11
        text: "Minimize access to the approval sub-resource of certificatesigningrequests objects (Manual)"
        type: "manual"
        remediation: |
          Where possible, remove access to the approval sub-resource of certificatesigningrequest objects.
        scored: false

      - id: 5.1.12
        text: "Minimize access to webhook configuration objects (Manual)"
        type: "manual"
        remediation: |
          Where possible, remove access to the validatingwebhookconfigurations or mutatingwebhookconfigurations objects.
        scored: false

      - id: 5.1.13
        text: "Minimize access to the service account token creation (Manual)"
        type: "manual"
        remediation: |
          Where possible, remove access to the token sub-resource of serviceaccount objects.
        scored: false

  - id: 5.2
    text: "Pod Security Standards"
    checks:
      - id: 5.2.1
        text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)"
        type: "manual"
        remediation: |
          Ensure that either Pod Security Admission or an external policy control system is in place
          for every namespace which contains user workloads.
        scored: false
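
      # For illustration only (not part of the benchmark definition): one way to
      # satisfy 5.2.1 is Pod Security Admission, enabled per namespace via
      # labels; the namespace name and the `restricted` level are example
      # choices, not requirements:
      #
      #   apiVersion: v1
      #   kind: Namespace
      #   metadata:
      #     name: <namespace>
      #     labels:
      #       pod-security.kubernetes.io/enforce: restricted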

      - id: 5.2.2
        text: "Minimize the admission of privileged containers (Manual)"
        type: "manual"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of privileged containers.
        scored: false

      - id: 5.2.3
        text: "Minimize the admission of containers wishing to share the host process ID namespace (Manual)"
        type: "manual"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of `hostPID` containers.
          Permissive - Enabling Pod Security Policy can cause applications to unexpectedly fail.
        scored: true

      - id: 5.2.4
        text: "Minimize the admission of containers wishing to share the host IPC namespace (Manual)"
        type: "manual"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of `hostIPC` containers.
          Permissive - Enabling Pod Security Policy can cause applications to unexpectedly fail.
        scored: true

      - id: 5.2.5
        text: "Minimize the admission of containers wishing to share the host network namespace (Manual)"
        type: "manual"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of `hostNetwork` containers.
          Permissive - Enabling Pod Security Policy can cause applications to unexpectedly fail.
        scored: true

      - id: 5.2.6
        text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)"
        type: "manual"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of containers with `.spec.allowPrivilegeEscalation` set to `true`.
        scored: false

      - id: 5.2.7
        text: "Minimize the admission of root containers (Manual)"
        type: "manual"
        remediation: |
          Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot`
          or `MustRunAs` with the range of UIDs not including 0, is set.
        scored: false

      - id: 5.2.8
        text: "Minimize the admission of containers with the NET_RAW capability (Manual)"
        type: "manual"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of containers with the `NET_RAW` capability.
        scored: false

      - id: 5.2.9
        text: "Minimize the admission of containers with added capabilities (Manual)"
        type: "manual"
        remediation: |
          Ensure that `allowedCapabilities` is not present in policies for the cluster unless
          it is set to an empty array.
        scored: false

      - id: 5.2.10
        text: "Minimize the admission of containers with capabilities assigned (Manual)"
        type: "manual"
        remediation: |
          Review the use of capabilities in applications running on your cluster. Where a namespace
          contains applications which do not require any Linux capabilities to operate, consider adding
          a PSP which forbids the admission of containers which do not drop all capabilities.
        scored: false

      - id: 5.2.11
        text: "Minimize the admission of Windows HostProcess containers (Manual)"
        type: "manual"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`.
        scored: false

      - id: 5.2.12
        text: "Minimize the admission of HostPath volumes (Manual)"
        type: "manual"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of containers with `hostPath` volumes.
        scored: false

      - id: 5.2.13
        text: "Minimize the admission of containers which use HostPorts (Manual)"
        type: "manual"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of containers which use `hostPort` sections.
        scored: false

  - id: 5.3
    text: "Network Policies and CNI"
    checks:
      - id: 5.3.1
        text: "Ensure that the CNI in use supports NetworkPolicies (Manual)"
        type: "manual"
        remediation: |
          If the CNI plugin in use does not support network policies, consideration should be given to
          making use of a different plugin, or finding an alternate mechanism for restricting traffic
          in the Kubernetes cluster.
        scored: false

      - id: 5.3.2
        text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)"
        type: "manual"
        remediation: |
          Follow the documentation and create NetworkPolicy objects as you need them.
          Permissive - Enabling Network Policies can prevent certain applications from communicating with each other.
        scored: false
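
      # For illustration only (not part of the benchmark definition): a common
      # starting point for 5.3.2 is a default-deny ingress NetworkPolicy per
      # namespace; the name and namespace are hypothetical placeholders:
      #
      #   apiVersion: networking.k8s.io/v1
      #   kind: NetworkPolicy
      #   metadata:
      #     name: default-deny-ingress
      #     namespace: <namespace>
      #   spec:
      #     podSelector: {}
      #     policyTypes:
      #       - Ingress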

  - id: 5.4
    text: "Secrets Management"
    checks:
      - id: 5.4.1
        text: "Prefer using Secrets as files over Secrets as environment variables (Manual)"
        type: "manual"
        remediation: |
          If possible, rewrite application code to read Secrets from mounted secret files, rather than
          from environment variables.
        scored: false
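
      # For illustration only (not part of the benchmark definition): a pod spec
      # fragment mounting a Secret as a file rather than exposing it via `env`;
      # the names here are hypothetical placeholders:
      #
      #   containers:
      #     - name: app
      #       volumeMounts:
      #         - name: app-secret
      #           mountPath: /etc/app-secret
      #           readOnly: true
      #   volumes:
      #     - name: app-secret
      #       secret:
      #         secretName: <secret-name>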

      - id: 5.4.2
        text: "Consider external secret storage (Manual)"
        type: "manual"
        remediation: |
          Refer to the Secrets management options offered by your cloud provider or a third-party
          secrets management solution.
        scored: false

  - id: 5.5
    text: "Extensible Admission Control"
    checks:
      - id: 5.5.1
        text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)"
        type: "manual"
        remediation: |
          Follow the Kubernetes documentation and set up image provenance.
        scored: false

  - id: 5.7
    text: "General Policies"
    checks:
      - id: 5.7.1
        text: "Create administrative boundaries between resources using namespaces (Manual)"
        type: "manual"
        remediation: |
          Follow the documentation and create namespaces for objects in your deployment as you need
          them.
        scored: false

      - id: 5.7.2
        text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)"
        type: "manual"
        remediation: |
          Use `securityContext` to enable the docker/default seccomp profile in your pod definitions.
          An example is as below:
          securityContext:
            seccompProfile:
              type: RuntimeDefault
        scored: false

      - id: 5.7.3
        text: "Apply SecurityContext to your Pods and Containers (Manual)"
        type: "manual"
        remediation: |
          Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a
          suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker
          Containers.
        scored: false
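
      # For illustration only (not part of the benchmark definition): a hedged
      # sketch of a container-level SecurityContext with commonly recommended
      # fields; the exact settings to apply depend on the workload:
      #
      #   securityContext:
      #     runAsNonRoot: true
      #     allowPrivilegeEscalation: false
      #     readOnlyRootFilesystem: true
      #     capabilities:
      #       drop:
      #         - ALL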

      - id: 5.7.4
        text: "The default namespace should not be used (Manual)"
        type: "manual"
        remediation: |
          Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
          resources and that all new resources are created in a specific namespace.
          Permissive - Kubernetes provides a default namespace.
        scored: false
@@ -243,10 +243,13 @@ func TestMapToCISVersion(t *testing.T) {
        {kubeVersion: "1.25", succeed: true, exp: "cis-1.7"},
        {kubeVersion: "1.26", succeed: true, exp: "cis-1.8"},
        {kubeVersion: "1.27", succeed: true, exp: "cis-1.9"},
        {kubeVersion: "1.28", succeed: true, exp: "cis-1.9"},
        {kubeVersion: "1.29", succeed: true, exp: "cis-1.9"},
        {kubeVersion: "1.30", succeed: true, exp: "cis-1.10"},
        {kubeVersion: "1.31", succeed: true, exp: "cis-1.10"},
        {kubeVersion: "1.28", succeed: true, exp: "cis-1.10"},
        {kubeVersion: "1.29", succeed: true, exp: "cis-1.11"},
        {kubeVersion: "1.30", succeed: true, exp: "cis-1.11"},
        {kubeVersion: "1.31", succeed: true, exp: "cis-1.11"},
        {kubeVersion: "1.32", succeed: true, exp: "cis-1.12"},
        {kubeVersion: "1.33", succeed: true, exp: "cis-1.12"},
        {kubeVersion: "1.34", succeed: true, exp: "cis-1.12"},
        {kubeVersion: "gke-1.2.0", succeed: true, exp: "gke-1.2.0"},
        {kubeVersion: "ocp-3.10", succeed: true, exp: "rh-0.7"},
        {kubeVersion: "ocp-3.11", succeed: true, exp: "rh-0.7"},
29
cmd/util.go
@@ -522,19 +522,26 @@ func getPlatformBenchmarkVersion(platform Platform) string {
    glog.V(3).Infof("getPlatformBenchmarkVersion platform: %s", platform)
    switch platform.Name {
    case "eks":
        oldEKSVersions := []string{"1.15", "1.16", "1.17", "1.18", "1.19", "1.20", "1.21", "1.22", "1.23", "1.24", "1.25", "1.26", "1.27", "1.28"}
        if slices.Contains(oldEKSVersions, platform.Version) {
        switch platform.Version {
        case "1.15", "1.16", "1.17", "1.18", "1.19":
            return "eks-1.0.1"
        case "1.29", "1.30", "1.31":
            return "eks-1.7.0"
        case "1.32", "1.33", "1.34":
            return "eks-1.8.0"
        default:
            return "eks-1.5.0"
        }
        return "eks-1.7.0"
    case "aks":
        return "aks-1.7"
    case "gke":
        switch platform.Version {
        case "1.15", "1.16", "1.17", "1.18", "1.19":
            return "gke-1.0"
        case "1.29", "1.30", "1.31":
        case "1.28", "1.29", "1.30":
            return "gke-1.6.0"
        case "1.31", "1.32", "1.33", "1.34":
            return "gke-1.8.0"
        default:
            return "gke-1.2.0"
        }
@@ -546,6 +553,10 @@ func getPlatformBenchmarkVersion(platform Platform) string {
            return "rh-0.7"
        case "4.1":
            return "rh-1.0"
        case "4.11":
            return "rh-1.4"
        case "4.13":
            return "rh-1.8"
        }
    case "vmware":
        return "tkgi-1.2.53"
@@ -575,10 +586,12 @@ func getPlatformBenchmarkVersion(platform Platform) string {
            return "rke2-cis-1.23"
        case "1.24":
            return "rke2-cis-1.24"
        case "1.25", "1.26", "1.27":
        case "1.25":
            return "rke2-cis-1.7"
        case "1.26", "1.27":
            return "rke2-cis-1.8"
        default:
            return "rke2-cis-1.7"
            return "rke2-cis-1.8"
        }
    }
    return ""
@@ -621,10 +634,10 @@ func getOpenShiftInfo() Platform {

func getOcpValidVersion(ocpVer string) (string, error) {
    ocpOriginal := ocpVer

    valid := []string{"3.10", "4.1", "4.11", "4.13"}
    for !isEmpty(ocpVer) {
        glog.V(3).Info(fmt.Sprintf("getOcpBenchmarkVersion check for ocp: %q \n", ocpVer))
        if ocpVer == "3.10" || ocpVer == "4.1" {
        if slices.Contains(valid, ocpVer) {
            glog.V(1).Info(fmt.Sprintf("getOcpBenchmarkVersion found valid version for ocp: %q \n", ocpVer))
            return ocpVer, nil
        }

@@ -657,6 +657,27 @@ func Test_getPlatformBenchmarkVersion(t *testing.T) {
            },
            want: "eks-1.7.0",
        },
        {
            name: "eks 1.29",
            args: args{
                platform: Platform{Name: "eks", Version: "1.29"},
            },
            want: "eks-1.7.0",
        },
        {
            name: "eks 1.30",
            args: args{
                platform: Platform{Name: "eks", Version: "1.30"},
            },
            want: "eks-1.7.0",
        },
        {
            name: "eks 1.32",
            args: args{
                platform: Platform{Name: "eks", Version: "1.32"},
            },
            want: "eks-1.8.0",
        },
        {
            name: "eks 1.24",
            args: args{
@@ -685,6 +706,20 @@ func Test_getPlatformBenchmarkVersion(t *testing.T) {
            },
            want: "gke-1.2.0",
        },
        {
            name: "gke 1.28",
            args: args{
                platform: Platform{Name: "gke", Version: "1.28"},
            },
            want: "gke-1.6.0",
        },
        {
            name: "gke 1.31",
            args: args{
                platform: Platform{Name: "gke", Version: "1.31"},
            },
            want: "gke-1.8.0",
        },
        {
            name: "aliyun",
            args: args{
@@ -713,6 +748,20 @@ func Test_getPlatformBenchmarkVersion(t *testing.T) {
            },
            want: "rh-0.7",
        },
        {
            name: "openshift4",
            args: args{
                platform: Platform{Name: "ocp", Version: "4.11"},
            },
            want: "rh-1.4",
        },
        {
            name: "openshift4",
            args: args{
                platform: Platform{Name: "ocp", Version: "4.13"},
            },
            want: "rh-1.8",
        },
        {
            name: "openshift4",
            args: args{
@@ -737,10 +786,17 @@ func Test_getPlatformBenchmarkVersion(t *testing.T) {
        {
            name: "rke2",
            args: args{
                platform: Platform{Name: "rke2r", Version: "1.27"},
                platform: Platform{Name: "rke2r", Version: "1.25"},
            },
            want: "rke2-cis-1.7",
        },
        {
            name: "rke2",
            args: args{
                platform: Platform{Name: "rke2r", Version: "1.26"},
            },
            want: "rke2-cis-1.8",
        },
        {
            name: "aks",
            args: args{

@@ -24,6 +24,8 @@ The following table shows the valid targets based on the CIS Benchmark version.
| cis-1.8 | master, controlplane, node, etcd, policies |
| cis-1.9 | master, controlplane, node, etcd, policies |
| cis-1.10 | master, controlplane, node, etcd, policies |
| cis-1.11 | master, controlplane, node, etcd, policies |
| cis-1.12 | master, controlplane, node, etcd, policies |
| gke-1.0 | master, controlplane, node, etcd, policies, managedservices |
| gke-1.2.0 | controlplane, node, policies, managedservices |
| gke-1.6.0 | controlplane, node, policies, managedservices |

@@ -17,8 +17,10 @@ Other benchmarks are defined by hardening guides.
| CIS | [1.24](https://workbench.cisecurity.org/benchmarks/10873) | cis-1.24 | 1.24 |
| CIS | [1.7](https://workbench.cisecurity.org/benchmarks/11107) | cis-1.7 | 1.25 |
| CIS | [1.8](https://workbench.cisecurity.org/benchmarks/12958) | cis-1.8 | 1.26 |
| CIS | [1.9](https://workbench.cisecurity.org/benchmarks/16828) | cis-1.9 | 1.27-1.29 |
| CIS | [1.10](https://workbench.cisecurity.org/benchmarks/17568) | cis-1.10 | 1.28-1.31 |
| CIS | [1.9](https://workbench.cisecurity.org/benchmarks/16828) | cis-1.9 | 1.27 |
| CIS | [1.10](https://workbench.cisecurity.org/benchmarks/17568) | cis-1.10 | 1.28 |
| CIS | [1.11](https://workbench.cisecurity.org/benchmarks/21709) | cis-1.11 | 1.29-1.31 |
| CIS | [1.12](https://workbench.cisecurity.org/benchmarks/22107) | cis-1.12 | 1.32-1.34 |
| CIS | [GKE 1.0.0](https://workbench.cisecurity.org/benchmarks/4536) | gke-1.0 | GKE |
| CIS | [GKE 1.2.0](https://workbench.cisecurity.org/benchmarks/7534) | gke-1.2.0 | GKE |
| CIS | [GKE 1.6.0](https://workbench.cisecurity.org/benchmarks/16093) | gke-1.6.0 | GKE |

103
go.mod
@@ -1,51 +1,50 @@
module github.com/aquasecurity/kube-bench

go 1.24.4
go 1.25.0

require (
    github.com/aws/aws-sdk-go-v2 v1.38.0
    github.com/aws/aws-sdk-go-v2/config v1.31.0
    github.com/aws/aws-sdk-go-v2/service/securityhub v1.62.0
    github.com/aws/aws-sdk-go-v2 v1.41.1
    github.com/aws/aws-sdk-go-v2/config v1.32.7
    github.com/aws/aws-sdk-go-v2/service/securityhub v1.67.3
    github.com/fatih/color v1.18.0
    github.com/golang/glog v1.2.5
    github.com/magiconair/properties v1.8.10
    github.com/onsi/ginkgo v1.16.5
    github.com/pkg/errors v0.9.1
    github.com/spf13/cobra v1.9.1
    github.com/spf13/viper v1.20.1
    github.com/stretchr/testify v1.10.0
    github.com/spf13/cobra v1.10.2
    github.com/spf13/viper v1.21.0
    github.com/stretchr/testify v1.11.1
    golang.org/x/exp v0.0.0-20250718183923-645b1fa84792
    gopkg.in/yaml.v2 v2.4.0
    gorm.io/driver/postgres v1.6.0
    gorm.io/gorm v1.30.1
    k8s.io/apimachinery v0.33.4
    k8s.io/client-go v0.33.4
    gorm.io/gorm v1.31.1
    k8s.io/apimachinery v0.35.1
    k8s.io/client-go v0.35.1
)

require (
    github.com/aws/aws-sdk-go-v2/credentials v1.18.4 // indirect
    github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.3 // indirect
    github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.3 // indirect
    github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.3 // indirect
    github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
    github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 // indirect
    github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.3 // indirect
    github.com/aws/aws-sdk-go-v2/service/sso v1.28.0 // indirect
    github.com/aws/aws-sdk-go-v2/service/ssooidc v1.33.0 // indirect
    github.com/aws/aws-sdk-go-v2/service/sts v1.37.0 // indirect
    github.com/aws/smithy-go v1.22.5 // indirect
    github.com/aws/aws-sdk-go-v2/credentials v1.19.7 // indirect
    github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 // indirect
    github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 // indirect
    github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 // indirect
    github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
    github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect
    github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 // indirect
    github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 // indirect
    github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 // indirect
    github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 // indirect
    github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 // indirect
    github.com/aws/smithy-go v1.24.0 // indirect
    github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
    github.com/emicklei/go-restful/v3 v3.11.0 // indirect
    github.com/fsnotify/fsnotify v1.8.0 // indirect
    github.com/fxamacker/cbor/v2 v2.7.0 // indirect
    github.com/go-logr/logr v1.4.2 // indirect
    github.com/emicklei/go-restful/v3 v3.12.2 // indirect
    github.com/fsnotify/fsnotify v1.9.0 // indirect
    github.com/fxamacker/cbor/v2 v2.9.0 // indirect
    github.com/go-logr/logr v1.4.3 // indirect
    github.com/go-openapi/jsonpointer v0.21.0 // indirect
    github.com/go-openapi/jsonreference v0.20.2 // indirect
    github.com/go-openapi/swag v0.23.0 // indirect
    github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
    github.com/gogo/protobuf v1.3.2 // indirect
    github.com/google/gnostic-models v0.6.9 // indirect
    github.com/google/go-cmp v0.7.0 // indirect
    github.com/google/gnostic-models v0.7.0 // indirect
    github.com/google/uuid v1.6.0 // indirect
    github.com/inconshreveable/mousetrap v1.1.0 // indirect
    github.com/jackc/pgpassfile v1.0.0 // indirect
@@ -60,38 +59,38 @@ require (
    github.com/mattn/go-colorable v0.1.13 // indirect
    github.com/mattn/go-isatty v0.0.20 // indirect
    github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
    github.com/modern-go/reflect2 v1.0.2 // indirect
    github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
    github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
    github.com/pelletier/go-toml/v2 v2.2.3 // indirect
    github.com/pelletier/go-toml/v2 v2.2.4 // indirect
    github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
    github.com/sagikazarmark/locafero v0.7.0 // indirect
    github.com/sourcegraph/conc v0.3.0 // indirect
    github.com/spf13/afero v1.12.0 // indirect
    github.com/spf13/cast v1.7.1 // indirect
    github.com/spf13/pflag v1.0.6 // indirect
    github.com/sagikazarmark/locafero v0.11.0 // indirect
    github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect
    github.com/spf13/afero v1.15.0 // indirect
    github.com/spf13/cast v1.10.0 // indirect
    github.com/spf13/pflag v1.0.10 // indirect
    github.com/stretchr/objx v0.5.2 // indirect
    github.com/subosito/gotenv v1.6.0 // indirect
    github.com/x448/float16 v0.8.4 // indirect
    go.uber.org/atomic v1.10.0 // indirect
    go.uber.org/multierr v1.9.0 // indirect
    golang.org/x/crypto v0.36.0 // indirect
    golang.org/x/net v0.38.0 // indirect
    golang.org/x/oauth2 v0.27.0 // indirect
    golang.org/x/sync v0.16.0 // indirect
    golang.org/x/sys v0.31.0 // indirect
    golang.org/x/term v0.30.0 // indirect
    golang.org/x/text v0.23.0 // indirect
    go.yaml.in/yaml/v2 v2.4.3 // indirect
    go.yaml.in/yaml/v3 v3.0.4 // indirect
    golang.org/x/crypto v0.45.0 // indirect
    golang.org/x/net v0.47.0 // indirect
    golang.org/x/oauth2 v0.30.0 // indirect
    golang.org/x/sync v0.18.0 // indirect
    golang.org/x/sys v0.38.0 // indirect
    golang.org/x/term v0.37.0 // indirect
    golang.org/x/text v0.31.0 // indirect
    golang.org/x/time v0.9.0 // indirect
    google.golang.org/protobuf v1.36.5 // indirect
    gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
    google.golang.org/protobuf v1.36.8 // indirect
    gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect
    gopkg.in/inf.v0 v0.9.1 // indirect
    gopkg.in/yaml.v3 v3.0.1 // indirect
    k8s.io/api v0.33.4 // indirect
    k8s.io/api v0.35.1 // indirect
    k8s.io/klog/v2 v2.130.1 // indirect
    k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
    k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
    sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
    k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect
    k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 // indirect
    sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect
    sigs.k8s.io/randfill v1.0.0 // indirect
    sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
    sigs.k8s.io/yaml v1.4.0 // indirect
    sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
    sigs.k8s.io/yaml v1.6.0 // indirect
)

234
go.sum
@@ -1,51 +1,55 @@
github.com/aws/aws-sdk-go-v2 v1.38.0 h1:UCRQ5mlqcFk9HJDIqENSLR3wiG1VTWlyUfLDEvY7RxU=
github.com/aws/aws-sdk-go-v2 v1.38.0/go.mod h1:9Q0OoGQoboYIAJyslFyF1f5K1Ryddop8gqMhWx/n4Wg=
github.com/aws/aws-sdk-go-v2/config v1.31.0 h1:9yH0xiY5fUnVNLRWO0AtayqwU1ndriZdN78LlhruJR4=
github.com/aws/aws-sdk-go-v2/config v1.31.0/go.mod h1:VeV3K72nXnhbe4EuxxhzsDc/ByrCSlZwUnWH52Nde/I=
github.com/aws/aws-sdk-go-v2/credentials v1.18.4 h1:IPd0Algf1b+Qy9BcDp0sCUcIWdCQPSzDoMK3a8pcbUM=
github.com/aws/aws-sdk-go-v2/credentials v1.18.4/go.mod h1:nwg78FjH2qvsRM1EVZlX9WuGUJOL5od+0qvm0adEzHk=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.3 h1:GicIdnekoJsjq9wqnvyi2elW6CGMSYKhdozE7/Svh78=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.3/go.mod h1:R7BIi6WNC5mc1kfRM7XM/VHC3uRWkjc396sfabq4iOo=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.3 h1:o9RnO+YZ4X+kt5Z7Nvcishlz0nksIt2PIzDglLMP0vA=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.3/go.mod h1:+6aLJzOG1fvMOyzIySYjOFjcguGvVRL68R+uoRencN4=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.3 h1:joyyUFhiTQQmVK6ImzNU9TQSNRNeD9kOklqTzyk5v6s=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.3/go.mod h1:+vNIyZQP3b3B1tSLI0lxvrU9cfM7gpdRXMFfm67ZcPc=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 h1:6+lZi2JeGKtCraAj1rpoZfKqnQ9SptseRZioejfUOLM=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0/go.mod h1:eb3gfbVIxIoGgJsi9pGne19dhCBpK6opTYpQqAmdy44=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.3 h1:ieRzyHXypu5ByllM7Sp4hC5f/1Fy5wqxqY0yB85hC7s=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.3/go.mod h1:O5ROz8jHiOAKAwx179v+7sHMhfobFVi6nZt8DEyiYoM=
github.com/aws/aws-sdk-go-v2/service/securityhub v1.62.0 h1:sKzvE3fkQNa4iXbS2zhPsWhoYZUuPGeyCx29zWaUAyg=
github.com/aws/aws-sdk-go-v2/service/securityhub v1.62.0/go.mod h1:O3x2LxaDhY0QmJKHLaw2MGgKeYhDMWvi7zsJ+rcnWQU=
github.com/aws/aws-sdk-go-v2/service/sso v1.28.0 h1:Mc/MKBf2m4VynyJkABoVEN+QzkfLqGj0aiJuEe7cMeM=
github.com/aws/aws-sdk-go-v2/service/sso v1.28.0/go.mod h1:iS5OmxEcN4QIPXARGhavH7S8kETNL11kym6jhoS7IUQ=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.33.0 h1:6csaS/aJmqZQbKhi1EyEMM7yBW653Wy/B9hnBofW+sw=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.33.0/go.mod h1:59qHWaY5B+Rs7HGTuVGaC32m0rdpQ68N8QCN3khYiqs=
github.com/aws/aws-sdk-go-v2/service/sts v1.37.0 h1:MG9VFW43M4A8BYeAfaJJZWrroinxeTi2r3+SnmLQfSA=
github.com/aws/aws-sdk-go-v2/service/sts v1.37.0/go.mod h1:JdeBDPgpJfuS6rU/hNglmOigKhyEZtBmbraLE4GK1J8=
github.com/aws/smithy-go v1.22.5 h1:P9ATCXPMb2mPjYBgueqJNCA5S9UfktsW0tTxi+a7eqw=
github.com/aws/smithy-go v1.22.5/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0=
github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
github.com/aws/aws-sdk-go-v2 v1.41.1 h1:ABlyEARCDLN034NhxlRUSZr4l71mh+T5KAeGh6cerhU=
github.com/aws/aws-sdk-go-v2 v1.41.1/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=
github.com/aws/aws-sdk-go-v2/config v1.32.7 h1:vxUyWGUwmkQ2g19n7JY/9YL8MfAIl7bTesIUykECXmY=
github.com/aws/aws-sdk-go-v2/config v1.32.7/go.mod h1:2/Qm5vKUU/r7Y+zUk/Ptt2MDAEKAfUtKc1+3U1Mo3oY=
github.com/aws/aws-sdk-go-v2/credentials v1.19.7 h1:tHK47VqqtJxOymRrNtUXN5SP/zUTvZKeLx4tH6PGQc8=
github.com/aws/aws-sdk-go-v2/credentials v1.19.7/go.mod h1:qOZk8sPDrxhf+4Wf4oT2urYJrYt3RejHSzgAquYeppw=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 h1:I0GyV8wiYrP8XpA70g1HBcQO1JlQxCMTW9npl5UbDHY=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17/go.mod h1:tyw7BOl5bBe/oqvoIeECFJjMdzXoa/dfVz3QQ5lgHGA=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 h1:xOLELNKGp2vsiteLsvLPwxC+mYmO6OZ8PYgiuPJzF8U=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17/go.mod h1:5M5CI3D12dNOtH3/mk6minaRwI2/37ifCURZISxA/IQ=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 h1:WWLqlh79iO48yLkj1v3ISRNiv+3KdQoZ6JWyfcsyQik=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17/go.mod h1:EhG22vHRrvF8oXSTYStZhJc1aUgKtnJe+aOiFEV90cM=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 h1:RuNSMoozM8oXlgLG/n6WLaFGoea7/CddrCfIiSA+xdY=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17/go.mod h1:F2xxQ9TZz5gDWsclCtPQscGpP0VUOc8RqgFM3vDENmU=
github.com/aws/aws-sdk-go-v2/service/securityhub v1.67.3 h1:FEs3IkfJWp+Sz3ZY6sAxmebBF0lr1wBcTWkuFW1OFJg=
github.com/aws/aws-sdk-go-v2/service/securityhub v1.67.3/go.mod h1:3wnS16Wip5w0uh9kVFBhuMFmdkrMBr8Fc96kAY5h13o=
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 h1:VrhDvQib/i0lxvr3zqlUwLwJP4fpmpyD9wYG1vfSu+Y=
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5/go.mod h1:k029+U8SY30/3/ras4G/Fnv/b88N4mAfliNn08Dem4M=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 h1:v6EiMvhEYBoHABfbGB4alOYmCIrcgyPPiBE1wZAEbqk=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.9/go.mod h1:yifAsgBxgJWn3ggx70A3urX2AN49Y5sJTD1UQFlfqBw=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 h1:gd84Omyu9JLriJVCbGApcLzVR3XtmC4ZDPcAI6Ftvds=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13/go.mod h1:sTGThjphYE4Ohw8vJiRStAcu3rbjtXRsdNB0TvZ5wwo=
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 h1:5fFjR/ToSOzB2OQ/XqWpZBmNvmP/pJ1jOWYlFDJTjRQ=
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6/go.mod h1:qgFDZQSD/Kys7nJnVqYlWKnh0SSdMjAi0uSwON4wgYQ=
github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk=
github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
|
||||
github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
|
||||
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
|
||||
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
|
||||
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
|
||||
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
|
||||
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
|
||||
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
|
||||
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
|
||||
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
|
||||
@@ -60,8 +64,6 @@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
||||
github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
|
||||
github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang/glog v1.2.5 h1:DrW6hGnjIhtvhOIiAKT6Psh/Kd/ldepEa81DKeiRJ5I=
|
||||
github.com/golang/glog v1.2.5/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
@@ -71,17 +73,16 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
|
||||
github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
|
||||
github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
|
||||
github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
|
||||
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
|
||||
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8=
|
||||
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
@@ -103,8 +104,6 @@ github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8Hm
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
@@ -124,8 +123,9 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
|
||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||
@@ -135,36 +135,37 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
|
||||
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
||||
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
|
||||
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
|
||||
github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
|
||||
github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
|
||||
github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns=
|
||||
github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
|
||||
github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
|
||||
github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M=
|
||||
github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc=
|
||||
github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A=
|
||||
github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k=
|
||||
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
|
||||
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
|
||||
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
|
||||
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
||||
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo=
|
||||
github.com/sagikazarmark/locafero v0.7.0/go.mod h1:2za3Cg5rMaTMoG/2Ulr9AwtFaIppKXTRYnozin4aB5k=
|
||||
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
|
||||
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
|
||||
github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs=
|
||||
github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4=
|
||||
github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y=
|
||||
github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
|
||||
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
|
||||
github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
|
||||
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
|
||||
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4=
|
||||
github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4=
|
||||
github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc=
|
||||
github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik=
|
||||
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw=
|
||||
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U=
|
||||
github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I=
|
||||
github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg=
|
||||
github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=
|
||||
github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
|
||||
github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
|
||||
github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
|
||||
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
|
||||
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU=
|
||||
github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
@@ -176,43 +177,41 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
|
||||
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
|
||||
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
||||
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
|
||||
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
|
||||
go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
|
||||
go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
|
||||
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
|
||||
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
|
||||
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
|
||||
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
|
||||
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
|
||||
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
|
||||
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
|
||||
golang.org/x/exp v0.0.0-20250718183923-645b1fa84792 h1:R9PFI6EUdfVKgwKjZef7QIwGcBKu86OEFpJ9nUEP2l4=
|
||||
golang.org/x/exp v0.0.0-20250718183923-645b1fa84792/go.mod h1:A+z0yzpGtvnG90cToK5n2tu8UJVP2XUATh+r+sfOOOc=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
|
||||
golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
|
||||
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
|
||||
golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
|
||||
golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
|
||||
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
|
||||
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
|
||||
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
|
||||
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
|
||||
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
|
||||
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -224,23 +223,21 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
|
||||
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
|
||||
golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
|
||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
|
||||
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
|
||||
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
|
||||
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
|
||||
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
|
||||
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
|
||||
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0=
|
||||
golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw=
|
||||
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
|
||||
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
@@ -251,13 +248,13 @@ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQ
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
|
||||
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc=
|
||||
google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
|
||||
gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo=
|
||||
gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
@@ -273,26 +270,25 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gorm.io/driver/postgres v1.6.0 h1:2dxzU8xJ+ivvqTRph34QX+WrRaJlmfyPqXmoGVjMBa4=
|
||||
gorm.io/driver/postgres v1.6.0/go.mod h1:vUw0mrGgrTK+uPHEhAdV4sfFELrByKVGnaVRkXDhtWo=
|
||||
gorm.io/gorm v1.30.1 h1:lSHg33jJTBxs2mgJRfRZeLDG+WZaHYCk3Wtfl6Ngzo4=
|
||||
gorm.io/gorm v1.30.1/go.mod h1:8Z33v652h4//uMA76KjeDH8mJXPm1QNCYrMeatR0DOE=
|
||||
k8s.io/api v0.33.4 h1:oTzrFVNPXBjMu0IlpA2eDDIU49jsuEorGHB4cvKupkk=
|
||||
k8s.io/api v0.33.4/go.mod h1:VHQZ4cuxQ9sCUMESJV5+Fe8bGnqAARZ08tSTdHWfeAc=
|
||||
k8s.io/apimachinery v0.33.4 h1:SOf/JW33TP0eppJMkIgQ+L6atlDiP/090oaX0y9pd9s=
|
||||
k8s.io/apimachinery v0.33.4/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
|
||||
k8s.io/client-go v0.33.4 h1:TNH+CSu8EmXfitntjUPwaKVPN0AYMbc9F1bBS8/ABpw=
|
||||
k8s.io/client-go v0.33.4/go.mod h1:LsA0+hBG2DPwovjd931L/AoaezMPX9CmBgyVyBZmbCY=
|
||||
gorm.io/gorm v1.31.1 h1:7CA8FTFz/gRfgqgpeKIBcervUn3xSyPUmr6B2WXJ7kg=
|
||||
gorm.io/gorm v1.31.1/go.mod h1:XyQVbO2k6YkOis7C2437jSit3SsDK72s7n7rsSHd+Gs=
|
||||
k8s.io/api v0.35.1 h1:0PO/1FhlK/EQNVK5+txc4FuhQibV25VLSdLMmGpDE/Q=
|
||||
k8s.io/api v0.35.1/go.mod h1:28uR9xlXWml9eT0uaGo6y71xK86JBELShLy4wR1XtxM=
|
||||
k8s.io/apimachinery v0.35.1 h1:yxO6gV555P1YV0SANtnTjXYfiivaTPvCTKX6w6qdDsU=
|
||||
k8s.io/apimachinery v0.35.1/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns=
|
||||
k8s.io/client-go v0.35.1 h1:+eSfZHwuo/I19PaSxqumjqZ9l5XiTEKbIaJ+j1wLcLM=
|
||||
k8s.io/client-go v0.35.1/go.mod h1:1p1KxDt3a0ruRfc/pG4qT/3oHmUj1AhSHEcxNSGg+OA=
|
||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
|
||||
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
|
||||
sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
|
||||
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE=
|
||||
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ=
|
||||
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck=
|
||||
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg=
|
||||
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
|
||||
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
|
||||
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
|
||||
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
|
||||
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
|
||||
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
|
||||
sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
|
||||
|
||||
2
job.yaml
@@ -11,7 +11,7 @@ spec:
    spec:
      containers:
        - command: ["kube-bench"]
          image: docker.io/aquasec/kube-bench:v0.12.0
          image: docker.io/aquasec/kube-bench:v0.14.1
          name: kube-bench
          volumeMounts:
            - name: var-lib-cni
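For context, the hunk above only bumps the image tag in the bundled Job manifest; the way the benchmark is run is unchanged. A minimal usage sketch, assuming cluster access through the default kubeconfig and the upstream job.yaml (the 120s timeout is an arbitrary choice, not taken from this diff):

# Apply the bundled Job, wait for it to complete, then read the report from the pod log
kubectl apply -f job.yaml
kubectl wait --for=condition=complete job/kube-bench --timeout=120s
kubectl logs job/kube-bench
kubectl delete -f job.yaml   # remove the completed Job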
2
makefile
@@ -11,7 +11,7 @@ uname := $(shell uname -s)
BUILDX_PLATFORM ?= linux/amd64,linux/arm64,linux/arm,linux/ppc64le,linux/s390x
DOCKER_ORGS ?= aquasec public.ecr.aws/aquasecurity
GOARCH ?= $@
KUBECTL_VERSION ?= 1.34.0-rc.2
KUBECTL_VERSION ?= 1.35.0
ARCH ?= $(shell go env GOARCH)

ifneq ($(findstring Microsoft,$(shell uname -r)),)
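As a usage note on the hunk above: KUBECTL_VERSION is declared with ?=, so the new 1.35.0 pin is only a default and can be overridden per invocation without editing the makefile. A small sketch (the build-docker target name is illustrative, not taken from this diff; substitute whichever target consumes KUBECTL_VERSION):

# ?= assigns only when the variable is not already set, so a
# command-line assignment takes precedence over the pinned default
make build-docker KUBECTL_VERSION=1.34.0-rc.2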