Mirror of https://github.com/aquasecurity/kube-bench.git (synced 2026-03-01 01:00:22 +00:00)

Compare commits: 37 commits
| Author | SHA1 | Date |
|---|---|---|
| | 8a7204d42f | |
| | 8d832611f6 | |
| | 6a7c22723f | |
| | 21d0908485 | |
| | f1807bb192 | |
| | c1bee59a02 | |
| | 8e7777928f | |
| | 43a9034b2f | |
| | 5848e5df6c | |
| | 651df2d4ce | |
| | 1aa87bbebb | |
| | 06159c6edb | |
| | 22f5a1c559 | |
| | 0ec876e1bc | |
| | 9e8c6b2c7d | |
| | 581b68d985 | |
| | 379f11996f | |
| | 659b0c1cad | |
| | 8afa78abaf | |
| | 2938a24924 | |
| | 462a50341a | |
| | 60eb8104ad | |
| | 6eb894633a | |
| | 428f433fae | |
| | 8a3701577b | |
| | e3e9e7d390 | |
| | e25d283dd1 | |
| | b48ee8511f | |
| | 315817617b | |
| | 227665c9e8 | |
| | 4fc9c0e7a8 | |
| | 1f401b1a50 | |
| | 10e0a78701 | |
| | 182cbaa71d | |
| | 7793925b22 | |
| | 1cf0f8cd92 | |
| | c9382e4e96 | |
.github/workflows/build.yml (vendored, 41 lines changed)

@@ -14,7 +14,6 @@ on:
       - "LICENSE"
       - "NOTICE"
 env:
-  GO_VERSION: "1.23.6"
   KIND_VERSION: "v0.11.1"
   KIND_IMAGE: "kindest/node:v1.21.1@sha256:69860bda5563ac81e3c0057d654b5253219618a22ec3a346306239bba8cfa1a6"

@@ -23,12 +22,12 @@ jobs:
     name: Lint
     runs-on: ubuntu-latest
     steps:
-      - name: Setup Go
-        uses: actions/setup-go@v5
-        with:
-          go-version: ${{ env.GO_VERSION }}
       - name: Checkout code
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6
+      - name: Setup Go
+        uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
+        with:
+          go-version-file: go.mod
       - name: yaml-lint
         uses: ibiqlik/action-yamllint@v3
       - name: Setup golangci-lint

@@ -40,12 +39,12 @@ jobs:
     name: Unit tests
     runs-on: ubuntu-latest
     steps:
-      - name: Setup Go
-        uses: actions/setup-go@v5
-        with:
-          go-version: ${{ env.GO_VERSION }}
       - name: Checkout code
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6
+      - name: Setup Go
+        uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
+        with:
+          go-version-file: go.mod
       - name: Run unit tests
         run: make tests
       - name: Upload code coverage

@@ -56,12 +55,12 @@ jobs:
     name: E2e tests
     runs-on: ubuntu-latest
     steps:
-      - name: Setup Go
-        uses: actions/setup-go@v5
-        with:
-          go-version: ${{ env.GO_VERSION }}
       - name: Checkout code
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6
+      - name: Setup Go
+        uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
+        with:
+          go-version-file: go.mod
       - name: Setup Kubernetes cluster (KIND)
         uses: engineerd/setup-kind@v0.6.2
         with:

@@ -86,14 +85,14 @@ jobs:
     runs-on: ubuntu-latest
     needs: [e2e, unit]
     steps:
-      - name: Setup Go
-        uses: actions/setup-go@v5
-        with:
-          go-version: ${{ env.GO_VERSION }}
       - name: Checkout code
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6
         with:
           fetch-depth: 0
+      - name: Setup Go
+        uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
+        with:
+          go-version-file: go.mod
       - name: Dry-run release snapshot
         uses: goreleaser/goreleaser-action@v6
         with:
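For readers unfamiliar with the `go-version-file` input used above: it makes `actions/setup-go` read the Go toolchain version from the repository's `go.mod`, which is why the pinned `GO_VERSION` env var can be removed. A minimal sketch (not taken from this diff; step layout is illustrative):

```yaml
# Minimal sketch: setup-go resolves the Go version from go.mod,
# so the workflow no longer carries a hard-coded version string.
steps:
  - name: Checkout code
    uses: actions/checkout@v6
  - name: Setup Go
    uses: actions/setup-go@v6
    with:
      go-version-file: go.mod   # reads the go/toolchain directive from go.mod
```

Pinning the action to a full commit SHA (as the diff does with `setup-go@7a3fe6...`) additionally protects against a mutable tag being repointed.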
.github/workflows/mkdocs-deploy.yaml (vendored, 2 lines changed)

@@ -16,7 +16,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout main
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6
         with:
           fetch-depth: 0
           persist-credentials: true
.github/workflows/publish.yml (vendored, 4 lines changed)

@@ -16,14 +16,14 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Check Out Repo
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6
       - name: Set up QEMU
         uses: docker/setup-qemu-action@v3
       - name: Set up Docker Buildx
         id: buildx
         uses: docker/setup-buildx-action@v3
       - name: Cache Docker layers
-        uses: actions/cache@v4
+        uses: actions/cache@v5
         with:
           path: /tmp/.buildx-cache
           key: ${{ runner.os }}-buildxarch-${{ github.sha }}
.github/workflows/release.yml (vendored, 11 lines changed)

@@ -5,7 +5,6 @@ on:
     tags:
       - "v*"
 env:
-  GO_VERSION: "1.23.6"
   KIND_VERSION: "v0.11.1"
   KIND_IMAGE: "kindest/node:v1.21.1@sha256:69860bda5563ac81e3c0057d654b5253219618a22ec3a346306239bba8cfa1a6"

@@ -14,14 +13,14 @@ jobs:
     name: Release
     runs-on: ubuntu-latest
     steps:
-      - name: Setup Go
-        uses: actions/setup-go@v5
-        with:
-          go-version: ${{ env.GO_VERSION }}
       - name: Checkout code
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6
         with:
           fetch-depth: 0
+      - name: Setup Go
+        uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
+        with:
+          go-version-file: go.mod
       - name: Run unit tests
         run: make tests
       - name: Setup Kubernetes cluster (KIND)
@@ -1,4 +1,4 @@
-FROM golang:1.25.3 AS build
+FROM golang:1.26.0 AS build
 WORKDIR /go/src/github.com/aquasecurity/kube-bench/
 COPY makefile makefile
 COPY go.mod go.sum ./

@@ -19,7 +19,7 @@ RUN /bin/bash -c 'echo "$(<kubectl.sha256) /usr/local/bin/kubectl" | sha256sum

 RUN chmod +x /usr/local/bin/kubectl

-FROM alpine:3.22.2 AS run
+FROM alpine:3.23.3 AS run
 WORKDIR /opt/kube-bench/
 # add GNU ps for -C, -o cmd, --no-headers support and add findutils to get GNU xargs
 # https://github.com/aquasecurity/kube-bench/issues/109

@@ -1,4 +1,4 @@
-FROM golang:1.25.3 AS build
+FROM golang:1.26.0 AS build
 WORKDIR /go/src/github.com/aquasecurity/kube-bench/
 COPY makefile makefile
 COPY go.mod go.sum ./

@@ -1,4 +1,4 @@
-FROM golang:1.25.3 AS build
+FROM golang:1.26.0 AS build
 WORKDIR /go/src/github.com/aquasecurity/kube-bench/
 COPY makefile makefile
 COPY go.mod go.sum ./
cfg/cis-1.12/config.yaml (new file, 2 lines)

@@ -0,0 +1,2 @@
+---
+## Version-specific settings that override the values in cfg/config.yaml
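For context, kube-bench layers each per-benchmark `cfg/<benchmark>/config.yaml` over the top-level `cfg/config.yaml`, so any keys set here win for the cis-1.12 run. The file in this commit contains only the header; a purely hypothetical override (the `master.apiserver.bins` key below is illustrative, not part of this diff) could look like:

```yaml
---
## Version-specific settings that override the values in cfg/config.yaml
## Hypothetical example: point apiserver checks at a differently named binary.
master:
  apiserver:
    bins:
      - kube-apiserver
```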
cfg/cis-1.12/controlplane.yaml (new file, 62 lines)

@@ -0,0 +1,62 @@
---
controls:
version: "cis-1.12"
id: 3
text: "Control Plane Configuration"
type: "controlplane"
groups:
  - id: 3.1
    text: "Authentication and Authorization"
    checks:
      - id: 3.1.1
        text: "Client certificate authentication should not be used for users (Manual)"
        type: "manual"
        remediation: |
          Alternative mechanisms provided by Kubernetes such as the use of OIDC should be
          implemented in place of client certificates.
        scored: false

      - id: 3.1.2
        text: "Service account token authentication should not be used for users (Manual)"
        type: "manual"
        remediation: |
          Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented
          in place of service account tokens.
        scored: false

      - id: 3.1.3
        text: "Bootstrap token authentication should not be used for users (Manual)"
        type: "manual"
        remediation: |
          Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented
          in place of bootstrap tokens.
        scored: false

  - id: 3.2
    text: "Logging"
    checks:
      - id: 3.2.1
        text: "Ensure that a minimal audit policy is created (Manual)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--audit-policy-file"
              set: true
        remediation: |
          Create an audit policy file for your cluster.
        scored: false

      - id: 3.2.2
        text: "Ensure that the audit policy covers key security concerns (Manual)"
        type: "manual"
        remediation: |
          Review the audit policy provided for the cluster and ensure that it covers
          at least the following areas,
          - Access to Secrets managed by the cluster. Care should be taken to only
            log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in
            order to avoid risk of logging sensitive data.
          - Modification of Pod and Deployment objects.
          - Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`.
          For most requests, minimally logging at the Metadata level is recommended
          (the most basic level of logging).
        scored: false
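As a usage sketch, once this benchmark ships, the new controls could be exercised via kube-bench's usual Job pattern; the metadata name and image tag below are illustrative, not part of this diff, and the host-path volume mounts a real deployment needs are omitted for brevity:

```yaml
# Hedged sketch of running the new cis-1.12 controlplane checks in-cluster.
apiVersion: batch/v1
kind: Job
metadata:
  name: kube-bench-cis-1-12          # illustrative name
spec:
  template:
    spec:
      hostPID: true                  # kube-bench inspects host processes (ps -ef)
      containers:
        - name: kube-bench
          image: docker.io/aquasec/kube-bench:latest
          command: ["kube-bench", "run", "--benchmark", "cis-1.12", "--targets", "controlplane"]
      restartPolicy: Never
```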
cfg/cis-1.12/etcd.yaml (new file, 135 lines)

@@ -0,0 +1,135 @@
---
controls:
version: "cis-1.12"
id: 2
text: "Etcd Node Configuration"
type: "etcd"
groups:
  - id: 2
    text: "Etcd Node Configuration"
    checks:
      - id: 2.1
        text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)"
        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
        tests:
          bin_op: and
          test_items:
            - flag: "--cert-file"
              env: "ETCD_CERT_FILE"
            - flag: "--key-file"
              env: "ETCD_KEY_FILE"
        remediation: |
          Follow the etcd service documentation and configure TLS encryption.
          Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml
          on the master node and set the below parameters.
          --cert-file=</path/to/ca-file>
          --key-file=</path/to/key-file>
        scored: true

      - id: 2.2
        text: "Ensure that the --client-cert-auth argument is set to true (Automated)"
        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
        tests:
          test_items:
            - flag: "--client-cert-auth"
              env: "ETCD_CLIENT_CERT_AUTH"
              compare:
                op: eq
                value: true
        remediation: |
          Edit the etcd pod specification file $etcdconf on the master
          node and set the below parameter.
          --client-cert-auth="true"
        scored: true

      - id: 2.3
        text: "Ensure that the --auto-tls argument is not set to true (Automated)"
        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
        tests:
          bin_op: or
          test_items:
            - flag: "--auto-tls"
              env: "ETCD_AUTO_TLS"
              set: false
            - flag: "--auto-tls"
              env: "ETCD_AUTO_TLS"
              compare:
                op: eq
                value: false
        remediation: |
          Edit the etcd pod specification file $etcdconf on the master
          node and either remove the --auto-tls parameter or set it to false.
          --auto-tls=false
        scored: true

      - id: 2.4
        text: "Ensure that the --peer-cert-file and --peer-key-file arguments are
          set as appropriate (Automated)"
        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
        tests:
          bin_op: and
          test_items:
            - flag: "--peer-cert-file"
              env: "ETCD_PEER_CERT_FILE"
            - flag: "--peer-key-file"
              env: "ETCD_PEER_KEY_FILE"
        remediation: |
          Follow the etcd service documentation and configure peer TLS encryption as appropriate
          for your etcd cluster.
          Then, edit the etcd pod specification file $etcdconf on the
          master node and set the below parameters.
          --peer-cert-file=</path/to/peer-cert-file>
          --peer-key-file=</path/to/peer-key-file>
        scored: true

      - id: 2.5
        text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)"
        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
        tests:
          test_items:
            - flag: "--peer-client-cert-auth"
              env: "ETCD_PEER_CLIENT_CERT_AUTH"
              compare:
                op: eq
                value: true
        remediation: |
          Edit the etcd pod specification file $etcdconf on the master
          node and set the below parameter.
          --peer-client-cert-auth=true
        scored: true

      - id: 2.6
        text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)"
        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
        tests:
          bin_op: or
          test_items:
            - flag: "--peer-auto-tls"
              env: "ETCD_PEER_AUTO_TLS"
              set: false
            - flag: "--peer-auto-tls"
              env: "ETCD_PEER_AUTO_TLS"
              compare:
                op: eq
                value: false
        remediation: |
          Edit the etcd pod specification file $etcdconf on the master
          node and either remove the --peer-auto-tls parameter or set it to false.
          --peer-auto-tls=false
        scored: true

      - id: 2.7
        text: "Ensure that a unique Certificate Authority is used for etcd (Manual)"
        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
        tests:
          test_items:
            - flag: "--trusted-ca-file"
              env: "ETCD_TRUSTED_CA_FILE"
        remediation: |
          [Manual test]
          Follow the etcd documentation and create a dedicated certificate authority setup for the
          etcd service.
          Then, edit the etcd pod specification file $etcdconf on the
          master node and set the below parameter.
          --trusted-ca-file=</path/to/ca-file>
        scored: false
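A note on the recurring pattern in checks 2.3 and 2.6 above: `bin_op: or` makes a check pass when either test item matches, and `env` names the environment variable that can satisfy the check when the option is not on the command line (etcd accepts both forms of configuration). An annotated restatement of the 2.3 logic, for readers new to the kube-bench test schema:

```yaml
tests:
  bin_op: or                  # the check passes if either item below matches
  test_items:
    - flag: "--auto-tls"
      env: "ETCD_AUTO_TLS"    # env-var form of the same option
      set: false              # matches when the option is absent entirely
    - flag: "--auto-tls"
      env: "ETCD_AUTO_TLS"
      compare:
        op: eq
        value: false          # or when it is explicitly set to false
```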
cfg/cis-1.12/master.yaml (new file, 930 lines)

@@ -0,0 +1,930 @@
---
controls:
version: "cis-1.12"
id: 1
text: "Control Plane Security Configuration"
type: "master"
groups:
  - id: 1.1
    text: "Control Plane Node Configuration Files"
    checks:
      - id: 1.1.1
        text: "Ensure that the API server pod specification file permissions are set to 600 or more restrictive (Automated)"
        audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'"
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "600"
        remediation: |
          Run the below command (based on the file location on your system) on the
          control plane node.
          For example, chmod 600 $apiserverconf
        scored: true

      - id: 1.1.2
        text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)"
        audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'"
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example, chown root:root $apiserverconf
        scored: true

      - id: 1.1.3
        text: "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Automated)"
        audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'"
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "600"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example, chmod 600 $controllermanagerconf
        scored: true

      - id: 1.1.4
        text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)"
        audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'"
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example, chown root:root $controllermanagerconf
        scored: true

      - id: 1.1.5
        text: "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Automated)"
        audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'"
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "600"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example, chmod 600 $schedulerconf
        scored: true

      - id: 1.1.6
        text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)"
        audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'"
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example, chown root:root $schedulerconf
        scored: true

      - id: 1.1.7
        text: "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Automated)"
        audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c permissions=%a; fi'"
        use_multiple_values: true
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "600"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example,
          chmod 600 $etcdconf
        scored: true

      - id: 1.1.8
        text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)"
        audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c %U:%G; fi'"
        use_multiple_values: true
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example,
          chown root:root $etcdconf
        scored: true

      - id: 1.1.9
        text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Manual)"
        audit: |
          ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a
          find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a
        use_multiple_values: true
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "600"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example, chmod 600 <path/to/cni/files>
        scored: false

      - id: 1.1.10
        text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)"
        audit: |
          ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G
          find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G
        use_multiple_values: true
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example,
          chown root:root <path/to/cni/files>
        scored: false

      - id: 1.1.11
        text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)"
        audit: |
          DATA_DIR=''
          for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do
            if test -d "$d"; then DATA_DIR="$d"; fi
          done
          if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi
          stat -c permissions=%a "$DATA_DIR"
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "700"
        remediation: |
          On the etcd server node, get the etcd data directory, passed as an argument --data-dir,
          from the command 'ps -ef | grep etcd'.
          Run the below command (based on the etcd data directory found above). For example,
          chmod 700 /var/lib/etcd
        scored: true

      - id: 1.1.12
        text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)"
        audit: |
          DATA_DIR=''
          for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do
            if test -d "$d"; then DATA_DIR="$d"; fi
          done
          if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi
          stat -c %U:%G "$DATA_DIR"
        tests:
          test_items:
            - flag: "etcd:etcd"
        remediation: |
          On the etcd server node, get the etcd data directory, passed as an argument --data-dir,
          from the command 'ps -ef | grep etcd'.
          Run the below command (based on the etcd data directory found above).
          For example, chown etcd:etcd /var/lib/etcd
        scored: true

      - id: 1.1.13
        text: "Ensure that the default administrative credential file permissions are set to 600 (Automated)"
        audit: |
          for adminconf in /etc/kubernetes/admin.conf /etc/kubernetes/super-admin.conf; do if test -e $adminconf; then stat -c "permissions=%a %n" $adminconf; fi; done
        use_multiple_values: true
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "600"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example, chmod 600 /etc/kubernetes/admin.conf
          On Kubernetes 1.29+ the super-admin.conf file should also be modified, if present.
          For example, chmod 600 /etc/kubernetes/super-admin.conf
        scored: true

      - id: 1.1.14
        text: "Ensure that the default administrative credential file ownership is set to root:root (Automated)"
        audit: |
          for adminconf in /etc/kubernetes/admin.conf /etc/kubernetes/super-admin.conf; do if test -e $adminconf; then stat -c "ownership=%U:%G %n" $adminconf; fi; done
        use_multiple_values: true
        tests:
          test_items:
            - flag: "ownership"
              compare:
                op: eq
                value: "root:root"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example, chown root:root /etc/kubernetes/admin.conf
          On Kubernetes 1.29+ the super-admin.conf file should also be modified, if present.
          For example, chown root:root /etc/kubernetes/super-admin.conf
        scored: true

      - id: 1.1.15
        text: "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)"
        audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'"
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "600"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example,
          chmod 600 $schedulerkubeconfig
        scored: true

      - id: 1.1.16
        text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)"
        audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'"
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example,
          chown root:root $schedulerkubeconfig
        scored: true

      - id: 1.1.17
        text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)"
        audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'"
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "600"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example,
          chmod 600 $controllermanagerkubeconfig
        scored: true

      - id: 1.1.18
        text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)"
        audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c %U:%G $controllermanagerkubeconfig; fi'"
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example,
          chown root:root $controllermanagerkubeconfig
        scored: true

      - id: 1.1.19
        text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)"
        audit: "find /etc/kubernetes/pki/ | xargs stat -c %U:%G"
        use_multiple_values: true
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example,
          chown -R root:root /etc/kubernetes/pki/
        scored: true

      - id: 1.1.20
        text: "Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive (Manual)"
        audit: "find /etc/kubernetes/pki/ -name '*.crt' | xargs stat -c permissions=%a"
        use_multiple_values: true
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "644"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example,
          chmod -R 644 /etc/kubernetes/pki/*.crt
        scored: false

      - id: 1.1.21
        text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)"
        audit: "find /etc/kubernetes/pki/ -name '*.key' | xargs stat -c permissions=%a"
        use_multiple_values: true
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "600"
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example,
          chmod -R 600 /etc/kubernetes/pki/*.key
        scored: false

  - id: 1.2
    text: "API Server"
    checks:
      - id: 1.2.1
        text: "Ensure that the --anonymous-auth argument is set to false (Manual)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--anonymous-auth"
              compare:
                op: eq
                value: false
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and set the below parameter.
          --anonymous-auth=false
        scored: false

      - id: 1.2.2
        text: "Ensure that the --token-auth-file parameter is not set (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--token-auth-file"
              set: false
        remediation: |
          Follow the documentation and configure alternate mechanisms for authentication. Then,
          edit the API server pod specification file $apiserverconf
          on the control plane node and remove the --token-auth-file=<filename> parameter.
        scored: true

      - id: 1.2.3
        text: "Ensure that the --DenyServiceExternalIPs is set (Manual)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          bin_op: or
          test_items:
            - flag: "--enable-admission-plugins"
              compare:
                op: has
                value: "DenyServiceExternalIPs"
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and add the `DenyServiceExternalIPs` plugin
          to the enabled admission plugins, as such --enable-admission-plugins=DenyServiceExternalIPs.
        scored: false

      - id: 1.2.4
        text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          bin_op: and
          test_items:
            - flag: "--kubelet-client-certificate"
            - flag: "--kubelet-client-key"
        remediation: |
          Follow the Kubernetes documentation and set up the TLS connection between the
          apiserver and kubelets. Then, edit API server pod specification file
          $apiserverconf on the control plane node and set the
          kubelet client certificate and key parameters as below.
          --kubelet-client-certificate=<path/to/client-certificate-file>
          --kubelet-client-key=<path/to/client-key-file>
        scored: true

      - id: 1.2.5
        text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--kubelet-certificate-authority"
        remediation: |
          Follow the Kubernetes documentation and setup the TLS connection between
          the apiserver and kubelets. Then, edit the API server pod specification file
          $apiserverconf on the control plane node and set the
          --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority.
          --kubelet-certificate-authority=<ca-string>
        scored: true

      - id: 1.2.6
        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--authorization-mode"
              compare:
                op: nothave
                value: "AlwaysAllow"
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow.
          One such example could be as below.
          --authorization-mode=RBAC
        scored: true

      - id: 1.2.7
        text: "Ensure that the --authorization-mode argument includes Node (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--authorization-mode"
              compare:
                op: has
                value: "Node"
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and set the --authorization-mode parameter to a value that includes Node.
          --authorization-mode=Node,RBAC
        scored: true

      - id: 1.2.8
        text: "Ensure that the --authorization-mode argument includes RBAC (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--authorization-mode"
              compare:
                op: has
                value: "RBAC"
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and set the --authorization-mode parameter to a value that includes RBAC,
          for example `--authorization-mode=Node,RBAC`.
        scored: true

      - id: 1.2.9
        text: "Ensure that the admission control plugin EventRateLimit is set (Manual)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--enable-admission-plugins"
              compare:
                op: has
                value: "EventRateLimit"
        remediation: |
          Follow the Kubernetes documentation and set the desired limits in a configuration file.
          Then, edit the API server pod specification file $apiserverconf
          and set the below parameters.
          --enable-admission-plugins=...,EventRateLimit,...
          --admission-control-config-file=<path/to/configuration/file>
        scored: false

      - id: 1.2.10
        text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          bin_op: or
          test_items:
            - flag: "--enable-admission-plugins"
              compare:
                op: nothave
                value: AlwaysAdmit
            - flag: "--enable-admission-plugins"
              set: false
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a
          value that does not include AlwaysAdmit.
        scored: true

      - id: 1.2.11
        text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--enable-admission-plugins"
              compare:
                op: has
                value: "AlwaysPullImages"
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and set the --enable-admission-plugins parameter to include
          AlwaysPullImages.
          --enable-admission-plugins=...,AlwaysPullImages,...
        scored: false

      - id: 1.2.12
        text: "Ensure that the admission control plugin ServiceAccount is set (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          bin_op: or
          test_items:
            - flag: "--disable-admission-plugins"
              compare:
                op: nothave
                value: "ServiceAccount"
            - flag: "--disable-admission-plugins"
              set: false
        remediation: |
          Follow the documentation and create ServiceAccount objects as per your environment.
          Then, edit the API server pod specification file $apiserverconf
          on the control plane node and ensure that the --disable-admission-plugins parameter is set to a
          value that does not include ServiceAccount.
        scored: true

      - id: 1.2.13
        text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          bin_op: or
          test_items:
            - flag: "--disable-admission-plugins"
              compare:
                op: nothave
                value: "NamespaceLifecycle"
            - flag: "--disable-admission-plugins"
              set: false
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and set the --disable-admission-plugins parameter to
          ensure it does not include NamespaceLifecycle.
        scored: true

      - id: 1.2.14
        text: "Ensure that the admission control plugin NodeRestriction is set (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--enable-admission-plugins"
              compare:
                op: has
                value: "NodeRestriction"
        remediation: |
          Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets.
          Then, edit the API server pod specification file $apiserverconf
          on the control plane node and set the --enable-admission-plugins parameter to a
          value that includes NodeRestriction.
          --enable-admission-plugins=...,NodeRestriction,...
        scored: true

      - id: 1.2.15
        text: "Ensure that the --profiling argument is set to false (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--profiling"
              compare:
                op: eq
                value: false
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and set the below parameter.
          --profiling=false
        scored: true

      - id: 1.2.16
        text: "Ensure that the --audit-log-path argument is set (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--audit-log-path"
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and set the --audit-log-path parameter to a suitable path and
          file where you would like audit logs to be written, for example,
          --audit-log-path=/var/log/apiserver/audit.log
        scored: true

      - id: 1.2.17
        text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--audit-log-maxage"
              compare:
                op: gte
                value: 30
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and set the --audit-log-maxage parameter to 30
          or as an appropriate number of days, for example,
          --audit-log-maxage=30
        scored: true

      - id: 1.2.18
        text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--audit-log-maxbackup"
              compare:
                op: gte
                value: 10
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate
          value. For example,
          --audit-log-maxbackup=10
        scored: true

      - id: 1.2.19
        text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--audit-log-maxsize"
              compare:
                op: gte
                value: 100
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB.
          For example, to set it as 100 MB, --audit-log-maxsize=100
        scored: true

      - id: 1.2.20
        text: "Ensure that the --request-timeout argument is set as appropriate (Manual)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        type: manual
        remediation: |
          Edit the API server pod specification file $apiserverconf
          and set the below parameter as appropriate and if needed.
          For example, --request-timeout=300s
        scored: false

      - id: 1.2.21
        text: "Ensure that the --service-account-lookup argument is set to true (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          bin_op: or
          test_items:
            - flag: "--service-account-lookup"
              set: false
            - flag: "--service-account-lookup"
              compare:
                op: eq
                value: true
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and set the below parameter.
          --service-account-lookup=true
          Alternatively, you can delete the --service-account-lookup parameter from this file so
          that the default takes effect.
        scored: true

      - id: 1.2.22
        text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--service-account-key-file"
        remediation: |
          Edit the API server pod specification file $apiserverconf
          on the control plane node and set the --service-account-key-file parameter
          to the public key file for service accounts. For example,
          --service-account-key-file=<filename>
        scored: true

      - id: 1.2.23
        text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          bin_op: and
          test_items:
            - flag: "--etcd-certfile"
            - flag: "--etcd-keyfile"
        remediation: |
          Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd.
          Then, edit the API server pod specification file $apiserverconf
          on the control plane node and set the etcd certificate and key file parameters.
          --etcd-certfile=<path/to/client-certificate-file>
          --etcd-keyfile=<path/to/client-key-file>
        scored: true

      - id: 1.2.24
        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          bin_op: and
          test_items:
            - flag: "--tls-cert-file"
            - flag: "--tls-private-key-file"
        remediation: |
          Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
          Then, edit the API server pod specification file $apiserverconf
          on the control plane node and set the TLS certificate and private key file parameters.
          --tls-cert-file=<path/to/tls-certificate-file>
          --tls-private-key-file=<path/to/tls-key-file>
        scored: true

      - id: 1.2.25
        text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--client-ca-file"
        remediation: |
          Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
          Then, edit the API server pod specification file $apiserverconf
          on the control plane node and set the client certificate authority file.
          --client-ca-file=<path/to/client-ca-file>
        scored: true

      - id: 1.2.26
        text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--etcd-cafile"
        remediation: |
          Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd.
          Then, edit the API server pod specification file $apiserverconf
          on the control plane node and set the etcd certificate authority file parameter.
          --etcd-cafile=<path/to/ca-file>
        scored: true

      - id: 1.2.27
        text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--encryption-provider-config"
        remediation: |
          Follow the Kubernetes documentation and configure an EncryptionConfig file.
          Then, edit the API server pod specification file $apiserverconf
          on the control plane node and set the --encryption-provider-config parameter to the path of that file.
          For example, --encryption-provider-config=</path/to/EncryptionConfig/File>
        scored: false

      - id: 1.2.28
        text: "Ensure that encryption providers are appropriately configured (Manual)"
        audit: |
          ENCRYPTION_PROVIDER_CONFIG=$(ps -ef | grep $apiserverbin | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%')
          if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi
        tests:
          test_items:
            - flag: "provider"
              compare:
                op: valid_elements
                value: "aescbc,kms,secretbox"
        remediation: |
          Follow the Kubernetes documentation and configure an EncryptionConfig file.
          In this file, choose aescbc, kms or secretbox as the encryption provider.
        scored: false

      - id: 1.2.29
        text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--tls-cipher-suites"
              compare:
                op: valid_elements
                value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256"
        remediation: |
          Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml
          on the control plane node and set the below parameter.
          --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,
          TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
          TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
          TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256
        scored: false

      - id: 1.2.30
        text: "Ensure that the --service-account-extend-token-expiration parameter is set to false (Automated)"
        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
        tests:
          test_items:
            - flag: "--service-account-extend-token-expiration"
              compare:
                op: eq
                value: false
        remediation: |
          Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml on the Control Plane node and set the --service-account-extend-token-expiration parameter to false.
          `--service-account-extend-token-expiration=false`
          By default, this parameter is set to true.
        scored: true

  - id: 1.3
    text: "Controller Manager"
    checks:
      - id: 1.3.1
        text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)"
        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
        tests:
          test_items:
            - flag: "--terminated-pod-gc-threshold"
        remediation: |
          Edit the Controller Manager pod specification file $controllermanagerconf
          on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold,
          for example, --terminated-pod-gc-threshold=10
        scored: false

      - id: 1.3.2
        text: "Ensure that the --profiling argument is set to false (Automated)"
        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
        tests:
          test_items:
            - flag: "--profiling"
              compare:
                op: eq
                value: false
        remediation: |
          Edit the Controller Manager pod specification file $controllermanagerconf
          on the control plane node and set the below parameter.
          --profiling=false
        scored: true

      - id: 1.3.3
        text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)"
        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
        tests:
          test_items:
            - flag: "--use-service-account-credentials"
              compare:
                op: noteq
                value: false
        remediation: |
          Edit the Controller Manager pod specification file $controllermanagerconf
          on the control plane node to set the below parameter.
          --use-service-account-credentials=true
        scored: true

      - id: 1.3.4
        text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)"
        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
        tests:
          test_items:
            - flag: "--service-account-private-key-file"
        remediation: |
          Edit the Controller Manager pod specification file $controllermanagerconf
          on the control plane node and set the --service-account-private-key-file parameter
          to the private key file for service accounts.
          --service-account-private-key-file=<filename>
        scored: true

      - id: 1.3.5
        text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)"
        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
        tests:
          test_items:
            - flag: "--root-ca-file"
        remediation: |
          Edit the Controller Manager pod specification file $controllermanagerconf
          on the control plane node and set the --root-ca-file parameter to the certificate bundle file.
          --root-ca-file=<path/to/file>
        scored: true

      - id: 1.3.6
        text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)"
        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
        tests:
          bin_op: or
          test_items:
            - flag: "--feature-gates"
              compare:
                op: nothave
                value: "RotateKubeletServerCertificate=false"
              set: true
            - flag: "--feature-gates"
              set: false
        remediation: |
          Edit the Controller Manager pod specification file $controllermanagerconf
          on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true.
          --feature-gates=RotateKubeletServerCertificate=true
        scored: true

      - id: 1.3.7
        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
        tests:
          bin_op: or
          test_items:
            - flag: "--bind-address"
              compare:
                op: eq
                value: "127.0.0.1"
            - flag: "--bind-address"
              set: false
        remediation: |
          Edit the Controller Manager pod specification file $controllermanagerconf
          on the control plane node and ensure the correct value for the --bind-address parameter
        scored: true

  - id: 1.4
    text: "Scheduler"
    checks:
      - id: 1.4.1
        text: "Ensure that the --profiling argument is set to false (Automated)"
        audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
        tests:
          test_items:
            - flag: "--profiling"
              compare:
                op: eq
                value: false
        remediation: |
          Edit the Scheduler pod specification file $schedulerconf
          on the control plane node and set the below parameter.
          --profiling=false
        scored: true

      - id: 1.4.2
        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
        audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
        tests:
          bin_op: or
          test_items:
            - flag: "--bind-address"
              compare:
                op: eq
                value: "127.0.0.1"
            - flag: "--bind-address"
              set: false
        remediation: |
          Edit the Scheduler pod specification file $schedulerconf
          on the control plane node and ensure the correct value for the --bind-address parameter
        scored: true
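Checks 1.2.27 and 1.2.28 above reference an EncryptionConfig file without showing one. A minimal sketch of the Kubernetes `EncryptionConfiguration` they expect (the key name and secret below are placeholders, not values from this diff):

```yaml
apiVersion: apiserver.config.k8s.io/v1
kind: EncryptionConfiguration
resources:
  - resources:
      - secrets
    providers:
      - aescbc:                 # one of the providers check 1.2.28 accepts
          keys:
            - name: key1        # placeholder key name
              secret: <base64-encoded 32-byte key>   # placeholder
      - identity: {}            # fallback so existing plaintext data stays readable
```

Note that check 1.2.28's audit greps the line immediately after `providers:`, so the first listed provider is the one that must be aescbc, kms, or secretbox.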
492
cfg/cis-1.12/node.yaml
Normal file
492
cfg/cis-1.12/node.yaml
Normal file
@@ -0,0 +1,492 @@
|
||||
---
|
||||
controls:
|
||||
version: "cis-1.12"
|
||||
id: 4
|
||||
text: "Worker Node Security Configuration"
|
||||
type: "node"
|
||||
groups:
|
||||
- id: 4.1
|
||||
text: "Worker Node Configuration Files"
|
||||
checks:
|
||||
- id: 4.1.1
|
||||
text: "Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)"
|
||||
audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' '
|
||||
tests:
|
||||
test_items:
|
||||
- flag: "permissions"
|
||||
compare:
|
||||
op: bitmask
|
||||
value: "600"
|
||||
remediation: |
|
||||
Run the below command (based on the file location on your system) on the each worker node.
|
||||
For example, chmod 600 $kubeletsvc
|
||||
scored: true
|
||||
|
||||
- id: 4.1.2
|
||||
text: "Ensure that the kubelet service file ownership is set to root:root (Automated)"
|
||||
audit: '/bin/sh -c "if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; else echo \"File not found\"; fi"'
|
||||
tests:
|
||||
bin_op: or
|
||||
test_items:
|
||||
- flag: root:root
|
||||
- flag: "File not found"
|
||||
remediation: |
|
||||
          Run the below command (based on the file location on your system) on each worker node.
          For example,
          chown root:root $kubeletsvc
        scored: true

      - id: 4.1.3
        text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Manual)"
        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' '
        tests:
          bin_op: or
          test_items:
            - flag: "permissions"
              set: true
              compare:
                op: bitmask
                value: "600"
        remediation: |
          Run the below command (based on the file location on your system) on each worker node.
          For example,
          chmod 600 $proxykubeconfig
        scored: false

      - id: 4.1.4
        text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)"
        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' '
        tests:
          bin_op: or
          test_items:
            - flag: root:root
        remediation: |
          Run the below command (based on the file location on your system) on each worker node.
          For example, chown root:root $proxykubeconfig
        scored: false

      - id: 4.1.5
        text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)"
        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' '
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "600"
        remediation: |
          Run the below command (based on the file location on your system) on each worker node.
          For example,
          chmod 600 $kubeletkubeconfig
        scored: true

      - id: 4.1.6
        text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)"
        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' '
        tests:
          test_items:
            - flag: root:root
        remediation: |
          Run the below command (based on the file location on your system) on each worker node.
          For example,
          chown root:root $kubeletkubeconfig
        scored: true

      - id: 4.1.7
        text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Manual)"
        audit: |
          CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq)
          if test -z $CAFILE; then CAFILE=$kubeletcafile; fi
          if test -e $CAFILE; then stat -c permissions=%a $CAFILE; fi
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "644"
        remediation: |
          Run the following command to modify the file permissions of the --client-ca-file:
          chmod 644 <filename>
        scored: false

      - id: 4.1.8
        text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)"
        audit: |
          CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq)
          if test -z $CAFILE; then CAFILE=$kubeletcafile; fi
          if test -e $CAFILE; then stat -c %U:%G $CAFILE; fi
        tests:
          test_items:
            - flag: root:root
              compare:
                op: eq
                value: root:root
        remediation: |
          Run the following command to modify the ownership of the --client-ca-file.
          chown root:root <filename>
        scored: false

      - id: 4.1.9
        text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Automated)"
        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' '
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "600"
        remediation: |
          Run the following command (using the config file location identified in the Audit step)
          chmod 600 $kubeletconf
        scored: true

      - id: 4.1.10
        text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Automated)"
        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
        tests:
          test_items:
            - flag: root:root
        remediation: |
          Run the following command (using the config file location identified in the Audit step)
          chown root:root $kubeletconf
        scored: true
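
      # Note on the `op: bitmask` comparisons used throughout 4.1: a check passes when
      # the audited file mode has no permission bits outside the stated mask, i.e. the
      # file is "at least as restrictive". A minimal shell sketch (the path shown is
      # one common kubelet config location, not the only one):
      #
      #   stat -c permissions=%a /var/lib/kubelet/config.yaml
      #   # permissions=600 -> passes a "600" bitmask; 400 also passes (more restrictive)
      #   # permissions=640 -> fails: the group-read bit (040) falls outside 0600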

  - id: 4.2
    text: "Kubelet"
    checks:
      - id: 4.2.1
        text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: "--anonymous-auth"
              path: '{.authentication.anonymous.enabled}'
              compare:
                op: eq
                value: false
        remediation: |
          If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to
          `false`.
          If using executable arguments, edit the kubelet service file
          $kubeletsvc on each worker node and
          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
          `--anonymous-auth=false`
          Based on your system, restart the kubelet service. For example,
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: true
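
      # For reference, a minimal sketch of the config-file stanza that the remediation
      # above describes (a KubeletConfiguration fragment; surrounding fields omitted):
      #
      #   apiVersion: kubelet.config.k8s.io/v1beta1
      #   kind: KubeletConfiguration
      #   authentication:
      #     anonymous:
      #       enabled: false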

      - id: 4.2.2
        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --authorization-mode
              path: '{.authorization.mode}'
              compare:
                op: nothave
                value: AlwaysAllow
        remediation: |
          If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If
          using executable arguments, edit the kubelet service file
          $kubeletsvc on each worker node and
          set the below parameter in KUBELET_AUTHZ_ARGS variable.
          --authorization-mode=Webhook
          Based on your system, restart the kubelet service. For example,
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: true

      - id: 4.2.3
        text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --client-ca-file
              path: '{.authentication.x509.clientCAFile}'
        remediation: |
          If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to
          the location of the client CA file.
          If using command line arguments, edit the kubelet service file
          $kubeletsvc on each worker node and
          set the below parameter in KUBELET_AUTHZ_ARGS variable.
          --client-ca-file=<path/to/client-ca-file>
          Based on your system, restart the kubelet service. For example,
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: true

      - id: 4.2.4
        text: "Verify that if defined, the --read-only-port argument is set to 0 (Manual)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          bin_op: or
          test_items:
            - flag: "--read-only-port"
              path: '{.readOnlyPort}'
              compare:
                op: eq
                value: 0
            - flag: "--read-only-port"
              path: '{.readOnlyPort}'
              set: false
        remediation: |
          If using a Kubelet config file, edit the file to set `readOnlyPort` to 0.
          If using command line arguments, edit the kubelet service file
          $kubeletsvc on each worker node and
          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
          --read-only-port=0
          Based on your system, restart the kubelet service. For example,
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: false
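
      # A minimal config-file sketch of the remediated setting for 4.2.4 (fragment
      # only; 0 disables the read-only port entirely):
      #
      #   readOnlyPort: 0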

      - id: 4.2.5
        text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --streaming-connection-idle-timeout
              path: '{.streamingConnectionIdleTimeout}'
              compare:
                op: noteq
                value: 0
            - flag: --streaming-connection-idle-timeout
              path: '{.streamingConnectionIdleTimeout}'
              set: false
          bin_op: or
        remediation: |
          If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a
          value other than 0.
          If using command line arguments, edit the kubelet service file
          $kubeletsvc on each worker node and
          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
          --streaming-connection-idle-timeout=5m
          Based on your system, restart the kubelet service. For example,
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: false

      - id: 4.2.6
        text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --make-iptables-util-chains
              path: '{.makeIPTablesUtilChains}'
              compare:
                op: eq
                value: true
            - flag: --make-iptables-util-chains
              path: '{.makeIPTablesUtilChains}'
              set: false
          bin_op: or
        remediation: |
          If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`.
          If using command line arguments, edit the kubelet service file
          $kubeletsvc on each worker node and
          remove the --make-iptables-util-chains argument from the
          KUBELET_SYSTEM_PODS_ARGS variable.
          Based on your system, restart the kubelet service. For example:
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: true

      - id: 4.2.7
        text: "Ensure that the --hostname-override argument is not set (Manual)"
        # This is one of those properties that can only be set as a command line argument.
        # To check if the property is set as expected, we need to parse the kubelet command
        # instead of reading the Kubelet Configuration file.
        audit: "/bin/ps -fC $kubeletbin"
        tests:
          test_items:
            - flag: --hostname-override
              set: false
        remediation: |
          Edit the kubelet service file $kubeletsvc
          on each worker node and remove the --hostname-override argument from the
          KUBELET_SYSTEM_PODS_ARGS variable.
          Based on your system, restart the kubelet service. For example,
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: false

      - id: 4.2.8
        text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --event-qps
              path: '{.eventRecordQPS}'
              compare:
                op: gte
                value: 0
            - flag: --event-qps
              path: '{.eventRecordQPS}'
              set: false
          bin_op: or
        remediation: |
          If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level.
          If using command line arguments, edit the kubelet service file
          $kubeletsvc on each worker node and
          set the --event-qps parameter in the KUBELET_SYSTEM_PODS_ARGS variable to an appropriate level.
          Based on your system, restart the kubelet service. For example,
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: false

      - id: 4.2.9
        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --tls-cert-file
              path: '{.tlsCertFile}'
            - flag: --tls-private-key-file
              path: '{.tlsPrivateKeyFile}'
        remediation: |
          If using a Kubelet config file, edit the file to set `tlsCertFile` to the location
          of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile`
          to the location of the corresponding private key file.
          If using command line arguments, edit the kubelet service file
          $kubeletsvc on each worker node and
          set the below parameters in KUBELET_CERTIFICATE_ARGS variable.
          --tls-cert-file=<path/to/tls-certificate-file>
          --tls-private-key-file=<path/to/tls-key-file>
          Based on your system, restart the kubelet service. For example,
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: false

      - id: 4.2.10
        text: "Ensure that the --rotate-certificates argument is not set to false (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --rotate-certificates
              path: '{.rotateCertificates}'
              compare:
                op: eq
                value: true
            - flag: --rotate-certificates
              path: '{.rotateCertificates}'
              set: false
          bin_op: or
        remediation: |
          If using a Kubelet config file, edit the file to set `rotateCertificates` to `true` or
          remove it altogether to use the default value.
          If using command line arguments, edit the kubelet service file
          $kubeletsvc on each worker node and
          remove the --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS
          variable.
          Based on your system, restart the kubelet service. For example,
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: true

      - id: 4.2.11
        text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          bin_op: or
          test_items:
            - flag: RotateKubeletServerCertificate
              path: '{.featureGates.RotateKubeletServerCertificate}'
              compare:
                op: nothave
                value: false
            - flag: RotateKubeletServerCertificate
              path: '{.featureGates.RotateKubeletServerCertificate}'
              set: false
        remediation: |
          Edit the kubelet service file $kubeletsvc
          on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable.
          --feature-gates=RotateKubeletServerCertificate=true
          Based on your system, restart the kubelet service. For example:
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: false

      - id: 4.2.12
        text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --tls-cipher-suites
              path: '{range .tlsCipherSuites[:]}{}{'',''}{end}'
              compare:
                op: valid_elements
                value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
        remediation: |
          If using a Kubelet config file, edit the file to set `tlsCipherSuites` to
          TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
          or to a subset of these values.
          If using executable arguments, edit the kubelet service file
          $kubeletsvc on each worker node and
          set the --tls-cipher-suites parameter as follows, or to a subset of these values.
          --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
          Based on your system, restart the kubelet service. For example:
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: false
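
      # A minimal config-file sketch for 4.2.12, using the cipher list from the
      # remediation above (any subset of these values also satisfies the check):
      #
      #   tlsCipherSuites:
      #     - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
      #     - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
      #     - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
      #     - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
      #     - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305
      #     - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384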

      - id: 4.2.13
        text: "Ensure that a limit is set on pod PIDs (Manual)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --pod-max-pids
              path: '{.podPidsLimit}'
        remediation: |
          Decide on an appropriate level for this parameter and set it,
          either via the --pod-max-pids command line parameter or the podPidsLimit configuration file setting.
        scored: false

      - id: 4.2.14
        text: "Ensure that the --seccomp-default parameter is set to true (Manual)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --seccomp-default
              path: '{.seccompDefault}'
        remediation: |
          Set the parameter, either via the --seccomp-default command line parameter or the
          seccompDefault configuration file setting.
          By default the seccomp profile is not enabled.
        scored: false

  - id: 4.3
    text: "kube-proxy"
    checks:
      - id: 4.3.1
        text: "Ensure that the kube-proxy metrics service is bound to localhost (Automated)"
        audit: "/bin/ps -fC $proxybin"
        audit_config: "/bin/sh -c 'if test -e $proxykubeconfig; then cat $proxykubeconfig; fi'"
        tests:
          bin_op: or
          test_items:
            - flag: "--metrics-bind-address"
              path: '{.metricsBindAddress}'
              compare:
                op: has
                value: "127.0.0.1"
            - flag: "--metrics-bind-address"
              path: '{.metricsBindAddress}'
              set: false
        remediation: |
          Modify or remove any values which bind the metrics service to a non-localhost address.
          The default value is 127.0.0.1:10249.
        scored: true
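
      # A minimal kube-proxy configuration sketch for 4.3.1 keeping the metrics
      # endpoint on loopback (the address shown is the default noted above):
      #
      #   apiVersion: kubeproxy.config.k8s.io/v1alpha1
      #   kind: KubeProxyConfiguration
      #   metricsBindAddress: 127.0.0.1:10249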

515
cfg/cis-1.12/policies.yaml
Normal file
@@ -0,0 +1,515 @@
---
controls:
version: "cis-1.12"
id: 5
text: "Kubernetes Policies"
type: "policies"
groups:
  - id: 5.1
    text: "RBAC and Service Accounts"
    checks:
      - id: 5.1.1
        text: "Ensure that the cluster-admin role is only used where required (Manual)"
        audit: |
          kubectl get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].name --no-headers | while read -r role_name role_binding subject
          do
            if [[ "${role_name}" != "cluster-admin" && "${role_binding}" == "cluster-admin" ]]; then
              is_compliant="false"
            else
              is_compliant="true"
            fi;
            echo "**role_name: ${role_name} role_binding: ${role_binding} subject: ${subject} is_compliant: ${is_compliant}"
          done
        use_multiple_values: true
        tests:
          test_items:
            - flag: "is_compliant"
              compare:
                op: eq
                value: true
        remediation: |
          Identify all clusterrolebindings to the cluster-admin role. Check if they are used and
          if they need this role or if they could use a role with fewer privileges.
          Where possible, first bind users to a lower privileged role and then remove the
          clusterrolebinding to the cluster-admin role: kubectl delete clusterrolebinding [name]
          Condition: is_compliant is false if role_name is not cluster-admin and role_binding is cluster-admin.
        scored: false
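
      # Example of the audit output evaluated above; names other than cluster-admin
      # are hypothetical:
      #
      #   **role_name: cluster-admin role_binding: cluster-admin subject: system:masters is_compliant: true
      #   **role_name: ci-deployer role_binding: cluster-admin subject: ci-bot is_compliant: false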

      - id: 5.1.2
        text: "Minimize access to secrets (Manual)"
        audit: "echo \"canGetListWatchSecretsAsSystemAuthenticated: $(kubectl auth can-i get,list,watch secrets --all-namespaces --as=system:authenticated)\""
        tests:
          test_items:
            - flag: "canGetListWatchSecretsAsSystemAuthenticated"
              compare:
                op: eq
                value: no
        remediation: |
          Where possible, remove get, list and watch access to Secret objects in the cluster.
        scored: false

      - id: 5.1.3
        text: "Minimize wildcard use in Roles and ClusterRoles (Manual)"
        audit: |
          # Check Roles
          kubectl get roles --all-namespaces -o custom-columns=ROLE_NAMESPACE:.metadata.namespace,ROLE_NAME:.metadata.name --no-headers | while read -r role_namespace role_name
          do
            role_rules=$(kubectl get role -n "${role_namespace}" "${role_name}" -o=json | jq -c '.rules')
            if echo "${role_rules}" | grep -q "\[\"\*\"\]"; then
              role_is_compliant="false"
            else
              role_is_compliant="true"
            fi;
            echo "**role_name: ${role_name} role_namespace: ${role_namespace} role_rules: ${role_rules} role_is_compliant: ${role_is_compliant}"
          done

          # Check ClusterRoles
          kubectl get clusterroles -o custom-columns=CLUSTERROLE_NAME:.metadata.name --no-headers | while read -r clusterrole_name
          do
            clusterrole_rules=$(kubectl get clusterrole "${clusterrole_name}" -o=json | jq -c '.rules')
            if echo "${clusterrole_rules}" | grep -q "\[\"\*\"\]"; then
              clusterrole_is_compliant="false"
            else
              clusterrole_is_compliant="true"
            fi;
            echo "**clusterrole_name: ${clusterrole_name} clusterrole_rules: ${clusterrole_rules} clusterrole_is_compliant: ${clusterrole_is_compliant}"
          done
        use_multiple_values: true
        tests:
          bin_op: or
          test_items:
            - flag: "role_is_compliant"
              compare:
                op: eq
                value: true
              set: true
            - flag: "clusterrole_is_compliant"
              compare:
                op: eq
                value: true
              set: true
        remediation: |
          Where possible, replace any use of wildcards ["*"] in roles and clusterroles with specific
          objects or actions.
          Condition: role_is_compliant is false if ["*"] is found in rules.
          Condition: clusterrole_is_compliant is false if ["*"] is found in rules.
        scored: false

      - id: 5.1.4
        text: "Minimize access to create pods (Manual)"
        audit: |
          echo "canCreatePodsAsSystemAuthenticated: $(kubectl auth can-i create pods --all-namespaces --as=system:authenticated)"
        tests:
          test_items:
            - flag: "canCreatePodsAsSystemAuthenticated"
              compare:
                op: eq
                value: no
        remediation: |
          Where possible, remove create access to pod objects in the cluster.
        scored: false

      - id: 5.1.5
        text: "Ensure that default service accounts are not actively used (Manual)"
        audit: |
          kubectl get serviceaccount --all-namespaces --field-selector metadata.name=default -o=json | jq -r '.items[] | " namespace: \(.metadata.namespace), kind: \(.kind), name: \(.metadata.name), automountServiceAccountToken: \(.automountServiceAccountToken | if . == null then "notset" else . end )"' | xargs -L 1
        use_multiple_values: true
        tests:
          test_items:
            - flag: "automountServiceAccountToken"
              compare:
                op: eq
                value: false
              set: true
        remediation: |
          Create explicit service accounts wherever a Kubernetes workload requires specific access
          to the Kubernetes API server.
          Modify the configuration of each default service account to include this value
          `automountServiceAccountToken: false`.
        scored: false
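
      # A minimal sketch of a remediated default ServiceAccount (the namespace is
      # illustrative):
      #
      #   apiVersion: v1
      #   kind: ServiceAccount
      #   metadata:
      #     name: default
      #     namespace: my-namespace
      #   automountServiceAccountToken: false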

      - id: 5.1.6
        text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)"
        audit: |
          kubectl get pods --all-namespaces -o custom-columns=POD_NAMESPACE:.metadata.namespace,POD_NAME:.metadata.name,POD_SERVICE_ACCOUNT:.spec.serviceAccount,POD_IS_AUTOMOUNTSERVICEACCOUNTTOKEN:.spec.automountServiceAccountToken --no-headers | while read -r pod_namespace pod_name pod_service_account pod_is_automountserviceaccounttoken
          do
            # Retrieve automountServiceAccountToken's value for ServiceAccount and Pod, set to notset if null or <none>.
            svacc_is_automountserviceaccounttoken=$(kubectl get serviceaccount -n "${pod_namespace}" "${pod_service_account}" -o json | jq -r '.automountServiceAccountToken' | sed -e 's/<none>/notset/g' -e 's/null/notset/g')
            pod_is_automountserviceaccounttoken=$(echo "${pod_is_automountserviceaccounttoken}" | sed -e 's/<none>/notset/g' -e 's/null/notset/g')
            if [ "${svacc_is_automountserviceaccounttoken}" = "false" ] && ( [ "${pod_is_automountserviceaccounttoken}" = "false" ] || [ "${pod_is_automountserviceaccounttoken}" = "notset" ] ); then
              is_compliant="true"
            elif [ "${svacc_is_automountserviceaccounttoken}" = "true" ] && [ "${pod_is_automountserviceaccounttoken}" = "false" ]; then
              is_compliant="true"
            else
              is_compliant="false"
            fi
            echo "**namespace: ${pod_namespace} pod_name: ${pod_name} service_account: ${pod_service_account} pod_is_automountserviceaccounttoken: ${pod_is_automountserviceaccounttoken} svacc_is_automountServiceAccountToken: ${svacc_is_automountserviceaccounttoken} is_compliant: ${is_compliant}"
          done
        use_multiple_values: true
        tests:
          test_items:
            - flag: "is_compliant"
              compare:
                op: eq
                value: true
        remediation: |
          Modify the definition of ServiceAccounts and Pods which do not need to mount service
          account tokens to disable it, with `automountServiceAccountToken: false`.
          If both the ServiceAccount and the Pod's .spec specify a value for automountServiceAccountToken, the Pod spec takes precedence.
          Condition: Pod is_compliant is true when
          - ServiceAccount is automountServiceAccountToken: false and Pod is automountServiceAccountToken: false or notset
          - ServiceAccount is automountServiceAccountToken: true and Pod is automountServiceAccountToken: false
        scored: false

      - id: 5.1.7
        text: "Avoid use of system:masters group (Manual)"
        type: "manual"
        remediation: |
          Remove the system:masters group from all users in the cluster.
        scored: false

      - id: 5.1.8
        text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)"
        type: "manual"
        remediation: |
          Where possible, remove the impersonate, bind and escalate rights from subjects.
        scored: false

      - id: 5.1.9
        text: "Minimize access to create persistent volumes (Manual)"
        type: "manual"
        remediation: |
          Where possible, remove create access to PersistentVolume objects in the cluster.
        scored: false

      - id: 5.1.10
        text: "Minimize access to the proxy sub-resource of nodes (Manual)"
        type: "manual"
        remediation: |
          Where possible, remove access to the proxy sub-resource of node objects.
        scored: false

      - id: 5.1.11
        text: "Minimize access to the approval sub-resource of certificatesigningrequests objects (Manual)"
        type: "manual"
        remediation: |
          Where possible, remove access to the approval sub-resource of certificatesigningrequests objects.
        scored: false

      - id: 5.1.12
        text: "Minimize access to webhook configuration objects (Manual)"
        type: "manual"
        remediation: |
          Where possible, remove access to the validatingwebhookconfigurations or mutatingwebhookconfigurations objects.
        scored: false

      - id: 5.1.13
        text: "Minimize access to the service account token creation (Manual)"
        type: "manual"
        remediation: |
          Where possible, remove access to the token sub-resource of serviceaccount objects.
        scored: false

  - id: 5.2
    text: "Pod Security Standards"
    checks:
      - id: 5.2.1
        text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)"
        type: "manual"
        remediation: |
          Ensure that either Pod Security Admission or an external policy control system is in place
          for every namespace which contains user workloads.
        scored: false

      - id: 5.2.2
        text: "Minimize the admission of privileged containers (Manual)"
        audit: |
          kubectl get pods --all-namespaces -o custom-columns=POD_NAME:.metadata.name,POD_NAMESPACE:.metadata.namespace --no-headers | while read -r pod_name pod_namespace
          do
            # Retrieve container(s) for each Pod.
            kubectl get pod "${pod_name}" --namespace "${pod_namespace}" -o json | jq -c '.spec.containers[]' | while read -r container
            do
              # Retrieve container's name.
              container_name=$(echo ${container} | jq -r '.name')
              # Retrieve container's .securityContext.privileged value.
              container_privileged=$(echo ${container} | jq -r '.securityContext.privileged' | sed -e 's/null/notset/g')
              if [ "${container_privileged}" = "false" ] || [ "${container_privileged}" = "notset" ] ; then
                echo "***pod_name: ${pod_name} container_name: ${container_name} pod_namespace: ${pod_namespace} is_container_privileged: ${container_privileged} is_compliant: true"
              else
                echo "***pod_name: ${pod_name} container_name: ${container_name} pod_namespace: ${pod_namespace} is_container_privileged: ${container_privileged} is_compliant: false"
              fi
            done
          done
        use_multiple_values: true
        tests:
          test_items:
            - flag: "is_compliant"
              compare:
                op: eq
                value: true
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of privileged containers.
          Audit: the audit lists all pods' containers to retrieve their .securityContext.privileged value.
          Condition: is_compliant is false if container's `.securityContext.privileged` is set to `true`.
          Default: by default, there are no restrictions on the creation of privileged containers.
        scored: false
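
      # One way to satisfy 5.2.2 is a Pod Security Admission label on the namespace;
      # a minimal sketch (namespace name is illustrative; the `baseline` profile
      # rejects privileged containers, `restricted` is stricter still):
      #
      #   apiVersion: v1
      #   kind: Namespace
      #   metadata:
      #     name: my-namespace
      #     labels:
      #       pod-security.kubernetes.io/enforce: baseline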

      - id: 5.2.3
        text: "Minimize the admission of containers wishing to share the host process ID namespace (Manual)"
        audit: |
          kubectl get pods --all-namespaces -o custom-columns=POD_NAME:.metadata.name,POD_NAMESPACE:.metadata.namespace --no-headers | while read -r pod_name pod_namespace
          do
            # Retrieve spec.hostPID for each pod.
            pod_hostpid=$(kubectl get pod "${pod_name}" --namespace "${pod_namespace}" -o jsonpath='{.spec.hostPID}' 2>/dev/null)
            if [ -z "${pod_hostpid}" ]; then
              pod_hostpid="false"
              echo "***pod_name: ${pod_name} pod_namespace: ${pod_namespace} is_pod_hostpid: ${pod_hostpid} is_compliant: true"
            else
              echo "***pod_name: ${pod_name} pod_namespace: ${pod_namespace} is_pod_hostpid: ${pod_hostpid} is_compliant: false"
            fi
          done
        use_multiple_values: true
        tests:
          test_items:
            - flag: "is_compliant"
              compare:
                op: eq
                value: true
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of `hostPID` containers.
          Audit: the audit retrieves each Pod's spec.hostPID.
          Condition: is_compliant is false if Pod's spec.hostPID is set to `true`.
          Default: by default, there are no restrictions on the creation of hostPID containers.
        scored: false

      - id: 5.2.4
        text: "Minimize the admission of containers wishing to share the host IPC namespace (Manual)"
        audit: |
          kubectl get pods --all-namespaces -o custom-columns=POD_NAME:.metadata.name,POD_NAMESPACE:.metadata.namespace --no-headers | while read -r pod_name pod_namespace
          do
            # Retrieve spec.hostIPC for each pod.
            pod_hostipc=$(kubectl get pod "${pod_name}" --namespace "${pod_namespace}" -o jsonpath='{.spec.hostIPC}' 2>/dev/null)
            if [ -z "${pod_hostipc}" ]; then
              pod_hostipc="false"
              echo "***pod_name: ${pod_name} pod_namespace: ${pod_namespace} is_pod_hostipc: ${pod_hostipc} is_compliant: true"
            else
              echo "***pod_name: ${pod_name} pod_namespace: ${pod_namespace} is_pod_hostipc: ${pod_hostipc} is_compliant: false"
            fi
          done
        use_multiple_values: true
        tests:
          test_items:
            - flag: "is_compliant"
              compare:
                op: eq
                value: true
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of `hostIPC` containers.
          Audit: the audit retrieves each Pod's spec.hostIPC.
          Condition: is_compliant is false if Pod's spec.hostIPC is set to `true`.
          Default: by default, there are no restrictions on the creation of hostIPC containers.
        scored: false

      - id: 5.2.5
        text: "Minimize the admission of containers wishing to share the host network namespace (Manual)"
        audit: |
          kubectl get pods --all-namespaces -o custom-columns=POD_NAME:.metadata.name,POD_NAMESPACE:.metadata.namespace --no-headers | while read -r pod_name pod_namespace
          do
            # Retrieve spec.hostNetwork for each pod.
            pod_hostnetwork=$(kubectl get pod "${pod_name}" --namespace "${pod_namespace}" -o jsonpath='{.spec.hostNetwork}' 2>/dev/null)
            if [ -z "${pod_hostnetwork}" ]; then
              pod_hostnetwork="false"
              echo "***pod_name: ${pod_name} pod_namespace: ${pod_namespace} is_pod_hostnetwork: ${pod_hostnetwork} is_compliant: true"
            else
              echo "***pod_name: ${pod_name} pod_namespace: ${pod_namespace} is_pod_hostnetwork: ${pod_hostnetwork} is_compliant: false"
            fi
          done
        use_multiple_values: true
        tests:
          test_items:
            - flag: "is_compliant"
              compare:
                op: eq
                value: true
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of `hostNetwork` containers.
          Audit: the audit retrieves each Pod's spec.hostNetwork.
          Condition: is_compliant is false if Pod's spec.hostNetwork is set to `true`.
          Default: by default, there are no restrictions on the creation of hostNetwork containers.
        scored: false

      - id: 5.2.6
        text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)"
        audit: |
          kubectl get pods --all-namespaces -o custom-columns=POD_NAME:.metadata.name,POD_NAMESPACE:.metadata.namespace --no-headers | while read -r pod_name pod_namespace
          do
            # Retrieve container(s) for each Pod.
            kubectl get pod "${pod_name}" --namespace "${pod_namespace}" -o json | jq -c '.spec.containers[]' | while read -r container
            do
              # Retrieve container's name
              container_name=$(echo ${container} | jq -r '.name')
              # Retrieve container's .securityContext.allowPrivilegeEscalation
              container_allowprivesc=$(echo ${container} | jq -r '.securityContext.allowPrivilegeEscalation' | sed -e 's/null/notset/g')
              if [ "${container_allowprivesc}" = "false" ] || [ "${container_allowprivesc}" = "notset" ]; then
                echo "***pod_name: ${pod_name} container_name: ${container_name} pod_namespace: ${pod_namespace} is_container_allowprivesc: ${container_allowprivesc} is_compliant: true"
              else
                echo "***pod_name: ${pod_name} container_name: ${container_name} pod_namespace: ${pod_namespace} is_container_allowprivesc: ${container_allowprivesc} is_compliant: false"
              fi
            done
          done
        use_multiple_values: true
        tests:
          test_items:
            - flag: "is_compliant"
              compare:
                op: eq
                value: true
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of containers with `.securityContext.allowPrivilegeEscalation` set to `true`.
          Audit: the audit retrieves each Pod's container(s) `.securityContext.allowPrivilegeEscalation`.
          Condition: is_compliant is false if container's `.securityContext.allowPrivilegeEscalation` is set to `true`.
          Default: if notset, privilege escalation is allowed (defaults to true). However if PSP/PSA is used with a `restricted` profile,
          privilege escalation is explicitly disallowed unless configured otherwise.
        scored: false

      - id: 5.2.7
        text: "Minimize the admission of root containers (Manual)"
        type: "manual"
        remediation: |
          Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot`
          or `MustRunAs` with the range of UIDs not including 0, is set.
        scored: false

      - id: 5.2.8
        text: "Minimize the admission of containers with the NET_RAW capability (Manual)"
        type: "manual"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of containers with the `NET_RAW` capability.
        scored: false

      - id: 5.2.9
        text: "Minimize the admission of containers with capabilities assigned (Manual)"
        type: "manual"
        remediation: |
          Review the use of capabilities in applications running on your cluster. Where a
          namespace contains applications which do not require any Linux capabilities to operate
          consider adding a policy which forbids the admission of containers which do not drop all
          capabilities.
        scored: false

      - id: 5.2.10
        text: "Minimize the admission of Windows HostProcess containers (Manual)"
        type: "manual"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`.
        scored: false

      - id: 5.2.11
        text: "Minimize the admission of HostPath volumes (Manual)"
        type: "manual"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of containers with `hostPath` volumes.
        scored: false

      - id: 5.2.12
        text: "Minimize the admission of containers which use HostPorts (Manual)"
        type: "manual"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of containers which use `hostPort` sections.
        scored: false

  - id: 5.3
    text: "Network Policies and CNI"
    checks:
      - id: 5.3.1
        text: "Ensure that the CNI in use supports NetworkPolicies (Manual)"
        type: "manual"
        remediation: |
          If the CNI plugin in use does not support network policies, consideration should be given to
          making use of a different plugin, or finding an alternate mechanism for restricting traffic
          in the Kubernetes cluster.
        scored: false

      - id: 5.3.2
        text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)"
        type: "manual"
        remediation: |
          Follow the documentation and create NetworkPolicy objects as you need them.
        scored: false
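
      # A minimal default-deny NetworkPolicy sketch for 5.3.2, applied per namespace
      # (name and namespace are illustrative):
      #
      #   apiVersion: networking.k8s.io/v1
      #   kind: NetworkPolicy
      #   metadata:
      #     name: default-deny-all
      #     namespace: my-namespace
      #   spec:
      #     podSelector: {}
      #     policyTypes:
      #       - Ingress
      #       - Egress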

  - id: 5.4
    text: "Secrets Management"
    checks:
      - id: 5.4.1
        text: "Prefer using Secrets as files over Secrets as environment variables (Manual)"
        type: "manual"
        remediation: |
          If possible, rewrite application code to read Secrets from mounted secret files, rather than
          from environment variables.
        scored: false

      - id: 5.4.2
        text: "Consider external secret storage (Manual)"
        type: "manual"
        remediation: |
          Refer to the Secrets management options offered by your cloud provider or a third-party
          secrets management solution.
        scored: false

  - id: 5.5
    text: "Extensible Admission Control"
    checks:
      - id: 5.5.1
        text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)"
        type: "manual"
        remediation: |
          Follow the Kubernetes documentation and setup image provenance.
        scored: false

  - id: 5.6
    text: "General Policies"
    checks:
      - id: 5.6.1
        text: "Create administrative boundaries between resources using namespaces (Manual)"
        type: "manual"
        remediation: |
          Follow the documentation and create namespaces for objects in your deployment as you need
          them.
        scored: false

      - id: 5.6.2
        text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)"
        type: "manual"
        remediation: |
          Use `securityContext` to enable the docker/default seccomp profile in your pod definitions.
          An example is as below:
          securityContext:
            seccompProfile:
              type: RuntimeDefault
        scored: false

      - id: 5.6.3
        text: "Apply SecurityContext to your Pods and Containers (Manual)"
        type: "manual"
        remediation: |
          Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a
          suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker
          Containers.
        scored: false

      - id: 5.6.4
        text: "The default namespace should not be used (Manual)"
        type: "manual"
        remediation: |
          Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
          resources and that all new resources are created in a specific namespace.
        scored: false

@@ -111,6 +111,9 @@ master:
      - /var/lib/rancher/k3s/server/db/etcd/config
    defaultconf: /etc/kubernetes/manifests/etcd.yaml
    defaultdatadir: /var/lib/etcd/default.etcd
    client_cert_file: /var/lib/rancher/rke2/server/tls/etcd/server-client.crt
    client_key_file: /var/lib/rancher/rke2/server/tls/etcd/server-client.key


  flanneld:
    optional: true
@@ -136,12 +139,13 @@ node:

  kubelet:
    cafile:
      - "/var/lib/rancher/rke2/agent/client-ca.crt"
      - "/var/lib/rancher/rke2/server/tls/server-ca.crt"
      - "/etc/kubernetes/pki/ca.crt"
      - "/etc/kubernetes/certs/ca.crt"
      - "/etc/kubernetes/cert/ca.pem"
      - "/var/snap/microk8s/current/certs/ca.crt"
      - "/var/lib/rancher/rke2/agent/server.crt"
      - "/var/lib/rancher/rke2/agent/client-ca.crt"
      - "/var/lib/rancher/k3s/agent/client-ca.crt"
    svc:
      # These paths must also be included
@@ -169,6 +173,7 @@ node:
      - "/var/lib/rancher/k3s/agent/kubelet.kubeconfig"
    confs:
      - "/etc/kubernetes/kubelet-config.yaml"
      - "/var/lib/rancher/rke2/agent/etc/kubelet.conf.d"
      - "/var/lib/kubelet/config.yaml"
      - "/var/lib/kubelet/config.yml"
      - "/etc/kubernetes/kubelet/kubelet-config.json"
@@ -239,6 +244,7 @@ etcd:
      - /var/lib/etcd/data.etcd
      - /var/lib/rancher/k3s/server/db/etcd
    confs:
      - /var/lib/rancher/rke2/server/db/etcd/config
      - /etc/kubernetes/manifests/etcd.yaml
      - /etc/kubernetes/manifests/etcd.yml
      - /etc/kubernetes/manifests/etcd.manifest
@@ -287,11 +293,15 @@ version_mapping:
  "1.29": "cis-1.11"
  "1.30": "cis-1.11"
  "1.31": "cis-1.11"
  "1.32": "cis-1.11"
  "1.32": "cis-1.12"
  "1.33": "cis-1.12"
  "1.34": "cis-1.12"
  "eks-1.0.1": "eks-1.0.1"
  "eks-1.1.0": "eks-1.1.0"
  "eks-1.2.0": "eks-1.2.0"
  "eks-1.5.0": "eks-1.5.0"
  "eks-1.7.0": "eks-1.7.0"
  "eks-1.8.0": "eks-1.8.0"
  "gke-1.0": "gke-1.0"
  "gke-1.2.0": "gke-1.2.0"
  "gke-1.6.0": "gke-1.6.0"
@@ -391,6 +401,12 @@ target_mapping:
    - "controlplane"
    - "etcd"
    - "policies"
  "cis-1.12":
    - "master"
    - "node"
    - "controlplane"
    - "etcd"
    - "policies"
  "gke-1.0":
    - "master"
    - "node"
@@ -446,6 +462,12 @@ target_mapping:
    - "controlplane"
    - "policies"
    - "managedservices"
  "eks-1.8.0":
    - "master"
    - "node"
    - "controlplane"
    - "policies"
    - "managedservices"
  "rh-0.7":
    - "master"
    - "node"

9
cfg/eks-1.8.0/config.yaml
Normal file
@@ -0,0 +1,9 @@
---
## Version-specific settings that override the values in cfg/config.yaml
## These settings are required if you are using the --asff option to report findings to AWS Security Hub
## AWS account number is required.
AWS_ACCOUNT: "<AWS_ACCT_NUMBER>"
## AWS region is required.
AWS_REGION: "<AWS_REGION>"
## EKS Cluster ARN is required.
CLUSTER_ARN: "<AWS_CLUSTER_ARN>"

69
cfg/eks-1.8.0/controlplane.yaml
Normal file
@@ -0,0 +1,69 @@
---
controls:
version: "eks-1.8.0"
id: 2
text: "Control Plane Configuration"
type: "controlplane"
groups:
  - id: 2.1
    text: "Logging"
    checks:
      - id: 2.1.1
        text: "Enable audit Logs (Manual)"
        type: manual
        remediation: |
          From Console:
          1. For each EKS Cluster in each region;
          2. Go to 'Amazon EKS' > 'Clusters' > '' > 'Configuration' > 'Logging'.
          3. Click 'Manage logging'.
          4. Ensure that all options are toggled to 'Enabled'.
             API server: Enabled
             Audit: Enabled
             Authenticator: Enabled
             Controller manager: Enabled
             Scheduler: Enabled
          5. Click 'Save Changes'.

          From CLI:
          # For each EKS Cluster in each region;
          aws eks update-cluster-config \
            --region '${REGION_CODE}' \
            --name '${CLUSTER_NAME}' \
            --logging '{"clusterLogging":[{"types":["api","audit","authenticator","controllerManager","scheduler"],"enabled":true}]}'
        scored: false

      - id: 2.1.2
        text: "Ensure audit logs are collected and managed (Manual)"
        type: manual
        remediation: |
          Create or update the audit-policy.yaml to specify the audit logging configuration:
          apiVersion: audit.k8s.io/v1
          kind: Policy
          rules:
            - level: Metadata
              resources:
                - group: ""
                  resources: ["pods"]
          Apply the audit policy configuration to the cluster:
          kubectl apply -f <path-to-audit-policy>.yaml
          Ensure audit logs are forwarded to a centralized logging system like CloudWatch, Elasticsearch, or another log management solution:
          kubectl create configmap cluster-audit-policy --from-file=audit-policy.yaml -n kube-system
          kubectl apply -f - <<EOF
          apiVersion: v1
          kind: Pod
          metadata:
            name: audit-logging
            namespace: kube-system
          spec:
            containers:
              - name: audit-log-forwarder
                image: my-log-forwarder-image
                volumeMounts:
                  - mountPath: /etc/kubernetes/audit
                    name: audit-config
            volumes:
              - name: audit-config
                configMap:
                  name: cluster-audit-policy
          EOF
        scored: false

227
cfg/eks-1.8.0/managedservices.yaml
Normal file
@@ -0,0 +1,227 @@
---
controls:
version: "eks-1.8.0"
id: 5
text: "Managed Services"
type: "managedservices"
groups:
  - id: 5.1
    text: "Image Registry and Image Scanning"
    checks:
      - id: 5.1.1
        text: "Ensure Image Vulnerability Scanning using Amazon ECR image scanning or a third party provider (Manual)"
        type: "manual"
        remediation: |
          To utilize AWS ECR for Image scanning please follow the steps below:

          To create a repository configured for scan on push (AWS CLI):
          aws ecr create-repository --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE

          To edit the settings of an existing repository (AWS CLI):
          aws ecr put-image-scanning-configuration --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE

          Use the following steps to start a manual image scan using the AWS Management Console.

          1. Open the Amazon ECR console at https://console.aws.amazon.com/ecr/repositories.
          2. From the navigation bar, choose the Region to create your repository in.
          3. In the navigation pane, choose Repositories.
          4. On the Repositories page, choose the repository that contains the image to scan.
          5. On the Images page, select the image to scan and then choose Scan.
        scored: false

      - id: 5.1.2
        text: "Minimize user access to Amazon ECR (Manual)"
        type: "manual"
        remediation: |
          Before you use IAM to manage access to Amazon ECR, you should understand what IAM features
          are available to use with Amazon ECR. To get a high-level view of how Amazon ECR and other
          AWS services work with IAM, see AWS Services That Work with IAM in the IAM User Guide.
        scored: false

      - id: 5.1.3
        text: "Minimize cluster access to read-only for Amazon ECR (Manual)"
        type: "manual"
        remediation: |
          You can use your Amazon ECR images with Amazon EKS, but you need to satisfy the following prerequisites.

          The Amazon EKS worker node IAM role (NodeInstanceRole) that you use with your worker nodes must possess
          the following IAM policy permissions for Amazon ECR.

          {
            "Version": "2012-10-17",
            "Statement": [
              {
                "Effect": "Allow",
                "Action": [
                  "ecr:BatchCheckLayerAvailability",
                  "ecr:BatchGetImage",
                  "ecr:GetDownloadUrlForLayer",
                  "ecr:GetAuthorizationToken"
                ],
                "Resource": "*"
              }
            ]
          }
        scored: false

      - id: 5.1.4
        text: "Minimize Container Registries to only those approved (Manual)"
        type: "manual"
        remediation: |
          To minimize AWS ECR container registries to only those approved, you can follow these steps:

          1. Define your approval criteria: Determine the criteria that containers must meet to
             be considered approved. This can include factors such as security, compliance,
             compatibility, and other requirements.
          2. Identify all existing ECR registries: Identify all ECR registries that are currently
             being used in your organization.
          3. Evaluate ECR registries against approval criteria: Evaluate each ECR registry
             against your approval criteria to determine whether it should be approved or not.
             This can be done by reviewing the registry settings and configuration, as well as
             conducting security assessments and vulnerability scans.
          4. Establish policies and procedures: Establish policies and procedures that outline
             how ECR registries will be approved, maintained, and monitored. This should
             include guidelines for developers to follow when selecting a registry for their
             container images.
          5. Implement access controls: Implement access controls to ensure that only
             approved ECR registries are used to store and distribute container images. This
             can be done by setting up IAM policies and roles that restrict access to
             unapproved registries or create a whitelist of approved registries.
          6. Monitor and review: Continuously monitor and review the use of ECR registries
             to ensure that they continue to meet your approval criteria. This can include
        scored: false

  - id: 5.2
    text: "Identity and Access Management (IAM)"
    checks:
      - id: 5.2.1
        text: "Prefer using dedicated Amazon EKS Service Accounts (Manual)"
        type: "manual"
        remediation: |
          With IAM roles for service accounts on Amazon EKS clusters, you can associate an
          IAM role with a Kubernetes service account. This service account can then provide
          AWS permissions to the containers in any pod that uses that service account. With this
          feature, you no longer need to provide extended permissions to the worker node IAM
          role so that pods on that node can call AWS APIs.
          Applications must sign their AWS API requests with AWS credentials. This feature
          provides a strategy for managing credentials for your applications, similar to the way
          that Amazon EC2 instance profiles provide credentials to Amazon EC2 instances.
          Instead of creating and distributing your AWS credentials to the containers or using the
          Amazon EC2 instance’s role, you can associate an IAM role with a Kubernetes service
          account. The applications in the pod’s containers can then use an AWS SDK or the
          AWS CLI to make API requests to authorized AWS services.

          The IAM roles for service accounts feature provides the following benefits:

          - Least privilege - By using the IAM roles for service accounts feature, you no
            longer need to provide extended permissions to the worker node IAM role so that
            pods on that node can call AWS APIs. You can scope IAM permissions to a
            service account, and only pods that use that service account have access to
            those permissions. This feature also eliminates the need for third-party solutions
            such as kiam or kube2iam.
          - Credential isolation - A container can only retrieve credentials for the IAM role
            that is associated with the service account to which it belongs. A container never
            has access to credentials that are intended for another container that belongs to
            another pod.
          - Audit-ability - Access and event logging is available through CloudTrail to help
            ensure retrospective auditing.
        scored: false

  - id: 5.3
    text: "AWS EKS Key Management Service"
    checks:
      - id: 5.3.1
        text: "Ensure Kubernetes Secrets are encrypted using Customer Master Keys (CMKs) managed in AWS KMS (Manual)"
        type: "manual"
        remediation: |
          This process can only be performed during Cluster Creation.

          Enable 'Secrets Encryption' during Amazon EKS cluster creation as described
          in the links within the 'References' section.
        scored: false
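
      # A hedged CLI sketch for enabling secrets encryption at cluster creation; the
      # key ARN is a placeholder and the exact flag shape should be verified against
      # current AWS CLI documentation:
      #
      #   aws eks create-cluster --name my-cluster \
      #     --encryption-config '[{"resources":["secrets"],"provider":{"keyArn":"<KMS_KEY_ARN>"}}]' \
      #     ...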

  - id: 5.4
    text: "Cluster Networking"
    checks:
      - id: 5.4.1
        text: "Restrict Access to the Control Plane Endpoint (Manual)"
        type: "manual"
        remediation: |
          By enabling private endpoint access to the Kubernetes API server, all communication
          between your nodes and the API server stays within your VPC. You can also limit the IP
          addresses that can access your API server from the internet, or completely disable
          internet access to the API server.
          With this in mind, you can update your cluster accordingly using the AWS CLI to ensure
          that Private Endpoint Access is enabled.
          If you choose to also enable Public Endpoint Access then you should also configure a
          list of allowable CIDR blocks, resulting in restricted access from the internet. If you
          specify no CIDR blocks, then the public API server endpoint is able to receive and
          process requests from all IP addresses by defaulting to ['0.0.0.0/0'].
          For example, the following command would enable private access to the Kubernetes
          API as well as limited public access over the internet from a single IP address (noting
          the /32 CIDR suffix):
          aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,publicAccessCidrs="203.0.113.5/32"

          Note: The CIDR blocks specified cannot include reserved addresses.
          There is a maximum number of CIDR blocks that you can specify. For more information,
          see the EKS Service Quotas link in the references section.
          For more detailed information, see the EKS Cluster Endpoint documentation link in the
          references section.
        scored: false

      - id: 5.4.2
        text: "Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled (Manual)"
        type: "manual"
        remediation: |
          By enabling private endpoint access to the Kubernetes API server, all communication
          between your nodes and the API server stays within your VPC.
          With this in mind, you can update your cluster accordingly using the AWS CLI to ensure
          that Private Endpoint Access is enabled.
          For example, the following command would enable private access to the Kubernetes
          API and ensure that no public access is permitted:
          aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=false

          Note: For more detailed information, see the EKS Cluster Endpoint documentation link
          in the references section.
        scored: false

      - id: 5.4.3
        text: "Ensure clusters are created with Private Nodes (Manual)"
        type: "manual"
        remediation: |
          aws eks update-cluster-config \
            --region region-code \
            --name my-cluster \
            --resources-vpc-config endpointPublicAccess=true,publicAccessCidrs="203.0.113.5/32",endpointPrivateAccess=true
        scored: false

      - id: 5.4.4
        text: "Ensure Network Policy is Enabled and set as appropriate (Manual)"
        type: "manual"
        remediation: |
          Utilize Calico or another network policy engine to segment and isolate your traffic.
        scored: false

      - id: 5.4.5
        text: "Encrypt traffic to HTTPS load balancers with TLS certificates (Manual)"
        type: "manual"
        remediation: |
          Your load balancer vendor can provide details on configuring HTTPS with TLS.
        scored: false

  - id: 5.5
    text: "Authentication and Authorization"
    checks:
      - id: 5.5.1
        text: "Manage Kubernetes RBAC users with AWS IAM Authenticator for Kubernetes or Upgrade to AWS CLI v1.16.156 or greater (Manual)"
        type: "manual"
        remediation: |
          Refer to the 'Managing users or IAM roles for your cluster' in Amazon EKS documentation.

          Note: If using AWS CLI version 1.16.156 or later there is no need to install the AWS
          IAM Authenticator anymore.
          The relevant AWS CLI commands, depending on the use case, are:
          aws eks update-kubeconfig
          aws eks get-token
        scored: false

6
cfg/eks-1.8.0/master.yaml
Normal file
@@ -0,0 +1,6 @@
---
controls:
version: "eks-1.8.0"
id: 1
text: "Control Plane Components"
type: "master"

456
cfg/eks-1.8.0/node.yaml
Normal file
@@ -0,0 +1,456 @@
---
controls:
version: "eks-1.8.0"
id: 3
text: "Worker Nodes"
type: "node"
groups:
  - id: 3.1
    text: "Worker Node Configuration Files"
    checks:
      - id: 3.1.1
        text: "Ensure that the kubeconfig file permissions are set to 644 or more restrictive (Automated)"
        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' '
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "644"
        remediation: |
          Run the below command (based on the file location on your system) on each worker node.
          For example,
          chmod 644 $kubeletkubeconfig
        scored: true

      - id: 3.1.2
        text: "Ensure that the kubelet kubeconfig file ownership is set to root:root (Automated)"
        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' '
        tests:
          test_items:
            - flag: root:root
        remediation: |
          Run the below command (based on the file location on your system) on each worker node.
          For example,
          chown root:root $kubeletkubeconfig
        scored: true

      - id: 3.1.3
        text: "Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Automated)"
        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' '
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "644"
        remediation: |
          Run the following command (using the config file location identified in the Audit step)
          chmod 644 $kubeletconf
        scored: true

      - id: 3.1.4
        text: "Ensure that the kubelet configuration file ownership is set to root:root (Automated)"
        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
        tests:
          test_items:
            - flag: root:root
        remediation: |
          Run the following command (using the config file location identified in the Audit step)
          chown root:root $kubeletconf
        scored: true

  - id: 3.2
    text: "Kubelet"
    checks:
      - id: 3.2.1
        text: "Ensure that the Anonymous Auth is Not Enabled (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: "--anonymous-auth"
              path: '{.authentication.anonymous.enabled}'
              set: true
              compare:
                op: eq
                value: false
        remediation: |
          Remediation Method 1:
          If configuring via the Kubelet config file, you first need to locate the file.
          To do this, SSH to each node and execute the following command to find the kubelet
          process:
          ps -ef | grep kubelet
          The output of the above command provides details of the active kubelet process, from
          which we can see the location of the configuration file provided to the kubelet service
          with the --config argument. The file can be viewed with a command such as more or
          less, like so:
          sudo less /path/to/kubelet-config.json
          Disable Anonymous Authentication by setting the following parameter:
          "authentication": { "anonymous": { "enabled": false } }

          Remediation Method 2:
          If using executable arguments, edit the kubelet service file on each worker node and
          ensure the below parameters are part of the KUBELET_ARGS variable string.
          For systems using systemd, such as the Amazon EKS Optimised Amazon Linux or
          Bottlerocket AMIs, this file can be found at
          /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf. Otherwise,
          you may need to look up documentation for your chosen operating system to determine
          which service manager is configured:
          --anonymous-auth=false

          For Both Remediation Steps:
          Based on your system, restart the kubelet service and check the service status.
          The following example is for operating systems using systemd, such as the Amazon
          EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the systemctl
          command. If systemctl is not available then you will need to look up documentation for
          your chosen operating system to determine which service manager is configured:
          systemctl daemon-reload
          systemctl restart kubelet.service
          systemctl status kubelet -l
        scored: true

      - id: 3.2.2
        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --authorization-mode
              path: '{.authorization.mode}'
              set: true
              compare:
                op: nothave
                value: AlwaysAllow
        remediation: |
          Remediation Method 1:
          If configuring via the Kubelet config file, you first need to locate the file.
          To do this, SSH to each node and execute the following command to find the kubelet
          process:
          ps -ef | grep kubelet
          The output of the above command provides details of the active kubelet process, from
          which we can see the location of the configuration file provided to the kubelet service
          with the --config argument. The file can be viewed with a command such as more or
          less, like so:
          sudo less /path/to/kubelet-config.json
          Enable Webhook Authentication by setting the following parameter:
          "authentication": { "webhook": { "enabled": true } }
          Next, set the Authorization Mode to Webhook by setting the following parameter:
          "authorization": { "mode": "Webhook" }
          Finer detail of the authentication and authorization fields can be found in the
          Kubelet Configuration documentation.

          Remediation Method 2:
          If using executable arguments, edit the kubelet service file on each worker node and
          ensure the below parameters are part of the KUBELET_ARGS variable string.
          For systems using systemd, such as the Amazon EKS Optimised Amazon Linux or
          Bottlerocket AMIs, this file can be found at
          /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf. Otherwise,
          you may need to look up documentation for your chosen operating system to determine
          which service manager is configured:
          --authentication-token-webhook
          --authorization-mode=Webhook

          For Both Remediation Steps:
          Based on your system, restart the kubelet service and check the service status.
          The following example is for operating systems using systemd, such as the Amazon
          EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the systemctl
          command. If systemctl is not available then you will need to look up documentation for
          your chosen operating system to determine which service manager is configured:
          systemctl daemon-reload
          systemctl restart kubelet.service
          systemctl status kubelet -l
        scored: true

      - id: 3.2.3
        text: "Ensure that a Client CA File is Configured (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --client-ca-file
              path: '{.authentication.x509.clientCAFile}'
              set: true
        remediation: |
          Remediation Method 1:
          If configuring via the Kubelet config file, you first need to locate the file.
          To do this, SSH to each node and execute the following command to find the kubelet
          process:
          ps -ef | grep kubelet
          The output of the above command provides details of the active kubelet process, from
          which we can see the location of the configuration file provided to the kubelet service
          with the --config argument. The file can be viewed with a command such as more or
          less, like so:
          sudo less /path/to/kubelet-config.json
          Configure the client certificate authority file by setting the following parameter
          appropriately:
          "authentication": { "x509": { "clientCAFile": <path/to/client-ca-file> } }

          Remediation Method 2:
          If using executable arguments, edit the kubelet service file on each worker node and
          ensure the below parameters are part of the KUBELET_ARGS variable string.
          For systems using systemd, such as the Amazon EKS Optimised Amazon Linux or
          Bottlerocket AMIs, this file can be found at
          /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf. Otherwise,
          you may need to look up documentation for your chosen operating system to determine
          which service manager is configured:
          --client-ca-file=<path/to/client-ca-file>

          For Both Remediation Steps:
          Based on your system, restart the kubelet service and check the service status.
          The following example is for operating systems using systemd, such as the Amazon
          EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the systemctl
          command. If systemctl is not available then you will need to look up documentation for
          your chosen operating system to determine which service manager is configured:
          systemctl daemon-reload
          systemctl restart kubelet.service
          systemctl status kubelet -l
        scored: true

      - id: 3.2.4
        text: "Ensure that the --read-only-port is disabled (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: "--read-only-port"
              path: '{.readOnlyPort}'
              set: true
              compare:
                op: eq
                value: 0
        remediation: |
          If modifying the Kubelet config file, edit the kubelet-config.json file
          /etc/kubernetes/kubelet/kubelet-config.json and set the below parameter to 0
          "readOnlyPort": 0
          If using executable arguments, edit the kubelet service file
          /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each
          worker node and add the below parameter at the end of the KUBELET_ARGS variable
          string.
          --read-only-port=0

          Based on your system, restart the kubelet service and check status
          systemctl daemon-reload
          systemctl restart kubelet.service
          systemctl status kubelet -l
        scored: true

      - id: 3.2.5
        text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --streaming-connection-idle-timeout
              path: '{.streamingConnectionIdleTimeout}'
              set: true
              compare:
                op: noteq
                value: 0
            - flag: --streaming-connection-idle-timeout
              path: '{.streamingConnectionIdleTimeout}'
              set: false
          bin_op: or
        remediation: |
          Remediation Method 1:
          If modifying the Kubelet config file, edit the kubelet-config.json file
          /etc/kubernetes/kubelet/kubelet-config.json and set the below parameter to a
          non-zero value in the format of #h#m#s
          "streamingConnectionIdleTimeout": "4h0m0s"
          You should ensure that the kubelet service file
          /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not
          specify a --streaming-connection-idle-timeout argument because it would
          override the Kubelet config file.

          Remediation Method 2:
          If using executable arguments, edit the kubelet service file
          /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each
          worker node and add the below parameter at the end of the KUBELET_ARGS variable
          string.
          --streaming-connection-idle-timeout=4h0m0s

          Remediation Method 3:
          If using the api configz endpoint, consider searching for the status of
          "streamingConnectionIdleTimeout" by extracting the live configuration from the
          nodes running kubelet.
          See detailed step-by-step configmap procedures in Reconfigure a Node's Kubelet in a
          Live Cluster, and then rerun the curl statement from the audit process to check for kubelet
          configuration changes
          kubectl proxy --port=8001 &
          export HOSTNAME_PORT=localhost:8001 (example host and port number)
          export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from "kubectl get nodes")
          curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz"

          For all three remediations:
          Based on your system, restart the kubelet service and check status
          systemctl daemon-reload
          systemctl restart kubelet.service
          systemctl status kubelet -l
        scored: true

      - id: 3.2.6
        text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --make-iptables-util-chains
              path: '{.makeIPTablesUtilChains}'
              set: true
              compare:
                op: eq
                value: true
            - flag: --make-iptables-util-chains
              path: '{.makeIPTablesUtilChains}'
              set: false
          bin_op: or
        remediation: |
          Remediation Method 1:
          If modifying the Kubelet config file, edit the kubelet-config.json file
          /etc/kubernetes/kubelet/kubelet-config.json and set the below parameter to
          true
          "makeIPTablesUtilChains": true
          Ensure that /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf
          does not set the --make-iptables-util-chains argument because that would
          override your Kubelet config file.

          Remediation Method 2:
          If using executable arguments, edit the kubelet service file
          /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each
          worker node and add the below parameter at the end of the KUBELET_ARGS variable
          string.
          --make-iptables-util-chains=true

          Remediation Method 3:
          If using the api configz endpoint, consider searching for the status of
          "makeIPTablesUtilChains": true by extracting the live configuration from the nodes
          running kubelet.
          See detailed step-by-step configmap procedures in Reconfigure a Node's Kubelet in a
          Live Cluster, and then rerun the curl statement from the audit process to check for kubelet
          configuration changes
          kubectl proxy --port=8001 &
          export HOSTNAME_PORT=localhost:8001 (example host and port number)
          export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from "kubectl get nodes")
          curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz"

          For all three remediations:
          Based on your system, restart the kubelet service and check status
          systemctl daemon-reload
          systemctl restart kubelet.service
          systemctl status kubelet -l
        scored: true

      - id: 3.2.7
        text: "Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --event-qps
              path: '{.eventRecordQPS}'
              set: true
              compare:
                op: gte
                value: 0
            - flag: --event-qps
              path: '{.eventRecordQPS}'
              set: false
          bin_op: or
        remediation: |
          If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate
          level.
          If using command line arguments, edit the kubelet service file
          /etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node
          and set the --event-qps parameter in the KUBELET_SYSTEM_PODS_ARGS variable to an
          appropriate level (for example, --event-qps=5; the value is illustrative, choose a
          level that ensures appropriate event capture for your environment).
          Based on your system, restart the kubelet service. For example:
          systemctl daemon-reload
          systemctl restart kubelet.service
        scored: true

      - id: 3.2.8
        text: "Ensure that the --rotate-certificates argument is not present or is set to true (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: --rotate-certificates
              path: '{.rotateCertificates}'
              compare:
                op: eq
                value: true
            - flag: --rotate-certificates
              path: '{.rotateCertificates}'
              set: false
          bin_op: or
        remediation: |
          Remediation Method 1:
          If modifying the Kubelet config file, edit the kubelet-config.json file
          /etc/kubernetes/kubelet/kubelet-config.json and set the below parameter to
          true
          "rotateCertificates": true
          Additionally, ensure that the kubelet service file
          /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not set the --rotate-certificates
          executable argument to false because this would override the Kubelet
          config file.

          Remediation Method 2:
          If using executable arguments, edit the kubelet service file
          /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each
          worker node and add the below parameter at the end of the KUBELET_ARGS variable
          string.
          --rotate-certificates=true
        scored: true

      - id: 3.2.9
        text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)"
        audit: "/bin/ps -fC $kubeletbin"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
            - flag: RotateKubeletServerCertificate
              path: '{.featureGates.RotateKubeletServerCertificate}'
              set: true
              compare:
                op: eq
                value: true
        remediation: |
          Remediation Method 1:
          If modifying the Kubelet config file, edit the kubelet-config.json file
          /etc/kubernetes/kubelet/kubelet-config.json and set the below parameter to
          true

          "featureGates": {
            "RotateKubeletServerCertificate": true
          },

          Additionally, ensure that the kubelet service file
          /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not set
          the --rotate-kubelet-server-certificate executable argument to false because
          this would override the Kubelet config file.

          Remediation Method 2:
          If using executable arguments, edit the kubelet service file
          /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each
          worker node and add the below parameter at the end of the KUBELET_ARGS variable
          string.
          --rotate-kubelet-server-certificate=true

          Remediation Method 3:
          If using the api configz endpoint, consider searching for the status of
          "RotateKubeletServerCertificate" by extracting the live configuration from the
          nodes running kubelet.
          See detailed step-by-step configmap procedures in Reconfigure a Node's Kubelet in a
          Live Cluster, and then rerun the curl statement from the audit process to check for kubelet
          configuration changes
          kubectl proxy --port=8001 &
          export HOSTNAME_PORT=localhost:8001 (example host and port number)
          export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from "kubectl get nodes")
          curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz"

          For all three remediation methods:
          Restart the kubelet service and check status. The example below is for when using
          systemctl to manage services:
          systemctl daemon-reload
          systemctl restart kubelet.service
          systemctl status kubelet -l
        scored: true
396
cfg/eks-1.8.0/policies.yaml
Normal file
@@ -0,0 +1,396 @@
---
controls:
version: "eks-1.8.0"
id: 4
text: "Policies"
type: "policies"
groups:
  - id: 4.1
    text: "RBAC and Service Accounts"
    checks:
      - id: 4.1.1
        text: "Ensure that the cluster-admin role is only used where required (Automated)"
        audit: |
          kubectl get clusterrolebindings -o json | jq -r '
            .items[]
            | select(.roleRef.name == "cluster-admin")
            | .subjects[]?
            | select(.kind != "Group" or (.name != "system:masters" and .name != "system:nodes"))
            | "FOUND_CLUSTER_ADMIN_BINDING"
          ' | grep . || echo "NO_CLUSTER_ADMIN_BINDINGS"
        tests:
          test_items:
            - flag: "NO_CLUSTER_ADMIN_BINDINGS"
              set: true
              compare:
                op: eq
                value: "NO_CLUSTER_ADMIN_BINDINGS"
        remediation: |
          Identify all clusterrolebindings to the cluster-admin role. Check if they are used and if
          they need this role or if they could use a role with fewer privileges.
          Where possible, first bind users to a lower privileged role and then remove the
          clusterrolebinding to the cluster-admin role:
          kubectl delete clusterrolebinding [name]
        scored: true

      - id: 4.1.2
        text: "Minimize access to secrets (Automated)"
        audit: |
          count=$(kubectl get roles --all-namespaces -o json | jq '
            .items[]
            | select(.rules[]?
              | (.resources[]? == "secrets")
              and ((.verbs[]? == "get") or (.verbs[]? == "list") or (.verbs[]? == "watch"))
            )' | wc -l)

          if [ "$count" -gt 0 ]; then
            echo "SECRETS_ACCESS_FOUND"
          fi
        tests:
          test_items:
            - flag: "SECRETS_ACCESS_FOUND"
              set: false
        remediation: |
          Where possible, remove get, list and watch access to secret objects in the cluster.
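          To spot-check whether a given subject can still read secrets after tightening RBAC
          (the service account name and namespace below are placeholders):
          kubectl auth can-i get secrets --as=system:serviceaccount:dev:my-app -n dev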
        scored: true

      - id: 4.1.3
        text: "Minimize wildcard use in Roles and ClusterRoles (Automated)"
        audit: |
          wildcards=$(kubectl get roles --all-namespaces -o json | jq '
            .items[] | select(
              .rules[]? | (.verbs[]? == "*" or .resources[]? == "*" or .apiGroups[]? == "*")
            )' | wc -l)

          wildcards_clusterroles=$(kubectl get clusterroles -o json | jq '
            .items[] | select(
              .rules[]? | (.verbs[]? == "*" or .resources[]? == "*" or .apiGroups[]? == "*")
            )' | wc -l)

          total=$((wildcards + wildcards_clusterroles))

          if [ "$total" -gt 0 ]; then
            echo "wildcards_present"
          fi
        tests:
          test_items:
            - flag: wildcards_present
              set: false
        remediation: |
          Where possible replace any use of wildcards in clusterroles and roles with specific
          objects or actions.
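          A sketch of a rule rewritten without wildcards (the resource and verb lists are
          illustrative; scope them to what the subject actually needs):
            rules:
              - apiGroups: [""]
                resources: ["pods"]
                verbs: ["get", "list", "watch"]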
        scored: true

      - id: 4.1.4
        text: "Minimize access to create pods (Automated)"
        audit: |
          access=$(kubectl get roles,clusterroles -A -o json | jq '
            [.items[] |
              select(
                .rules[]? |
                (.resources[]? == "pods" and .verbs[]? == "create")
              )
            ] | length')

          if [ "$access" -gt 0 ]; then
            echo "pods_create_access"
          fi
        tests:
          test_items:
            - flag: pods_create_access
              set: false
        remediation: |
          Where possible, remove create access to pod objects in the cluster.
        scored: true

      - id: 4.1.5
        text: "Ensure that default service accounts are not actively used. (Automated)"
        audit: |
          default_sa_count=$(kubectl get serviceaccounts --all-namespaces -o json | jq '
            [.items[] | select(.metadata.name == "default" and (.automountServiceAccountToken != false))] | length')
          if [ "$default_sa_count" -gt 0 ]; then
            echo "default_sa_not_auto_mounted"
          fi
          pods_using_default_sa=$(kubectl get pods --all-namespaces -o json | jq '
            [.items[] | select(.spec.serviceAccountName == "default")] | length')
          if [ "$pods_using_default_sa" -gt 0 ]; then
            echo "default_sa_used_in_pods"
          fi
        tests:
          test_items:
            - flag: default_sa_not_auto_mounted
              set: false
            - flag: default_sa_used_in_pods
              set: false
        remediation: |
          Create explicit service accounts wherever a Kubernetes workload requires specific
          access to the Kubernetes API server.
          Modify the configuration of each default service account to include this value
          automountServiceAccountToken: false
          Automatic remediation for the default account:
          kubectl patch serviceaccount default -p $'automountServiceAccountToken: false'
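          A sketch for applying the same patch across every namespace (assumes a POSIX shell
          with kubectl access; equivalent to the command above, repeated per namespace):
          for ns in $(kubectl get namespaces -o jsonpath='{.items[*].metadata.name}'); do
            kubectl patch serviceaccount default -n "$ns" -p '{"automountServiceAccountToken": false}'
          done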
        scored: true

      - id: 4.1.6
        text: "Ensure that Service Account Tokens are only mounted where necessary (Automated)"
        audit: |
          pods_with_token_mount=$(kubectl get pods --all-namespaces -o json | jq '
            [.items[] | select(.spec.automountServiceAccountToken != false)] | length')

          if [ "$pods_with_token_mount" -gt 0 ]; then
            echo "automountServiceAccountToken"
          fi
        tests:
          test_items:
            - flag: automountServiceAccountToken
              set: false
        remediation: |
          Regularly review pod and service account objects in the cluster to ensure that the
          automountServiceAccountToken setting is false for pods and accounts that do not
          explicitly require API server access.
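          For example, a pod that does not need the API server can opt out explicitly (the pod
          and image names are placeholders):
            apiVersion: v1
            kind: Pod
            metadata:
              name: no-token-pod
            spec:
              automountServiceAccountToken: false
              containers:
                - name: app
                  image: registry.example.com/app:latest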
        scored: true

      - id: 4.1.7
        text: "Cluster Access Manager API to streamline and enhance the management of access controls within EKS clusters (Manual)"
        type: "manual"
        remediation: |
          Log in to the AWS Management Console.
          Navigate to Amazon EKS and select your EKS cluster.

          Go to the Access tab and click "Manage Access" in the "Access Configuration" section.
          Under the Cluster Access settings, choose the Cluster Authentication Mode:
          Select EKS API for the cluster to source authenticated IAM principals only from EKS access entry APIs.
          Select ConfigMap for the cluster to source authenticated IAM principals only from the aws-auth ConfigMap.
          Note: EKS API and ConfigMap must be selected during cluster creation and cannot be changed once the cluster is provisioned.
        scored: false

      - id: 4.1.8
        text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)"
        type: "manual"
        remediation: |
          Where possible, remove the impersonate, bind and escalate rights from subjects.
        scored: false

      - id: 4.1.9
        text: "Minimize access to create PersistentVolume objects (Manual)"
        type: "manual"
        remediation: |
          Review the RBAC rules in the cluster and identify users, groups, or service accounts
          with create permissions on PersistentVolume resources.
          Where possible, remove or restrict create access to PersistentVolume objects to
          trusted administrators only.
        scored: false

      - id: 4.1.10
        text: "Minimize access to the proxy sub-resource of Node objects (Manual)"
        type: "manual"
        remediation: |
          Review RBAC roles and bindings in the cluster to identify users, groups,
          or service accounts with access to the proxy sub-resource of Node objects.
          Where possible, remove or restrict access to the node proxy sub-resource
          to trusted administrators only.
        scored: false

      - id: 4.1.11
        text: "Minimize access to webhook configuration objects (Manual)"
        type: "manual"
        remediation: |
          Review RBAC roles and bindings in the cluster to identify users, groups,
          or service accounts with access to validatingwebhookconfigurations or
          mutatingwebhookconfigurations objects. Where possible, remove or restrict
          access to these webhook configuration objects to trusted administrators only.
        scored: false

      - id: 4.1.12
        text: "Minimize access to the service account token creation (Manual)"
        type: "manual"
        remediation: |
          Review RBAC roles and bindings in the cluster to identify users, groups,
          or service accounts with access to create the token sub-resource of
          serviceaccount objects. Where possible, remove or restrict access to
          token creation to trusted administrators only.
        scored: false

  - id: 4.2
    text: "Pod Security Standards"
    checks:
      - id: 4.2.1
        text: "Minimize the admission of privileged containers (Automated)"
        audit: |
          kubectl get pods --all-namespaces -o json | \
            jq -r 'if any(.items[]?.spec.containers[]?; .securityContext?.privileged == true) then "PRIVILEGED_FOUND" else "NO_PRIVILEGED" end'
        tests:
          test_items:
            - flag: "NO_PRIVILEGED"
              set: true
              compare:
                op: eq
                value: "NO_PRIVILEGED"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the admission of privileged containers.
          To enable PSA for a namespace in your cluster, set the pod-security.kubernetes.io/enforce label with the policy value you want to enforce.
          kubectl label --overwrite ns NAMESPACE pod-security.kubernetes.io/enforce=restricted
          The above command enforces the restricted policy for the NAMESPACE namespace.
          You can also enable Pod Security Admission for all your namespaces. For example:
          kubectl label --overwrite ns --all pod-security.kubernetes.io/warn=baseline
        scored: true

      - id: 4.2.2
        text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)"
        audit: |
          kubectl get pods --all-namespaces -o json | \
            jq -r 'if any(.items[]?; .spec.hostPID == true) then "HOSTPID_FOUND" else "NO_HOSTPID" end'
        tests:
          test_items:
            - flag: "NO_HOSTPID"
              set: true
              compare:
                op: eq
                value: "NO_HOSTPID"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of hostPID containers.
        scored: true

      - id: 4.2.3
        text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)"
        audit: |
          kubectl get pods --all-namespaces -o json | jq -r 'if any(.items[]?; .spec.hostIPC == true) then "HOSTIPC_FOUND" else "NO_HOSTIPC" end'
        tests:
          test_items:
            - flag: "NO_HOSTIPC"
              set: true
              compare:
                op: eq
                value: "NO_HOSTIPC"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of hostIPC containers.
        scored: true

      - id: 4.2.4
        text: "Minimize the admission of containers wishing to share the host network namespace (Automated)"
        audit: |
          kubectl get pods --all-namespaces -o json | jq -r 'if any(.items[]?; .spec.hostNetwork == true) then "HOSTNETWORK_FOUND" else "NO_HOSTNETWORK" end'
        tests:
          test_items:
            - flag: "NO_HOSTNETWORK"
              set: true
              compare:
                op: eq
                value: "NO_HOSTNETWORK"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of hostNetwork containers.
        scored: true

      - id: 4.2.5
        text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)"
        audit: |
          kubectl get pods --all-namespaces -o json | \
            jq -r 'if any(.items[]?.spec.containers[]?; .securityContext?.allowPrivilegeEscalation == true) then "ALLOWPRIVILEGEESCALATION_FOUND" else "NO_ALLOWPRIVILEGEESCALATION" end'
        tests:
          test_items:
            - flag: "NO_ALLOWPRIVILEGEESCALATION"
              set: true
              compare:
                op: eq
                value: "NO_ALLOWPRIVILEGEESCALATION"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of containers with .spec.allowPrivilegeEscalation set to true.
        scored: true

  - id: 4.3
    text: "CNI Plugin"
    checks:
      - id: 4.3.1
        text: "Ensure CNI plugin supports network policies (Manual)"
        type: "manual"
        remediation: |
          As with RBAC policies, network policies should adhere to the principle of least
          privilege. Start by creating a deny-all policy that restricts all inbound and
          outbound traffic in a namespace, or create a global policy using Calico.
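          A minimal sketch of such a global policy with Calico (assumes the Calico API is
          available in the cluster; the policy name is illustrative):
            apiVersion: projectcalico.org/v3
            kind: GlobalNetworkPolicy
            metadata:
              name: default-deny
            spec:
              selector: all()
              types: ["Ingress", "Egress"]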
        scored: false

      - id: 4.3.2
        text: "Ensure that all Namespaces have Network Policies defined (Automated)"
        audit: |
          ns_without_np=$(kubectl get namespaces -o json | jq -r '.items[].metadata.name' | while read ns; do
            count=$(kubectl get networkpolicy -n $ns --no-headers 2>/dev/null | wc -l)
            if [ "$count" -eq 0 ]; then echo $ns; fi
          done)
          if [ -z "$ns_without_np" ]; then
            echo "ALL_NAMESPACES_HAVE_NETWORK_POLICIES"
          else
            echo "NAMESPACES_WITHOUT_NETWORK_POLICIES: $ns_without_np"
          fi
        tests:
          test_items:
            - flag: "ALL_NAMESPACES_HAVE_NETWORK_POLICIES"
              set: true
              compare:
                op: eq
                value: "ALL_NAMESPACES_HAVE_NETWORK_POLICIES"
        remediation: |
          Create at least one NetworkPolicy in each namespace to control and restrict traffic between pods as needed.
        scored: true

  - id: 4.4
    text: "Secrets Management"
    checks:
      - id: 4.4.1
        text: "Prefer using secrets as files over secrets as environment variables (Automated)"
        audit: |
          result=$(kubectl get all --all-namespaces -o jsonpath='{range .items[?(@..secretKeyRef)]}{.metadata.namespace} {.kind} {.metadata.name}{"\n"}{end}')
          if [ -z "$result" ]; then
            echo "NO_SECRETS_AS_ENV_VARS"
          else
            echo "SECRETS_AS_ENV_VARS_FOUND: $result"
          fi
        tests:
          test_items:
            - flag: "NO_SECRETS_AS_ENV_VARS"
              set: true
              compare:
                op: eq
                value: "NO_SECRETS_AS_ENV_VARS"
        remediation: |
          If possible, rewrite application code to read secrets from mounted secret files, rather than
          from environment variables.
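          A sketch of mounting a secret as a file instead of an environment variable (the
          secret, pod and path names are placeholders):
            apiVersion: v1
            kind: Pod
            metadata:
              name: app-with-secret-file
            spec:
              containers:
                - name: app
                  image: registry.example.com/app:latest
                  volumeMounts:
                    - name: creds
                      mountPath: /etc/creds
                      readOnly: true
              volumes:
                - name: creds
                  secret:
                    secretName: app-credentials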
        scored: true

      - id: 4.4.2
        text: "Consider external secret storage (Manual)"
        type: "manual"
        remediation: |
          Refer to the secrets management options offered by your cloud provider or a third-party
          secrets management solution.
        scored: false

  - id: 4.5
    text: "General Policies"
    checks:
      - id: 4.5.1
        text: "Create administrative boundaries between resources using namespaces (Manual)"
        type: "manual"
        remediation: |
          Follow the documentation and create namespaces for objects in your deployment as you need
          them.
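          For example (the namespace name and manifest file are placeholders):
          kubectl create namespace team-a
          kubectl apply -n team-a -f workload.yaml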
        scored: false

      - id: 4.5.2
        text: "The default namespace should not be used (Automated)"
        audit: |
          output=$(kubectl get $(kubectl api-resources --verbs=list --namespaced=true -o name | paste -sd, -) --ignore-not-found -n default 2>/dev/null | grep -v "^kubernetes ")
          if [ -z "$output" ]; then
            echo "NO_USER_RESOURCES_IN_DEFAULT"
          else
            echo "USER_RESOURCES_IN_DEFAULT_FOUND: $output"
          fi
        tests:
          test_items:
            - flag: "NO_USER_RESOURCES_IN_DEFAULT"
              set: true
        remediation: |
          Create and use dedicated namespaces for resources instead of the default namespace.
          Move any user-defined objects out of the default namespace to improve resource
          segregation and RBAC control.
        scored: true
@@ -99,12 +99,13 @@ groups:
      - id: 1.1.7
        text: "Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Automated)"
        audit: |
-         /bin/sh -c 'if [ -d "$etcdconf" ]; then
-           find "$etcdconf" -type f -name "*etcd*" -exec stat -c "permissions=%a" {} +
-         else
-           echo "Directory not found"
-         fi'
-       use_multiple_values: true
+         /bin/sh -c '
+         if [ -e "$etcdconf" ]; then
+           stat -c "permissions=%a %n" "$etcdconf"
+         else
+           echo "Directory not found"
+         fi
+         '
        tests:
          bin_op: or
          test_items:
@@ -122,11 +123,13 @@ groups:
      - id: 1.1.8
        text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)"
        audit: |
-         /bin/sh -c 'if [ -d "$etcdconf" ]; then
-           find "$etcdconf" -type f -name "*etcd*" -exec stat -c "ownership=%U:%G %n" {} +
-         else
-           echo "Directory not found"
-         fi'
+         /bin/sh -c '
+         if [ -e $etcdconf ]; then
+           stat -c "ownership=%U:%G %n" $etcdconf
+         else
+           echo "Directory not found"
+         fi
+         '
        tests:
          bin_op: or
          test_items:

@@ -99,12 +99,13 @@ groups:
      - id: 1.1.7
        text: "Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Automated)"
        audit: |
-         /bin/sh -c 'if [ -d "$etcdconf" ]; then
-           find "$etcdconf" -type f -name "*etcd*" -exec stat -c "permissions=%a" {} +
-         else
-           echo "Directory not found"
-         fi'
-       use_multiple_values: true
+         /bin/sh -c '
+         if [ -e "$etcdconf" ]; then
+           stat -c "permissions=%a %n" "$etcdconf"
+         else
+           echo "Directory not found"
+         fi
+         '
        tests:
          bin_op: or
          test_items:
@@ -122,11 +123,13 @@ groups:
      - id: 1.1.8
        text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)"
        audit: |
-         /bin/sh -c 'if [ -d "$etcdconf" ]; then
-           find "$etcdconf" -type f -name "*etcd*" -exec stat -c "ownership=%U:%G %n" {} +
-         else
-           echo "Directory not found"
-         fi'
+         /bin/sh -c '
+         if [ -e $etcdconf ]; then
+           stat -c "ownership=%U:%G %n" $etcdconf
+         else
+           echo "Directory not found"
+         fi
+         '
        tests:
          bin_op: or
          test_items:

@@ -99,13 +99,13 @@ groups:
      - id: 1.1.7
        text: "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Automated)"
        audit: |
-         /bin/sh -c 'if [ -d "$etcdconf" ]; then
-           find "$etcdconf" -type f -name "*etcd*" \
-           -exec stat -c "permissions=%a %n" {} +
-         else
-           echo "Directory not found"
-         fi'
-       use_multiple_values: true
+         /bin/sh -c '
+         if [ -e "$etcdconf" ]; then
+           stat -c "permissions=%a %n" "$etcdconf"
+         else
+           echo "Directory not found"
+         fi
+         '
        tests:
          bin_op: or
          test_items:
@@ -125,11 +125,13 @@ groups:
      - id: 1.1.8
        text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)"
        audit: |
-         /bin/sh -c 'if [ -d "$etcdconf" ]; then
-           find "$etcdconf" -type f -name "*etcd*" -exec stat -c "ownership=%U:%G %n" {} +
-         else
-           echo "Directory not found"
-         fi'
+         /bin/sh -c '
+         if [ -e $etcdconf ]; then
+           stat -c "ownership=%U:%G %n" $etcdconf
+         else
+           echo "Directory not found"
+         fi
+         '
        tests:
          bin_op: or
          test_items:

@@ -1,22 +1,2 @@
 ---
 ## Version-specific settings that override the values in cfg/config.yaml
-
-master:
-  components:
-    - apiserver
-    - kubelet
-    - scheduler
-    - controllermanager
-    - etcd
-    - policies
-  kubelet:
-    bins:
-      - kubelet
-node:
-  kubelet:
-    defaultkubeconfig: "/node/etc/kubernetes/ssl/kubecfg-kube-node.yaml"
-    defaultcafile: "/node/etc/kubernetes/ssl/kube-ca.pem"
-    bins:
-      - kubelet
-  proxy:
-    defaultkubeconfig: "/node/etc/kubernetes/ssl/kubecfg-kube-proxy.yaml"

@@ -1,6 +1,6 @@
 ---
 controls:
-version: "cis-1.8"
+version: "rke2-cis-1.8"
 id: 3
 text: "Control Plane Configuration"
 type: "controlplane"

@@ -1,26 +1,29 @@
 ---
 controls:
-version: "cis-1.8"
+version: "rke2-cis-1.8"
 id: 2
 text: "Etcd Node Configuration"
 type: "etcd"
 groups:
-  - id: 1.1
-    text: "Control Plane Node Configuration Files"
-    checks:
+  - id: 2
+    text: "Etcd Node Configuration"
+    checks:
      - id: 2.1
        text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)"
-       audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+       audit: "cat $etcdconf"
        tests:
          bin_op: and
          test_items:
-           - flag: "--cert-file"
-             env: "ETCD_CERT_FILE"
-           - flag: "--key-file"
-             env: "ETCD_KEY_FILE"
+           - flag: "cert-file"
+             path: "{.client-transport-security.cert-file}"
+             compare:
+               op: eq
+               value: "/var/lib/rancher/rke2/server/tls/etcd/server-client.crt"
+           - flag: "key-file"
+             path: "{.client-transport-security.key-file}"
+             compare:
+               op: eq
+               value: "/var/lib/rancher/rke2/server/tls/etcd/server-client.key"
        remediation: |
          Follow the etcd service documentation and configure TLS encryption.
          Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml
@@ -31,11 +34,11 @@ groups:

      - id: 2.2
        text: "Ensure that the --client-cert-auth argument is set to true (Automated)"
-       audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+       audit: "cat $etcdconf"
        tests:
          test_items:
-           - flag: "--client-cert-auth"
-             env: "ETCD_CLIENT_CERT_AUTH"
+           - flag: "client-cert-auth"
+             path: "{.client-transport-security.client-cert-auth}"
             compare:
               op: eq
               value: true
@@ -47,15 +50,12 @@ groups:

      - id: 2.3
        text: "Ensure that the --auto-tls argument is not set to true (Automated)"
-       audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+       audit: "cat $etcdconf"
        tests:
          bin_op: or
          test_items:
-           - flag: "--auto-tls"
-             env: "ETCD_AUTO_TLS"
-             set: false
-           - flag: "--auto-tls"
-             env: "ETCD_AUTO_TLS"
+           - flag: "auto-tls"
+             path: "{.client-transport-security.auto-tls}"
             compare:
               op: eq
               value: false
@@ -67,14 +67,18 @@ groups:

      - id: 2.4
        text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated)"
-       audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+       audit: "cat $etcdconf"
+       audit_config: "cat $etcdconf"
        tests:
          bin_op: and
          test_items:
-           - flag: "--peer-cert-file"
-             env: "ETCD_PEER_CERT_FILE"
-           - flag: "--peer-key-file"
-             env: "ETCD_PEER_KEY_FILE"
+           - path: "{.peer-transport-security.cert-file}"
+             compare:
+               op: eq
+               value: "/var/lib/rancher/rke2/server/tls/etcd/peer-server-client.crt"
+           - path: "{.peer-transport-security.key-file}"
+             compare:
+               op: eq
+               value: "/var/lib/rancher/rke2/server/tls/etcd/peer-server-client.key"
        remediation: |
          Follow the etcd service documentation and configure peer TLS encryption as appropriate
          for your etcd cluster.
@@ -86,11 +90,11 @@ groups:

      - id: 2.5
        text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)"
-       audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+       audit: "cat $etcdconf"
        tests:
          test_items:
-           - flag: "--peer-client-cert-auth"
-             env: "ETCD_PEER_CLIENT_CERT_AUTH"
+           - flag: "client-cert-auth"
+             path: "{.peer-transport-security.client-cert-auth}"
             compare:
               op: eq
               value: true
@@ -102,15 +106,12 @@ groups:

      - id: 2.6
        text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)"
-       audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+       audit: "cat $etcdconf"
        tests:
          bin_op: or
          test_items:
-           - flag: "--peer-auto-tls"
-             env: "ETCD_PEER_AUTO_TLS"
-             set: false
-           - flag: "--peer-auto-tls"
-             env: "ETCD_PEER_AUTO_TLS"
+           - flag: "peer-auto-tls"
+             path: "{.peer-transport-security.auto-tls}"
             compare:
               op: eq
               value: false
@@ -122,11 +123,14 @@ groups:

      - id: 2.7
        text: "Ensure that a unique Certificate Authority is used for etcd (Automated)"
-       audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+       audit: "cat $etcdconf"
+       audit_config: "cat $etcdconf"
        tests:
          test_items:
-           - flag: "--trusted-ca-file"
-             env: "ETCD_TRUSTED_CA_FILE"
+           - path: "{.peer-transport-security.trusted-ca-file}"
+             compare:
+               op: eq
+               value: "/var/lib/rancher/rke2/server/tls/etcd/peer-ca.crt"
+             set: true
        remediation: |
          [Manual test]

@@ -1,6 +1,6 @@
 ---
 controls:
-version: "cis-1.8"
+version: "rke2-cis-1.8"
 id: 1
 text: "Control Plane Security Configuration"
 type: "master"
@@ -98,15 +98,14 @@ groups:

      - id: 1.1.7
        text: "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Automated)"
        audit: |
-         /bin/sh -c 'if [ -e "$etcdconf" ]; then
-           find "$etcdconf" -type f -name "*etcd*" \
-           -exec stat -c "permissions=%a %n" {} +
+         /bin/sh -c '
+         if [ -e "$etcdconf" ]; then
+           stat -c "permissions=%a %n" "$etcdconf"
          else
            echo "Directory not found"
-         fi'
-       use_multiple_values: true
+         fi
+         '
        tests:
          bin_op: or
          test_items:
@@ -126,16 +125,21 @@ groups:
      - id: 1.1.8
        text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)"
        audit: |
-         /bin/sh -c 'if [ -e "$etcdconf" ]; then
-           find "$etcdconf" -type f -name "*etcd*" -exec stat -c "%U:%G %n" {} +
+         /bin/sh -c '
+         if [ -e "$etcdconf" ]; then
+           stat -c "ownership=%U:%G %n" "$etcdconf"
          else
            echo "Directory not found"
-         fi'
-       use_multiple_values: true
+         fi
+         '
        tests:
          bin_op: or
          test_items:
-           - flag: "root:root"
+           - flag: "ownership"
+             compare:
+               op: eq
+               value: "root:root"
+             set: true
+           - flag: "Directory not found"
+             set: true
        remediation: |
@@ -181,20 +185,20 @@ groups:
      - id: 1.1.11
        text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)"
        audit: |
-         /bin/sh -c 'if [ -e "/node/var/lib/etcd" ]; then
-           stat -c "%a" "/node/var/lib/etcd"
+         /bin/sh -c 'if [ -e "/var/lib/rancher/rke2/server/db/etcd" ]; then
+           stat -c "permissions=%a %n" "/var/lib/rancher/rke2/server/db/etcd"
          else
-           echo "Directory not found: /node/var/lib/etcd"
+           echo "Directory not found: /var/lib/rancher/rke2/server/db/etcd"
          fi'
        tests:
          bin_op: or
          test_items:
-           - flag: "700"
+           - flag: "permissions"
             compare:
-               op: eq
+               op: bitmask
               value: "700"
             set: true
-           - flag: "Directory not found: /node/var/lib/etcd"
+           - flag: "Directory not found: /var/lib/rancher/rke2/server/db/etcd"
        remediation: |
          On the etcd server node, get the etcd data directory, passed as an argument --data-dir,
          from the command 'ps -ef | grep etcd'.
@@ -204,10 +208,24 @@ groups:

      - id: 1.1.12
        text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)"
-       audit: "stat -c %U:%G /node/var/lib/etcd"
+       audit: |
+         /bin/sh -c '
+         if [ -e "$etcdconf" ]; then
+           stat -c "ownership=%U:%G %n" "$etcdconf"
+         else
+           echo "Directory not found"
+         fi
+         '
+       type: "skip"
        tests:
          bin_op: or
          test_items:
-           - flag: "etcd:etcd"
+           - flag: "ownership"
+             compare:
+               op: eq
+               value: "root:root"
+             set: true
+           - flag: "Directory not found"
+             set: true
        remediation: |
          On the etcd server node, get the etcd data directory, passed as an argument --data-dir,
@@ -220,7 +238,7 @@ groups:

      - id: 1.1.13
        text: "Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)"
-       audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c permissions=%a /etc/kubernetes/admin.conf; fi'"
+       audit: "stat -c permissions=%a /var/lib/rancher/rke2/server/cred/admin.kubeconfig"
        tests:
          test_items:
            - flag: "permissions"
@@ -235,7 +253,7 @@ groups:

      - id: 1.1.14
        text: "Ensure that the admin.conf file ownership is set to root:root (Automated)"
-       audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %U:%G /etc/kubernetes/admin.conf; fi'"
+       audit: "stat -c %U:%G /var/lib/rancher/rke2/server/cred/admin.kubeconfig"
        tests:
          test_items:
            - flag: "root:root"
@@ -309,18 +327,22 @@ groups:

      - id: 1.1.19
        text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)"
-       audit: "check_files_owner_in_dir.sh /node/etc/kubernetes/ssl"
+       audit: |
+         if test -d /var/lib/rancher/rke2/server/tls; then
+           find /var/lib/rancher/rke2/server/tls -exec stat -c "%U:%G" {} \;
+         else
+           echo "NA"
+         fi
        tests:
          test_items:
-           - flag: "true"
-             compare:
-               op: eq
-               value: "true"
+           - flag: "root:root"
             set: true
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example,
-         chown -R root:root /etc/kubernetes/pki/
+         chown root:root $controllermanagerkubeconfig
+         Not Applicable - Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager.
+         All configuration is passed in as arguments at container run time.
        scored: true

      - id: 1.1.20

@@ -1,6 +1,6 @@
 ---
 controls:
-version: "cis-1.8"
+version: "rke2-cis-1.8"
 id: 4
 text: "Worker Node Security Configuration"
 type: "node"
@@ -10,32 +10,41 @@ groups:
    checks:
      - id: 4.1.1
        text: "Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)"
-       audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' '
+       audit: '/bin/sh -c "if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; else echo \"File not found\"; fi"'
        tests:
+         bin_op: or
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "600"
+           - flag: "File not found"
        remediation: |
          Run the below command (based on the file location on your system) on the each worker node.
          For example, chmod 600 $kubeletsvc
-         Not Applicable - Clusters provisioned by RKE doesn’t require or maintain a configuration file for the kubelet service.
+         Not Applicable - Clusters provisioned by RKE doesn't require or maintain a configuration file for the kubelet service.
          All configuration is passed in as arguments at container run time.
        scored: true

      - id: 4.1.2
        text: "Ensure that the kubelet service file ownership is set to root:root (Automated)"
-       audit: '/bin/sh -c "if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; else echo \"File not found\"; fi"'
+       audit: '/bin/sh -c "if test -e $kubeletsvc; then stat -c ownership=%U:%G $kubeletsvc; else echo \"File not found\"; fi"'
        tests:
          bin_op: or
          test_items:
-           - flag: root:root
+           - flag: "ownership"
+             compare:
+               op: eq
+               value: "root:root"
            - flag: "File not found"
        remediation: |
          Run the below command (based on the file location on your system) on the each worker node.
          For example,
          chown root:root $kubeletsvc
-         Not Applicable - Clusters provisioned by RKE doesn’t require or maintain a configuration file for the kubelet service.
+         Not Applicable - Clusters provisioned by RKE does not require or maintain a configuration file for the kubelet service.
          All configuration is passed in as arguments at container run time.
        scored: true

      - id: 4.1.3
        text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Automated)"
        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' '
@@ -52,6 +61,7 @@ groups:
          For example,
          chmod 600 $proxykubeconfig
        scored: true

      - id: 4.1.4
        text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Automated)"
        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' '
@@ -63,6 +73,7 @@ groups:
          Run the below command (based on the file location on your system) on the each worker node.
          For example, chown root:root $proxykubeconfig
        scored: true

      - id: 4.1.5
        text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)"
        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' '
@@ -77,6 +88,7 @@ groups:
          For example,
          chmod 600 $kubeletkubeconfig
        scored: true

      - id: 4.1.6
        text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)"
        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' '
@@ -88,9 +100,15 @@ groups:
          For example,
          chown root:root $kubeletkubeconfig
        scored: true

      - id: 4.1.7
        text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Automated)"
-       audit: "stat -c permissions=%a /node/etc/kubernetes/ssl/kube-ca.pem"
+       audit: |
+         if [ -f /var/lib/rancher/rke2/agent/client-ca.crt ]; then
+           stat -c permissions=%a /var/lib/rancher/rke2/agent/client-ca.crt
+         else
+           echo "NA"
+         fi
        tests:
          test_items:
            - flag: "permissions"
@@ -101,9 +119,15 @@ groups:
          Run the following command to modify the file permissions of the
          --client-ca-file chmod 600 <filename>
        scored: true

      - id: 4.1.8
        text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)"
-       audit: "stat -c %U:%G /node/etc/kubernetes/ssl/kube-ca.pem"
+       audit: |
+         if [ -f /var/lib/rancher/rke2/agent/client-ca.crt ]; then
+           stat -c %U:%G /var/lib/rancher/rke2/agent/client-ca.crt
+         else
+           echo "NA"
+         fi
        tests:
          test_items:
            - flag: root:root
@@ -114,6 +138,7 @@ groups:
          Run the following command to modify the ownership of the --client-ca-file.
          chown root:root <filename>
        scored: true

      - id: 4.1.9
        text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Automated)"
        audit: |
@@ -136,6 +161,7 @@ groups:
          Not Applicable - Clusters provisioned by RKE do not require or maintain a configuration file for the kubelet.
          All configuration is passed in as arguments at container run time.
        scored: true

      - id: 4.1.10
        text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Manual)"
        audit: |
@@ -152,19 +178,24 @@ groups:
        remediation: |
          Run the following command (using the config file location identified in the Audit step)
          chown root:root $kubeletconf
-         Not Applicable - Clusters provisioned by RKE doesn’t require or maintain a configuration file for the kubelet.
+         Not Applicable - Clusters provisioned by RKE does not require or maintain a configuration file for the kubelet.
          All configuration is passed in as arguments at container run time.
        scored: false

  - id: 4.2
    text: "Kubelet"
    checks:
      - id: 4.2.1
        text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
-       audit: "/bin/ps -fC $kubeletbin"
-       audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
+       audit: "/bin/sh -c 'cat /var/lib/rancher/rke2/agent/etc/kubelet.conf.d/*.conf 2>/dev/null' "
+       audit_config: >
+         /bin/sh -c '
+         if ls /var/lib/rancher/rke2/agent/etc/kubelet.conf.d/*.conf 1>/dev/null 2>&1; then
+           /bin/cat /var/lib/rancher/rke2/agent/etc/kubelet.conf.d/*.conf
+         fi'
        tests:
          test_items:
-           - flag: "--anonymous-auth"
+           - flag: "anonymous-auth"
             path: '{.authentication.anonymous.enabled}'
             compare:
               op: eq
@@ -180,13 +211,18 @@ groups:
|
||||
systemctl daemon-reload
|
||||
systemctl restart kubelet.service
|
||||
scored: true
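The two flag entries cover both ways the setting can appear: as an anonymous-auth command-line argument captured by audit, or as a field in the kubelet drop-in config files captured by audit_config, where path is a JSONPath expression evaluated against the parsed YAML. A minimal sketch of that evaluation, using the k8s.io/client-go/util/jsonpath and sigs.k8s.io/yaml packages pinned in the go.mod diff below (an illustration of the mechanism, not the project's actual code path):

package main

import (
    "bytes"
    "fmt"

    "k8s.io/client-go/util/jsonpath"
    "sigs.k8s.io/yaml"
)

func main() {
    // A fragment of a kubelet config as audit_config would surface it.
    raw := []byte("authentication:\n  anonymous:\n    enabled: false\n")

    var cfg map[string]interface{}
    if err := yaml.Unmarshal(raw, &cfg); err != nil {
        panic(err)
    }

    // The same path the check declares.
    jp := jsonpath.New("anonymous-auth")
    if err := jp.Parse("{.authentication.anonymous.enabled}"); err != nil {
        panic(err)
    }

    var out bytes.Buffer
    if err := jp.Execute(&out, cfg); err != nil {
        panic(err)
    }
    fmt.Println(out.String()) // "false", which an eq compare against false would accept
}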

- id: 4.2.2
text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
audit: "/bin/sh -c 'cat /var/lib/rancher/rke2/agent/etc/kubelet.conf.d/*.conf 2>/dev/null' "
audit_config: >
/bin/sh -c '
if ls /var/lib/rancher/rke2/agent/etc/kubelet.conf.d/*.conf 1>/dev/null 2>&1; then
/bin/cat /var/lib/rancher/rke2/agent/etc/kubelet.conf.d/*.conf
fi'
tests:
test_items:
- flag: --authorization-mode
- flag: authorization-mode
path: '{.authorization.mode}'
compare:
op: nothave
@@ -201,14 +237,23 @@ groups:
systemctl daemon-reload
systemctl restart kubelet.service
scored: true
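Here op: nothave inverts the substring test: the check passes only when the extracted authorization mode does not contain the compared value (AlwaysAllow, per the check title; the compare value itself sits in the elided lines). A rough standalone sketch of that semantics (illustrative, not the project's comparator):

package main

import (
    "fmt"
    "strings"
)

// nothave mirrors a substring-absence comparison: pass only when the
// observed value does not contain the forbidden token.
func nothave(observed, forbidden string) bool {
    return !strings.Contains(observed, forbidden)
}

func main() {
    fmt.Println(nothave("Webhook", "AlwaysAllow"))             // true: check passes
    fmt.Println(nothave("AlwaysAllow,Webhook", "AlwaysAllow")) // false: check fails
}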

- id: 4.2.3
text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
audit: |
/bin/sh -c '
if grep -q "clientCAFile: /var/lib/rancher/rke2/agent/client-ca.crt" \
/var/lib/rancher/rke2/agent/etc/kubelet.conf.d/* 2>/dev/null; then
echo "clientCAFile: /var/lib/rancher/rke2/agent/client-ca.crt"
else
echo "clientCAFile=NA"
fi'
tests:
test_items:
- flag: --client-ca-file
path: '{.authentication.x509.clientCAFile}'
- flag: "clientCAFile: /var/lib/rancher/rke2/agent/client-ca.crt"
compare:
op: eq
value: "clientCAFile: /var/lib/rancher/rke2/agent/client-ca.crt"
remediation: |
If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to
the location of the client CA file.
@@ -220,6 +265,7 @@ groups:
systemctl daemon-reload
systemctl restart kubelet.service
scored: true
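Note the shape of the rewritten audit: rather than dumping the whole config for kube-bench to parse, it echoes either the exact expected clientCAFile line or the sentinel clientCAFile=NA, so the eq compare can match the literal string verbatim. Keeping the audit output and the compare value character-for-character identical is what makes this style of check robust.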

- id: 4.2.4
text: "Verify that the --read-only-port argument is set to 0 (Automated)"
audit: "/bin/ps -fC $kubeletbin"
@@ -245,6 +291,7 @@ groups:
systemctl daemon-reload
systemctl restart kubelet.service
scored: true

- id: 4.2.5
text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)"
audit: "/bin/ps -fC $kubeletbin"
@@ -271,6 +318,7 @@ groups:
systemctl daemon-reload
systemctl restart kubelet.service
scored: false

- id: 4.2.6
text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)"
audit: "/bin/ps -fC $kubeletbin"
@@ -296,6 +344,7 @@ groups:
systemctl daemon-reload
systemctl restart kubelet.service
scored: true

- id: 4.2.7
text: "Ensure that the --hostname-override argument is not set (Manual)"
audit: "/bin/ps -fC $kubeletbin "
@@ -312,6 +361,7 @@ groups:
systemctl restart kubelet.service
Not Applicable - Clusters provisioned by RKE set the --hostname-override to avoid any hostname configuration errors
scored: false

- id: 4.2.8
text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)"
audit: "/bin/ps -fC $kubeletbin"
@@ -336,6 +386,7 @@ groups:
systemctl daemon-reload
systemctl restart kubelet.service
scored: false

- id: 4.2.9
text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)"
audit: "/bin/ps -fC $kubeletbin"
@@ -360,6 +411,7 @@ groups:
systemctl restart kubelet.service
Permissive - When generating serving certificates, functionality could break in conjunction with hostname overrides which are required for certain cloud providers.
scored: false

- id: 4.2.10
text: "Ensure that the --rotate-certificates argument is not set to false (Automated)"
audit: "/bin/ps -fC $kubeletbin"
@@ -386,6 +438,7 @@ groups:
systemctl daemon-reload
systemctl restart kubelet.service
scored: true

- id: 4.2.11
text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)"
audit: "/bin/ps -fC $kubeletbin"
@@ -410,6 +463,7 @@ groups:
systemctl restart kubelet.service
Not Applicable - Clusters provisioned by RKE handles certificate rotation directly through RKE.
scored: false

- id: 4.2.12
text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Automated)"
audit: "/bin/ps -fC $kubeletbin"
@@ -433,6 +487,7 @@ groups:
systemctl daemon-reload
systemctl restart kubelet.service
scored: true

- id: 4.2.13
text: "Ensure that a limit is set on pod PIDs (Manual)"
audit: "/bin/ps -fC $kubeletbin"
@@ -1,6 +1,6 @@
---
controls:
version: "cis-1.8"
version: "rke2-cis-1.8"
id: 5
text: "Kubernetes Policies"
type: "policies"

@@ -247,7 +247,9 @@ func TestMapToCISVersion(t *testing.T) {
{kubeVersion: "1.29", succeed: true, exp: "cis-1.11"},
{kubeVersion: "1.30", succeed: true, exp: "cis-1.11"},
{kubeVersion: "1.31", succeed: true, exp: "cis-1.11"},
{kubeVersion: "1.32", succeed: true, exp: "cis-1.11"},
{kubeVersion: "1.32", succeed: true, exp: "cis-1.12"},
{kubeVersion: "1.33", succeed: true, exp: "cis-1.12"},
{kubeVersion: "1.34", succeed: true, exp: "cis-1.12"},
{kubeVersion: "gke-1.2.0", succeed: true, exp: "gke-1.2.0"},
{kubeVersion: "ocp-3.10", succeed: true, exp: "rh-0.7"},
{kubeVersion: "ocp-3.11", succeed: true, exp: "rh-0.7"},
cmd/util.go
@@ -522,11 +522,16 @@ func getPlatformBenchmarkVersion(platform Platform) string {
glog.V(3).Infof("getPlatformBenchmarkVersion platform: %s", platform)
switch platform.Name {
case "eks":
oldEKSVersions := []string{"1.15", "1.16", "1.17", "1.18", "1.19", "1.20", "1.21", "1.22", "1.23", "1.24", "1.25", "1.26", "1.27", "1.28"}
if slices.Contains(oldEKSVersions, platform.Version) {
switch platform.Version {
case "1.15", "1.16", "1.17", "1.18", "1.19":
return "eks-1.0.1"
case "1.29", "1.30", "1.31":
return "eks-1.7.0"
case "1.32", "1.33", "1.34":
return "eks-1.8.0"
default:
return "eks-1.5.0"
}
return "eks-1.7.0"
case "aks":
return "aks-1.7"
case "gke":

@@ -657,6 +657,27 @@ func Test_getPlatformBenchmarkVersion(t *testing.T) {
},
want: "eks-1.7.0",
},
{
name: "eks 1.29",
args: args{
platform: Platform{Name: "eks", Version: "1.29"},
},
want: "eks-1.7.0",
},
{
name: "eks 1.30",
args: args{
platform: Platform{Name: "eks", Version: "1.30"},
},
want: "eks-1.7.0",
},
{
name: "eks 1.32",
args: args{
platform: Platform{Name: "eks", Version: "1.32"},
},
want: "eks-1.8.0",
},
{
name: "eks 1.24",
args: args{

@@ -25,6 +25,7 @@ The following table shows the valid targets based on the CIS Benchmark version.
| cis-1.9 | master, controlplane, node, etcd, policies |
| cis-1.10 | master, controlplane, node, etcd, policies |
| cis-1.11 | master, controlplane, node, etcd, policies |
| cis-1.12 | master, controlplane, node, etcd, policies |
| gke-1.0 | master, controlplane, node, etcd, policies, managedservices |
| gke-1.2.0 | controlplane, node, policies, managedservices |
| gke-1.6.0 | controlplane, node, policies, managedservices |
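With cis-1.12 added to the table, a full scan of a self-managed cluster can name every target explicitly, for example kube-bench run --targets master,controlplane,node,etcd,policies --benchmark cis-1.12 (flag spelling per the existing docs; confirm with kube-bench --help on your build).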

@@ -19,7 +19,8 @@ Other benchmarks are defined by hardening guides.
| CIS | [1.8](https://workbench.cisecurity.org/benchmarks/12958) | cis-1.8 | 1.26 |
| CIS | [1.9](https://workbench.cisecurity.org/benchmarks/16828) | cis-1.9 | 1.27 |
| CIS | [1.10](https://workbench.cisecurity.org/benchmarks/17568) | cis-1.10 | 1.28 |
| CIS | [1.11](https://workbench.cisecurity.org/benchmarks/21709) | cis-1.11 | 1.29-1.32 |
| CIS | [1.11](https://workbench.cisecurity.org/benchmarks/21709) | cis-1.11 | 1.29-1.31 |
| CIS | [1.12](https://workbench.cisecurity.org/benchmarks/22107) | cis-1.12 | 1.32-1.34 |
| CIS | [GKE 1.0.0](https://workbench.cisecurity.org/benchmarks/4536) | gke-1.0 | GKE |
| CIS | [GKE 1.2.0](https://workbench.cisecurity.org/benchmarks/7534) | gke-1.2.0 | GKE |
| CIS | [GKE 1.6.0](https://workbench.cisecurity.org/benchmarks/16093) | gke-1.6.0 | GKE |
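The row split mirrors the test changes above: Kubernetes 1.29-1.31 stays on cis-1.11, while 1.32-1.34 now resolves to the new cis-1.12 benchmark. When no --benchmark flag is given, kube-bench selects the benchmark from the detected node version, so clusters on 1.32 and later pick up cis-1.12 automatically.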

go.mod
@@ -1,49 +1,49 @@
module github.com/aquasecurity/kube-bench

go 1.24.4
go 1.25.0

require (
github.com/aws/aws-sdk-go-v2 v1.39.6
github.com/aws/aws-sdk-go-v2/config v1.31.17
github.com/aws/aws-sdk-go-v2/service/securityhub v1.65.2
github.com/aws/aws-sdk-go-v2 v1.41.1
github.com/aws/aws-sdk-go-v2/config v1.32.7
github.com/aws/aws-sdk-go-v2/service/securityhub v1.67.3
github.com/fatih/color v1.18.0
github.com/golang/glog v1.2.5
github.com/magiconair/properties v1.8.10
github.com/onsi/ginkgo v1.16.5
github.com/pkg/errors v0.9.1
github.com/spf13/cobra v1.10.1
github.com/spf13/cobra v1.10.2
github.com/spf13/viper v1.21.0
github.com/stretchr/testify v1.11.1
golang.org/x/exp v0.0.0-20250718183923-645b1fa84792
gopkg.in/yaml.v2 v2.4.0
gorm.io/driver/postgres v1.6.0
gorm.io/gorm v1.31.0
k8s.io/apimachinery v0.34.1
k8s.io/client-go v0.34.1
gorm.io/gorm v1.31.1
k8s.io/apimachinery v0.35.1
k8s.io/client-go v0.35.1
)

require (
github.com/aws/aws-sdk-go-v2/credentials v1.18.21 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.13 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.13 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.19.7 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.30.1 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.5 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.39.1 // indirect
github.com/aws/smithy-go v1.23.2 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 // indirect
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 // indirect
github.com/aws/smithy-go v1.24.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/emicklei/go-restful/v3 v3.12.2 // indirect
github.com/fsnotify/fsnotify v1.9.0 // indirect
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/google/gnostic-models v0.7.0 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
@@ -71,25 +71,25 @@ require (
github.com/stretchr/objx v0.5.2 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/x448/float16 v0.8.4 // indirect
go.yaml.in/yaml/v2 v2.4.2 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/crypto v0.36.0 // indirect
golang.org/x/net v0.38.0 // indirect
golang.org/x/oauth2 v0.27.0 // indirect
golang.org/x/sync v0.16.0 // indirect
golang.org/x/sys v0.31.0 // indirect
golang.org/x/term v0.30.0 // indirect
golang.org/x/text v0.28.0 // indirect
golang.org/x/crypto v0.45.0 // indirect
golang.org/x/net v0.47.0 // indirect
golang.org/x/oauth2 v0.30.0 // indirect
golang.org/x/sync v0.18.0 // indirect
golang.org/x/sys v0.38.0 // indirect
golang.org/x/term v0.37.0 // indirect
golang.org/x/text v0.31.0 // indirect
golang.org/x/time v0.9.0 // indirect
google.golang.org/protobuf v1.36.5 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
google.golang.org/protobuf v1.36.8 // indirect
gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/api v0.34.1 // indirect
k8s.io/api v0.35.1 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 // indirect
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect
sigs.k8s.io/randfill v1.0.0 // indirect
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
sigs.k8s.io/yaml v1.6.0 // indirect
go.sum
@@ -1,31 +1,35 @@
github.com/aws/aws-sdk-go-v2 v1.39.6 h1:2JrPCVgWJm7bm83BDwY5z8ietmeJUbh3O2ACnn+Xsqk=
github.com/aws/aws-sdk-go-v2 v1.39.6/go.mod h1:c9pm7VwuW0UPxAEYGyTmyurVcNrbF6Rt/wixFqDhcjE=
github.com/aws/aws-sdk-go-v2/config v1.31.17 h1:QFl8lL6RgakNK86vusim14P2k8BFSxjvUkcWLDjgz9Y=
github.com/aws/aws-sdk-go-v2/config v1.31.17/go.mod h1:V8P7ILjp/Uef/aX8TjGk6OHZN6IKPM5YW6S78QnRD5c=
github.com/aws/aws-sdk-go-v2/credentials v1.18.21 h1:56HGpsgnmD+2/KpG0ikvvR8+3v3COCwaF4r+oWwOeNA=
github.com/aws/aws-sdk-go-v2/credentials v1.18.21/go.mod h1:3YELwedmQbw7cXNaII2Wywd+YY58AmLPwX4LzARgmmA=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13 h1:T1brd5dR3/fzNFAQch/iBKeX07/ffu/cLu+q+RuzEWk=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13/go.mod h1:Peg/GBAQ6JDt+RoBf4meB1wylmAipb7Kg2ZFakZTlwk=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.13 h1:a+8/MLcWlIxo1lF9xaGt3J/u3yOZx+CdSveSNwjhD40=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.13/go.mod h1:oGnKwIYZ4XttyU2JWxFrwvhF6YKiK/9/wmE3v3Iu9K8=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.13 h1:HBSI2kDkMdWz4ZM7FjwE7e/pWDEZ+nR95x8Ztet1ooY=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.13/go.mod h1:YE94ZoDArI7awZqJzBAZ3PDD2zSfuP7w6P2knOzIn8M=
github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0=
github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
github.com/aws/aws-sdk-go-v2 v1.41.1 h1:ABlyEARCDLN034NhxlRUSZr4l71mh+T5KAeGh6cerhU=
github.com/aws/aws-sdk-go-v2 v1.41.1/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=
github.com/aws/aws-sdk-go-v2/config v1.32.7 h1:vxUyWGUwmkQ2g19n7JY/9YL8MfAIl7bTesIUykECXmY=
github.com/aws/aws-sdk-go-v2/config v1.32.7/go.mod h1:2/Qm5vKUU/r7Y+zUk/Ptt2MDAEKAfUtKc1+3U1Mo3oY=
github.com/aws/aws-sdk-go-v2/credentials v1.19.7 h1:tHK47VqqtJxOymRrNtUXN5SP/zUTvZKeLx4tH6PGQc8=
github.com/aws/aws-sdk-go-v2/credentials v1.19.7/go.mod h1:qOZk8sPDrxhf+4Wf4oT2urYJrYt3RejHSzgAquYeppw=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 h1:I0GyV8wiYrP8XpA70g1HBcQO1JlQxCMTW9npl5UbDHY=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17/go.mod h1:tyw7BOl5bBe/oqvoIeECFJjMdzXoa/dfVz3QQ5lgHGA=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 h1:xOLELNKGp2vsiteLsvLPwxC+mYmO6OZ8PYgiuPJzF8U=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17/go.mod h1:5M5CI3D12dNOtH3/mk6minaRwI2/37ifCURZISxA/IQ=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 h1:WWLqlh79iO48yLkj1v3ISRNiv+3KdQoZ6JWyfcsyQik=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17/go.mod h1:EhG22vHRrvF8oXSTYStZhJc1aUgKtnJe+aOiFEV90cM=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 h1:x2Ibm/Af8Fi+BH+Hsn9TXGdT+hKbDd5XOTZxTMxDk7o=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3/go.mod h1:IW1jwyrQgMdhisceG8fQLmQIydcT/jWY21rFhzgaKwo=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13 h1:kDqdFvMY4AtKoACfzIGD8A0+hbT41KTKF//gq7jITfM=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13/go.mod h1:lmKuogqSU3HzQCwZ9ZtcqOc5XGMqtDK7OIc2+DxiUEg=
github.com/aws/aws-sdk-go-v2/service/securityhub v1.65.2 h1:JhuxQSODGqdC3H0/K/4mtHlGiLdZyA8kANEA/Aexu/k=
github.com/aws/aws-sdk-go-v2/service/securityhub v1.65.2/go.mod h1:QaXNTOs7OkNR7y9uFWajUymXwUh28Q7cTz3tqWiE6sc=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.1 h1:0JPwLz1J+5lEOfy/g0SURC9cxhbQ1lIMHMa+AHZSzz0=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.1/go.mod h1:fKvyjJcz63iL/ftA6RaM8sRCtN4r4zl4tjL3qw5ec7k=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.5 h1:OWs0/j2UYR5LOGi88sD5/lhN6TDLG6SfA7CqsQO9zF0=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.5/go.mod h1:klO+ejMvYsB4QATfEOIXk8WAEwN4N0aBfJpvC+5SZBo=
github.com/aws/aws-sdk-go-v2/service/sts v1.39.1 h1:mLlUgHn02ue8whiR4BmxxGJLR2gwU6s6ZzJ5wDamBUs=
github.com/aws/aws-sdk-go-v2/service/sts v1.39.1/go.mod h1:E19xDjpzPZC7LS2knI9E6BaRFDK43Eul7vd6rSq2HWk=
github.com/aws/smithy-go v1.23.2 h1:Crv0eatJUQhaManss33hS5r40CG3ZFH+21XSkqMrIUM=
github.com/aws/smithy-go v1.23.2/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 h1:RuNSMoozM8oXlgLG/n6WLaFGoea7/CddrCfIiSA+xdY=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17/go.mod h1:F2xxQ9TZz5gDWsclCtPQscGpP0VUOc8RqgFM3vDENmU=
github.com/aws/aws-sdk-go-v2/service/securityhub v1.67.3 h1:FEs3IkfJWp+Sz3ZY6sAxmebBF0lr1wBcTWkuFW1OFJg=
github.com/aws/aws-sdk-go-v2/service/securityhub v1.67.3/go.mod h1:3wnS16Wip5w0uh9kVFBhuMFmdkrMBr8Fc96kAY5h13o=
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 h1:VrhDvQib/i0lxvr3zqlUwLwJP4fpmpyD9wYG1vfSu+Y=
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5/go.mod h1:k029+U8SY30/3/ras4G/Fnv/b88N4mAfliNn08Dem4M=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 h1:v6EiMvhEYBoHABfbGB4alOYmCIrcgyPPiBE1wZAEbqk=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.9/go.mod h1:yifAsgBxgJWn3ggx70A3urX2AN49Y5sJTD1UQFlfqBw=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 h1:gd84Omyu9JLriJVCbGApcLzVR3XtmC4ZDPcAI6Ftvds=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13/go.mod h1:sTGThjphYE4Ohw8vJiRStAcu3rbjtXRsdNB0TvZ5wwo=
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 h1:5fFjR/ToSOzB2OQ/XqWpZBmNvmP/pJ1jOWYlFDJTjRQ=
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6/go.mod h1:qgFDZQSD/Kys7nJnVqYlWKnh0SSdMjAi0uSwON4wgYQ=
github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk=
github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -44,8 +48,8 @@ github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
@@ -60,8 +64,6 @@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v1.2.5 h1:DrW6hGnjIhtvhOIiAKT6Psh/Kd/ldepEa81DKeiRJ5I=
github.com/golang/glog v1.2.5/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -79,8 +81,8 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8=
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
@@ -102,8 +104,6 @@ github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8Hm
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
@@ -135,12 +135,12 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns=
github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A=
github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k=
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -148,8 +148,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc=
github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik=
@@ -159,8 +159,8 @@ github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I=
github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg=
github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=
github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
@@ -183,37 +183,35 @@ github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
golang.org/x/exp v0.0.0-20250718183923-645b1fa84792 h1:R9PFI6EUdfVKgwKjZef7QIwGcBKu86OEFpJ9nUEP2l4=
golang.org/x/exp v0.0.0-20250718183923-645b1fa84792/go.mod h1:A+z0yzpGtvnG90cToK5n2tu8UJVP2XUATh+r+sfOOOc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -225,23 +223,21 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0=
golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw=
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -252,13 +248,13 @@ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQ
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc=
google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo=
gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
@@ -274,22 +270,22 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/driver/postgres v1.6.0 h1:2dxzU8xJ+ivvqTRph34QX+WrRaJlmfyPqXmoGVjMBa4=
gorm.io/driver/postgres v1.6.0/go.mod h1:vUw0mrGgrTK+uPHEhAdV4sfFELrByKVGnaVRkXDhtWo=
gorm.io/gorm v1.31.0 h1:0VlycGreVhK7RF/Bwt51Fk8v0xLiiiFdbGDPIZQ7mJY=
gorm.io/gorm v1.31.0/go.mod h1:XyQVbO2k6YkOis7C2437jSit3SsDK72s7n7rsSHd+Gs=
k8s.io/api v0.34.1 h1:jC+153630BMdlFukegoEL8E/yT7aLyQkIVuwhmwDgJM=
k8s.io/api v0.34.1/go.mod h1:SB80FxFtXn5/gwzCoN6QCtPD7Vbu5w2n1S0J5gFfTYk=
k8s.io/apimachinery v0.34.1 h1:dTlxFls/eikpJxmAC7MVE8oOeP1zryV7iRyIjB0gky4=
k8s.io/apimachinery v0.34.1/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
k8s.io/client-go v0.34.1 h1:ZUPJKgXsnKwVwmKKdPfw4tB58+7/Ik3CrjOEhsiZ7mY=
k8s.io/client-go v0.34.1/go.mod h1:kA8v0FP+tk6sZA0yKLRG67LWjqufAoSHA2xVGKw9Of8=
gorm.io/gorm v1.31.1 h1:7CA8FTFz/gRfgqgpeKIBcervUn3xSyPUmr6B2WXJ7kg=
gorm.io/gorm v1.31.1/go.mod h1:XyQVbO2k6YkOis7C2437jSit3SsDK72s7n7rsSHd+Gs=
k8s.io/api v0.35.1 h1:0PO/1FhlK/EQNVK5+txc4FuhQibV25VLSdLMmGpDE/Q=
k8s.io/api v0.35.1/go.mod h1:28uR9xlXWml9eT0uaGo6y71xK86JBELShLy4wR1XtxM=
k8s.io/apimachinery v0.35.1 h1:yxO6gV555P1YV0SANtnTjXYfiivaTPvCTKX6w6qdDsU=
k8s.io/apimachinery v0.35.1/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns=
k8s.io/client-go v0.35.1 h1:+eSfZHwuo/I19PaSxqumjqZ9l5XiTEKbIaJ+j1wLcLM=
k8s.io/client-go v0.35.1/go.mod h1:1p1KxDt3a0ruRfc/pG4qT/3oHmUj1AhSHEcxNSGg+OA=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA=
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts=
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE=
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ=
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck=
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg=
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
job.yaml
@@ -11,7 +11,7 @@ spec:
spec:
containers:
- command: ["kube-bench"]
image: docker.io/aquasec/kube-bench:v0.14.0
image: docker.io/aquasec/kube-bench:v0.15.0
name: kube-bench
volumeMounts:
- name: var-lib-cni
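The image bump is the only change here; the job is still run the usual way, e.g. kubectl apply -f job.yaml followed by kubectl logs job/kube-bench once the pod completes (assuming the manifest's default job name, kube-bench).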

makefile
@@ -11,7 +11,7 @@ uname := $(shell uname -s)
BUILDX_PLATFORM ?= linux/amd64,linux/arm64,linux/arm,linux/ppc64le,linux/s390x
DOCKER_ORGS ?= aquasec public.ecr.aws/aquasecurity
GOARCH ?= $@
KUBECTL_VERSION ?= 1.35.0-alpha.2
KUBECTL_VERSION ?= 1.36.0-alpha.1
ARCH ?= $(shell go env GOARCH)

ifneq ($(findstring Microsoft,$(shell uname -r)),)